mindspore-2.0.0rc1-cp38-cp38-manylinux1_x86_64.whl → mindspore-2.2.0-cp38-cp38-manylinux1_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +2 -2
- mindspore/__init__.py +5 -2
- mindspore/_akg/akg/build_module.py +5 -6
- mindspore/_akg/akg/composite/build_module.py +49 -16
- mindspore/_akg/akg/composite/split_stitch.py +10 -11
- mindspore/_akg/akg/config/repository.json +195 -0
- mindspore/_akg/akg/global_configs.py +5 -1
- mindspore/_akg/akg/ms/info_version_adapt.py +67 -1
- mindspore/_akg/akg/tvm/api.py +4 -3
- mindspore/_akg/akg/tvm/autotvm/__init__.py +1 -2
- mindspore/_akg/akg/tvm/autotvm/graph_tuner/base_graph_tuner.py +1 -5
- mindspore/_akg/akg/tvm/autotvm/measure/__init__.py +1 -1
- mindspore/_akg/akg/tvm/autotvm/measure/measure.py +1 -10
- mindspore/_akg/akg/tvm/autotvm/measure/measure_methods.py +1 -372
- mindspore/_akg/akg/tvm/build_module.py +16 -1
- mindspore/_akg/akg/tvm/contrib/graph_runtime.py +0 -53
- mindspore/_akg/akg/tvm/hybrid/parser.py +7 -6
- mindspore/_akg/akg/tvm/ir_builder.py +1 -1
- mindspore/_akg/akg/tvm/module.py +1 -2
- mindspore/_akg/akg/tvm/stmt.py +2 -2
- mindspore/_akg/akg/utils/composite_op_helper.py +9 -10
- mindspore/_akg/akg/utils/kernel_exec.py +58 -260
- mindspore/_akg/akg/utils/op_dsl.py +17 -1
- mindspore/_akg/akg/utils/result_analysis.py +4 -24
- mindspore/_akg/akg/utils/tbe_codegen_utils.py +198 -0
- mindspore/_c_dataengine.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_check_jit_forbidden_api.py +5 -1
- mindspore/_checkparam.py +79 -62
- mindspore/_extends/graph_kernel/__init__.py +0 -1
- mindspore/_extends/graph_kernel/model/graph_split.py +2 -0
- mindspore/_extends/graph_kernel/model/model_builder.py +9 -50
- mindspore/_extends/graph_kernel/splitter.py +1 -9
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +128 -21
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +2 -2
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +4 -2
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +18 -13
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +13 -9
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +1 -1
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -1
- mindspore/_extends/parse/__init__.py +19 -17
- mindspore/_extends/parse/namespace.py +7 -36
- mindspore/_extends/parse/parser.py +375 -189
- mindspore/_extends/parse/resources.py +36 -41
- mindspore/_extends/parse/standard_method.py +350 -245
- mindspore/_extends/parse/trope.py +2 -12
- mindspore/_extends/remote/kernel_build_server.py +24 -7
- mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
- mindspore/_install_custom.py +43 -0
- mindspore/_mindspore_offline_debug.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/amp.py +85 -19
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/base.py +2 -2
- mindspore/boost/boost.py +27 -32
- mindspore/boost/boost_cell_wrapper.py +37 -13
- mindspore/boost/grad_accumulation.py +1 -1
- mindspore/boost/grad_freeze.py +34 -6
- mindspore/boost/group_loss_scale_manager.py +15 -14
- mindspore/boost/less_batch_normalization.py +28 -3
- mindspore/common/__init__.py +15 -11
- mindspore/common/_auto_dynamic.py +68 -0
- mindspore/common/_jit_fallback_utils.py +111 -0
- mindspore/common/_register_for_adapter.py +17 -5
- mindspore/common/_register_for_tensor.py +2 -2
- mindspore/common/_stub_tensor.py +18 -15
- mindspore/common/_utils.py +31 -7
- mindspore/common/api.py +269 -101
- mindspore/common/auto_dynamic_shape.py +498 -0
- mindspore/common/dtype.py +61 -21
- mindspore/common/dump.py +9 -7
- mindspore/common/initializer.py +106 -76
- mindspore/common/jit_config.py +35 -14
- mindspore/common/lazy_inline.py +187 -0
- mindspore/common/mindir_util.py +101 -0
- mindspore/common/mutable.py +10 -13
- mindspore/common/parameter.py +246 -55
- mindspore/common/seed.py +13 -7
- mindspore/common/sparse_tensor.py +29 -33
- mindspore/common/tensor.py +907 -251
- mindspore/communication/__init__.py +7 -4
- mindspore/communication/_comm_helper.py +84 -4
- mindspore/communication/management.py +160 -88
- mindspore/config/op_info.config +99 -75
- mindspore/config/super_bar_config.json +36 -4
- mindspore/context.py +526 -219
- mindspore/dataset/__init__.py +9 -46
- mindspore/dataset/audio/__init__.py +4 -19
- mindspore/dataset/audio/transforms.py +545 -233
- mindspore/dataset/audio/utils.py +21 -18
- mindspore/dataset/callback/ds_callback.py +42 -13
- mindspore/dataset/core/config.py +158 -100
- mindspore/dataset/core/validator_helpers.py +1 -63
- mindspore/dataset/debug/debug_hook.py +45 -13
- mindspore/dataset/debug/pre_defined_hook.py +5 -5
- mindspore/dataset/engine/__init__.py +0 -5
- mindspore/dataset/engine/cache_client.py +38 -15
- mindspore/dataset/engine/datasets.py +615 -278
- mindspore/dataset/engine/datasets_audio.py +154 -283
- mindspore/dataset/engine/datasets_standard_format.py +104 -116
- mindspore/dataset/engine/datasets_text.py +443 -326
- mindspore/dataset/engine/datasets_user_defined.py +251 -164
- mindspore/dataset/engine/datasets_vision.py +839 -1443
- mindspore/dataset/engine/iterators.py +11 -4
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +7 -3
- mindspore/dataset/engine/obs/util.py +3 -0
- mindspore/dataset/engine/offload.py +6 -6
- mindspore/dataset/engine/queue.py +15 -14
- mindspore/dataset/engine/samplers.py +39 -23
- mindspore/dataset/engine/serializer_deserializer.py +22 -6
- mindspore/dataset/engine/validators.py +21 -331
- mindspore/dataset/text/__init__.py +5 -33
- mindspore/dataset/text/transforms.py +334 -165
- mindspore/dataset/text/utils.py +215 -145
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/c_transforms.py +3 -2
- mindspore/dataset/transforms/py_transforms_util.py +40 -12
- mindspore/dataset/transforms/transforms.py +174 -71
- mindspore/dataset/utils/browse_dataset.py +25 -17
- mindspore/dataset/utils/line_reader.py +24 -21
- mindspore/dataset/vision/__init__.py +5 -26
- mindspore/dataset/vision/c_transforms.py +177 -165
- mindspore/dataset/vision/py_transforms.py +114 -119
- mindspore/dataset/vision/py_transforms_util.py +54 -51
- mindspore/dataset/vision/transforms.py +1127 -381
- mindspore/dataset/vision/utils.py +54 -38
- mindspore/dataset/vision/validators.py +12 -2
- mindspore/experimental/map_parameter.py +38 -4
- mindspore/{dataset/datapreprocess → experimental/optim}/__init__.py +14 -4
- mindspore/experimental/optim/adam.py +192 -0
- mindspore/experimental/optim/adamw.py +181 -0
- mindspore/experimental/optim/lr_scheduler.py +1427 -0
- mindspore/experimental/optim/optimizer.py +252 -0
- mindspore/experimental/optim/sgd.py +147 -0
- mindspore/gen_ops.py +273 -0
- mindspore/include/OWNERS +1 -2
- mindspore/include/api/context.h +21 -1
- mindspore/include/api/data_type.h +2 -1
- mindspore/include/api/graph.h +0 -15
- mindspore/include/api/kernel.h +2 -0
- mindspore/include/api/kernel_api.h +37 -12
- mindspore/include/api/model.h +29 -42
- mindspore/include/api/model_group.h +14 -3
- mindspore/include/api/model_parallel_runner.h +18 -2
- mindspore/include/api/serialization.h +26 -0
- mindspore/include/api/status.h +1 -0
- mindspore/include/api/types.h +38 -4
- mindspore/include/c_api/ms/abstract.h +67 -0
- mindspore/include/c_api/ms/attribute.h +197 -0
- mindspore/include/c_api/ms/base/handle_types.h +43 -0
- mindspore/include/c_api/ms/base/macros.h +32 -0
- mindspore/include/c_api/ms/base/status.h +33 -0
- mindspore/include/c_api/ms/base/types.h +282 -0
- mindspore/include/c_api/ms/context.h +102 -0
- mindspore/include/c_api/ms/graph.h +160 -0
- mindspore/include/c_api/ms/node.h +606 -0
- mindspore/include/c_api/ms/tensor.h +161 -0
- mindspore/include/c_api/ms/value.h +84 -0
- mindspore/include/c_api/status_c.h +3 -0
- mindspore/include/dataset/constants.h +6 -12
- mindspore/include/dataset/execute.h +23 -13
- mindspore/include/dataset/text.h +26 -26
- mindspore/include/dataset/transforms.h +25 -31
- mindspore/include/dataset/vision.h +60 -60
- mindspore/include/dataset/vision_ascend.h +5 -6
- mindspore/include/dataset/vision_lite.h +17 -17
- mindspore/include/mindapi/base/format.h +0 -1
- mindspore/include/mindapi/base/type_id.h +2 -1
- mindspore/include/mindapi/base/types.h +5 -1
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libjemalloc.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +9000 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/libakg.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/cpu/libakg.so +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
- mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu10.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.6/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
- mindspore/log.py +9 -6
- mindspore/mindrecord/filereader.py +33 -4
- mindspore/mindrecord/filewriter.py +70 -35
- mindspore/mindrecord/mindpage.py +40 -34
- mindspore/mindrecord/shardreader.py +1 -1
- mindspore/mindrecord/shardsegment.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +25 -18
- mindspore/mindrecord/tools/cifar10_to_mr.py +25 -18
- mindspore/mindrecord/tools/csv_to_mr.py +29 -13
- mindspore/mindrecord/tools/imagenet_to_mr.py +24 -10
- mindspore/mindrecord/tools/mnist_to_mr.py +24 -11
- mindspore/mindrecord/tools/tfrecord_to_mr.py +31 -26
- mindspore/nn/cell.py +463 -169
- mindspore/nn/dynamic_lr.py +47 -43
- mindspore/nn/layer/activation.py +225 -82
- mindspore/nn/layer/basic.py +121 -79
- mindspore/nn/layer/channel_shuffle.py +21 -21
- mindspore/nn/layer/combined.py +33 -26
- mindspore/nn/layer/container.py +277 -22
- mindspore/nn/layer/conv.py +441 -304
- mindspore/nn/layer/dense.py +19 -13
- mindspore/nn/layer/embedding.py +62 -49
- mindspore/nn/layer/flash_attention.py +264 -0
- mindspore/nn/layer/image.py +50 -39
- mindspore/nn/layer/math.py +62 -51
- mindspore/nn/layer/normalization.py +219 -167
- mindspore/nn/layer/padding.py +58 -70
- mindspore/nn/layer/pooling.py +334 -287
- mindspore/nn/layer/rnn_cells.py +53 -38
- mindspore/nn/layer/rnns.py +59 -56
- mindspore/nn/layer/thor_layer.py +52 -44
- mindspore/nn/layer/timedistributed.py +6 -4
- mindspore/nn/layer/transformer.py +284 -164
- mindspore/nn/learning_rate_schedule.py +34 -25
- mindspore/nn/loss/__init__.py +3 -2
- mindspore/nn/loss/loss.py +554 -311
- mindspore/nn/optim/ada_grad.py +12 -9
- mindspore/nn/optim/adadelta.py +14 -11
- mindspore/nn/optim/adafactor.py +19 -16
- mindspore/nn/optim/adam.py +62 -47
- mindspore/nn/optim/adamax.py +13 -10
- mindspore/nn/optim/adasum.py +12 -8
- mindspore/nn/optim/asgd.py +10 -9
- mindspore/nn/optim/ftrl.py +20 -17
- mindspore/nn/optim/lamb.py +16 -12
- mindspore/nn/optim/lars.py +8 -6
- mindspore/nn/optim/lazyadam.py +25 -20
- mindspore/nn/optim/momentum.py +10 -7
- mindspore/nn/optim/optimizer.py +61 -9
- mindspore/nn/optim/proximal_ada_grad.py +14 -13
- mindspore/nn/optim/rmsprop.py +17 -13
- mindspore/nn/optim/rprop.py +30 -17
- mindspore/nn/optim/sgd.py +40 -23
- mindspore/nn/optim/thor.py +24 -26
- mindspore/nn/probability/bijector/bijector.py +11 -11
- mindspore/nn/probability/bijector/exp.py +1 -1
- mindspore/nn/probability/bijector/gumbel_cdf.py +3 -3
- mindspore/nn/probability/bijector/invert.py +1 -1
- mindspore/nn/probability/bijector/power_transform.py +29 -29
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +5 -5
- mindspore/nn/probability/bnn_layers/bnn_cell_wrapper.py +4 -2
- mindspore/nn/probability/bnn_layers/conv_variational.py +13 -13
- mindspore/nn/probability/bnn_layers/dense_variational.py +12 -12
- mindspore/nn/probability/bnn_layers/layer_distribution.py +9 -8
- mindspore/nn/probability/distribution/_utils/custom_ops.py +19 -3
- mindspore/nn/probability/distribution/_utils/utils.py +1 -1
- mindspore/nn/probability/distribution/bernoulli.py +9 -9
- mindspore/nn/probability/distribution/beta.py +8 -8
- mindspore/nn/probability/distribution/categorical.py +23 -15
- mindspore/nn/probability/distribution/cauchy.py +5 -6
- mindspore/nn/probability/distribution/distribution.py +3 -3
- mindspore/nn/probability/distribution/exponential.py +4 -4
- mindspore/nn/probability/distribution/gamma.py +10 -10
- mindspore/nn/probability/distribution/geometric.py +8 -8
- mindspore/nn/probability/distribution/gumbel.py +8 -9
- mindspore/nn/probability/distribution/half_normal.py +5 -5
- mindspore/nn/probability/distribution/laplace.py +5 -5
- mindspore/nn/probability/distribution/log_normal.py +12 -11
- mindspore/nn/probability/distribution/logistic.py +8 -8
- mindspore/nn/probability/distribution/normal.py +6 -5
- mindspore/nn/probability/distribution/poisson.py +10 -11
- mindspore/nn/probability/distribution/student_t.py +8 -9
- mindspore/nn/probability/distribution/transformed_distribution.py +5 -5
- mindspore/nn/probability/distribution/uniform.py +11 -11
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +9 -9
- mindspore/nn/wrap/cell_wrapper.py +188 -63
- mindspore/nn/wrap/grad_reducer.py +21 -12
- mindspore/nn/wrap/loss_scale.py +136 -49
- mindspore/numpy/__init__.py +4 -4
- mindspore/numpy/array_creations.py +55 -56
- mindspore/numpy/array_ops.py +134 -35
- mindspore/numpy/logic_ops.py +66 -20
- mindspore/numpy/math_ops.py +142 -139
- mindspore/numpy/utils_const.py +2 -2
- mindspore/offline_debug/convert_async.py +2 -2
- mindspore/ops/_grad_experimental/__init__.py +7 -5
- mindspore/ops/_grad_experimental/grad_array_ops.py +231 -348
- mindspore/ops/{_grad → _grad_experimental}/grad_base.py +1 -33
- mindspore/ops/{_grad → _grad_experimental}/grad_comm_ops.py +25 -13
- mindspore/ops/{_grad/__init__.py → _grad_experimental/grad_debug_ops.py} +15 -7
- mindspore/ops/{_grad → _grad_experimental}/grad_implementations.py +17 -11
- mindspore/ops/_grad_experimental/grad_inner_ops.py +33 -52
- mindspore/ops/_grad_experimental/grad_math_ops.py +151 -1224
- mindspore/ops/_grad_experimental/grad_nn_ops.py +141 -414
- mindspore/ops/{_grad → _grad_experimental}/grad_quant_ops.py +10 -6
- mindspore/ops/_grad_experimental/grad_sparse.py +317 -2
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -13
- mindspore/ops/{_grad → _grad_experimental}/taylor_rule.py +1 -1
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/flash_attention/__init__.py +0 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/attention.py +406 -0
- mindspore/{_extends/graph_kernel/expanders/complex/__init__.py → ops/_op_impl/_custom_op/flash_attention/constants.py} +27 -8
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_bwd.py +467 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_fwd.py +563 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_impl.py +193 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tik_ops_utils.py +435 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/__init__.py +0 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/sparse_tiling.py +45 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/strategy.py +67 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/wukong_tiling.py +62 -0
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/aicpu/__init__.py +41 -1
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d.py +37 -0
- mindspore/ops/_op_impl/aicpu/bias_add_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/cast.py +52 -0
- mindspore/ops/_op_impl/aicpu/coalesce.py +2 -0
- mindspore/ops/_op_impl/aicpu/col2im.py +3 -1
- mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
- mindspore/ops/_op_impl/aicpu/dropout_genmask.py +6 -0
- mindspore/ops/_op_impl/aicpu/eps.py +32 -0
- mindspore/ops/_op_impl/aicpu/eye.py +4 -4
- mindspore/ops/_op_impl/aicpu/fft_with_size.py +6 -0
- mindspore/ops/_op_impl/aicpu/fill_diagonal.py +5 -0
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -2
- mindspore/ops/_op_impl/aicpu/im2col.py +3 -5
- mindspore/ops/_op_impl/aicpu/lgamma.py +1 -0
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +6 -3
- mindspore/ops/_op_impl/aicpu/lu.py +39 -0
- mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/masked_scatter.py +1 -0
- mindspore/ops/_op_impl/aicpu/masked_select_grad.py +3 -0
- mindspore/ops/_op_impl/aicpu/matrix_band_part.py +59 -0
- mindspore/ops/_op_impl/aicpu/matrix_power.py +6 -1
- mindspore/ops/_op_impl/aicpu/median.py +1 -0
- mindspore/ops/_op_impl/aicpu/multinomial.py +9 -9
- mindspore/ops/_op_impl/aicpu/not_equal.py +0 -5
- mindspore/ops/_op_impl/aicpu/pad_v3.py +3 -1
- mindspore/ops/_op_impl/aicpu/pad_v3_grad.py +2 -0
- mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +15 -7
- mindspore/ops/_op_impl/aicpu/random_categorical.py +39 -19
- mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +5 -2
- mindspore/ops/_op_impl/aicpu/random_poisson.py +103 -52
- mindspore/ops/_op_impl/aicpu/random_shuffle.py +17 -15
- mindspore/ops/_op_impl/aicpu/resize_bilinear_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2.py +0 -6
- mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2_grad.py +0 -7
- mindspore/ops/_op_impl/aicpu/scatter_nd.py +2 -0
- mindspore/ops/_op_impl/aicpu/sequence_concat.py +40 -0
- mindspore/ops/_op_impl/aicpu/sequence_stack.py +40 -0
- mindspore/ops/_op_impl/aicpu/{sparseaddmm.py → sparse_addmm.py} +2 -2
- mindspore/ops/_op_impl/aicpu/{sparsesparsemaximum.py → sparse_sparse_maximum.py} +4 -4
- mindspore/ops/_op_impl/aicpu/standard_laplace.py +5 -4
- mindspore/ops/_op_impl/aicpu/standard_normal.py +5 -4
- mindspore/ops/_op_impl/aicpu/truncated_normal.py +9 -7
- mindspore/ops/_op_impl/aicpu/uniform.py +5 -3
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +8 -4
- mindspore/ops/_op_impl/aicpu/uniform_int.py +5 -5
- mindspore/ops/_op_impl/aicpu/uniform_real.py +4 -4
- mindspore/ops/_op_impl/aicpu/upsample_nearest_3d.py +14 -6
- mindspore/ops/_op_impl/aicpu/upsample_nearest_3d_grad.py +22 -8
- mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d.py +11 -6
- mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d_grad.py +21 -10
- mindspore/ops/_op_impl/tbe/__init__.py +6 -4
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool.py +2 -2
- mindspore/ops/_op_impl/tbe/avg_pool_3d.py +3 -3
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +4 -4
- mindspore/ops/_op_impl/tbe/avg_pool_ds.py +2 -2
- mindspore/ops/_op_impl/tbe/avg_pool_grad.py +3 -3
- mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +3 -3
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +2 -2
- mindspore/ops/_op_impl/tbe/bn_infer.py +2 -2
- mindspore/ops/_op_impl/tbe/bn_infer_ds.py +3 -2
- mindspore/ops/_op_impl/tbe/broadcast_to.py +1 -1
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +3 -3
- mindspore/ops/_op_impl/tbe/expand_dims.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_v2.py +56 -0
- mindspore/ops/_op_impl/tbe/im2col.py +4 -4
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +7 -3
- mindspore/ops/_op_impl/tbe/mem_set.py +38 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +3 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_d.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +2 -2
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +2 -0
- mindspore/ops/_primitive_cache.py +1 -1
- mindspore/ops/_tracefunc.py +241 -0
- mindspore/ops/_utils/utils.py +10 -2
- mindspore/ops/_vmap/vmap_array_ops.py +5 -3
- mindspore/ops/_vmap/vmap_base.py +5 -4
- mindspore/ops/_vmap/vmap_convolution_ops.py +1 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +6 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +11 -6
- mindspore/ops/_vmap/vmap_math_ops.py +5 -2
- mindspore/ops/_vmap/vmap_nn_ops.py +135 -11
- mindspore/ops/arg_dtype_cast.py +54 -0
- mindspore/ops/composite/__init__.py +7 -5
- mindspore/ops/composite/base.py +78 -34
- mindspore/ops/composite/math_ops.py +5 -695
- mindspore/ops/composite/multitype_ops/_compile_utils.py +403 -97
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +28 -22
- mindspore/ops/composite/multitype_ops/add_impl.py +69 -7
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +48 -10
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/mod_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +10 -7
- mindspore/ops/composite/multitype_ops/sub_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/uadd_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +9 -0
- mindspore/ops/deprecated.py +304 -0
- mindspore/ops/function/__init__.py +41 -4
- mindspore/ops/function/array_func.py +1108 -467
- mindspore/ops/function/clip_func.py +94 -27
- mindspore/ops/function/debug_func.py +3 -1
- mindspore/ops/function/grad/grad_func.py +82 -73
- mindspore/ops/function/image_func.py +28 -12
- mindspore/ops/function/linalg_func.py +135 -39
- mindspore/ops/function/math_func.py +3779 -894
- mindspore/ops/function/nn_func.py +1584 -657
- mindspore/ops/function/parameter_func.py +13 -3
- mindspore/ops/function/random_func.py +247 -153
- mindspore/ops/function/sparse_func.py +14 -11
- mindspore/ops/function/sparse_unary_func.py +173 -47
- mindspore/ops/function/spectral_func.py +8 -4
- mindspore/ops/function/vmap_func.py +8 -7
- mindspore/ops/functional.py +47 -16
- mindspore/ops/op_info_register.py +346 -86
- mindspore/ops/operations/__init__.py +38 -22
- mindspore/ops/operations/_grad_ops.py +145 -149
- mindspore/ops/operations/_inner_ops.py +298 -56
- mindspore/ops/operations/_ms_kernel.py +3 -3
- mindspore/ops/operations/_quant_ops.py +24 -28
- mindspore/ops/operations/_rl_inner_ops.py +9 -7
- mindspore/ops/operations/_scalar_ops.py +115 -0
- mindspore/ops/operations/_sequence_ops.py +148 -10
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/_thor_ops.py +2 -2
- mindspore/ops/operations/array_ops.py +1239 -561
- mindspore/ops/operations/comm_ops.py +166 -90
- mindspore/ops/operations/control_ops.py +3 -3
- mindspore/ops/operations/custom_ops.py +124 -102
- mindspore/ops/operations/debug_ops.py +24 -11
- mindspore/ops/operations/image_ops.py +86 -71
- mindspore/ops/operations/inner_ops.py +18 -13
- mindspore/ops/operations/linalg_ops.py +30 -11
- mindspore/ops/operations/math_ops.py +1730 -435
- mindspore/ops/operations/nn_ops.py +1953 -943
- mindspore/ops/operations/other_ops.py +65 -43
- mindspore/ops/operations/random_ops.py +258 -98
- mindspore/ops/operations/rl_ops.py +4 -36
- mindspore/ops/operations/sparse_ops.py +38 -33
- mindspore/ops/operations/spectral_ops.py +8 -4
- mindspore/ops/primitive.py +66 -44
- mindspore/ops/signature.py +5 -5
- mindspore/parallel/_auto_parallel_context.py +80 -19
- mindspore/parallel/_cost_model_context.py +42 -0
- mindspore/parallel/_offload_context.py +162 -72
- mindspore/parallel/_parallel_serialization.py +2 -2
- mindspore/parallel/_ps_context.py +16 -4
- mindspore/parallel/_recovery_context.py +2 -1
- mindspore/parallel/_tensor.py +15 -13
- mindspore/parallel/_transformer/layers.py +8 -6
- mindspore/parallel/_transformer/loss.py +1 -0
- mindspore/parallel/_transformer/moe.py +7 -7
- mindspore/parallel/_transformer/op_parallel_config.py +12 -1
- mindspore/parallel/_transformer/transformer.py +34 -14
- mindspore/parallel/_utils.py +36 -14
- mindspore/parallel/algo_parameter_config.py +114 -20
- mindspore/parallel/checkpoint_transform.py +16 -18
- mindspore/parallel/shard.py +16 -13
- mindspore/profiler/__init__.py +1 -1
- mindspore/profiler/common/struct_type.py +3 -3
- mindspore/profiler/common/util.py +3 -2
- mindspore/profiler/envprofiling.py +11 -4
- mindspore/profiler/parser/aicpu_data_parser.py +5 -3
- mindspore/profiler/parser/ascend_flops_generator.py +94 -0
- mindspore/profiler/parser/ascend_fpbp_generator.py +76 -0
- mindspore/profiler/parser/ascend_hccl_generator.py +288 -0
- mindspore/profiler/parser/ascend_msprof_exporter.py +213 -0
- mindspore/profiler/parser/ascend_msprof_generator.py +199 -0
- mindspore/profiler/parser/ascend_op_generator.py +276 -0
- mindspore/profiler/parser/ascend_steptrace_generator.py +94 -0
- mindspore/profiler/parser/ascend_timeline_generator.py +110 -54
- mindspore/profiler/parser/base_timeline_generator.py +11 -7
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +45 -46
- mindspore/profiler/parser/flops_parser.py +15 -11
- mindspore/profiler/parser/framework_parser.py +92 -73
- mindspore/profiler/parser/hccl_parser.py +16 -12
- mindspore/profiler/parser/integrator.py +22 -11
- mindspore/profiler/parser/memory_usage_parser.py +36 -11
- mindspore/profiler/parser/minddata_analyzer.py +12 -14
- mindspore/profiler/parser/minddata_pipeline_parser.py +1 -1
- mindspore/profiler/parser/msadvisor_parser.py +8 -4
- mindspore/profiler/parser/op_intermediate_parser.py +5 -2
- mindspore/profiler/parser/optime_parser.py +1 -1
- mindspore/profiler/parser/profiler_info.py +4 -5
- mindspore/profiler/parser/step_trace_parser.py +11 -14
- mindspore/profiler/profiling.py +678 -377
- mindspore/rewrite/api/node.py +211 -54
- mindspore/rewrite/api/node_type.py +5 -0
- mindspore/rewrite/api/pattern_engine.py +22 -23
- mindspore/rewrite/api/scoped_value.py +20 -17
- mindspore/rewrite/api/symbol_tree.py +252 -106
- mindspore/rewrite/api/tree_node_helper.py +3 -0
- mindspore/rewrite/ast_helpers/__init__.py +2 -1
- mindspore/rewrite/ast_helpers/ast_finder.py +129 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +116 -104
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +97 -46
- mindspore/rewrite/common/rewrite_elog.py +5 -1
- mindspore/rewrite/namer.py +51 -51
- mindspore/rewrite/namespace.py +14 -5
- mindspore/{ops/bprop_mindir → rewrite/node}/__init__.py +9 -4
- mindspore/rewrite/node/call_function.py +79 -0
- mindspore/rewrite/node/cell_container.py +135 -0
- mindspore/rewrite/node/control_flow.py +88 -0
- mindspore/rewrite/{node.py → node/node.py} +313 -247
- mindspore/rewrite/node/node_manager.py +254 -0
- mindspore/rewrite/node/node_topological_manager.py +243 -0
- mindspore/rewrite/parsers/arguments_parser.py +22 -21
- mindspore/rewrite/parsers/assign_parser.py +225 -239
- mindspore/rewrite/parsers/attribute_parser.py +9 -7
- mindspore/rewrite/parsers/class_def_parser.py +179 -218
- mindspore/rewrite/parsers/constant_parser.py +9 -6
- mindspore/rewrite/parsers/container_parser.py +9 -7
- mindspore/rewrite/parsers/for_parser.py +36 -15
- mindspore/rewrite/parsers/function_def_parser.py +23 -20
- mindspore/rewrite/parsers/if_parser.py +28 -24
- mindspore/rewrite/parsers/module_parser.py +202 -25
- mindspore/rewrite/{parser.py → parsers/parser.py} +4 -2
- mindspore/rewrite/{parser_register.py → parsers/parser_register.py} +1 -1
- mindspore/rewrite/parsers/return_parser.py +6 -6
- mindspore/rewrite/sparsify/sparse_transformer.py +12 -3
- mindspore/rewrite/sparsify/sparsify.py +4 -1
- mindspore/rewrite/sparsify/utils.py +11 -5
- mindspore/rewrite/symbol_tree.py +577 -732
- mindspore/rewrite/symbol_tree_builder.py +9 -175
- mindspore/rewrite/symbol_tree_dumper.py +2 -2
- mindspore/run_check/_check_version.py +46 -39
- mindspore/run_check/run_check.py +3 -2
- mindspore/{scipy/sparse → safeguard}/__init__.py +4 -5
- mindspore/safeguard/rewrite_obfuscation.py +517 -0
- mindspore/scipy/__init__.py +1 -1
- mindspore/scipy/linalg.py +67 -61
- mindspore/scipy/ops.py +5 -41
- mindspore/scipy/ops_grad.py +3 -2
- mindspore/scipy/ops_wrapper.py +5 -5
- mindspore/scipy/optimize/line_search.py +8 -8
- mindspore/scipy/optimize/linear_sum_assignment.py +4 -4
- mindspore/scipy/optimize/minimize.py +16 -12
- mindspore/scipy/utils.py +1 -52
- mindspore/scipy/utils_const.py +4 -4
- mindspore/train/__init__.py +4 -4
- mindspore/train/_utils.py +13 -5
- mindspore/train/amp.py +410 -148
- mindspore/train/anf_ir_pb2.py +16 -4
- mindspore/train/callback/_backup_and_restore.py +8 -11
- mindspore/train/callback/_callback.py +80 -3
- mindspore/train/callback/_checkpoint.py +82 -51
- mindspore/train/callback/_early_stop.py +12 -15
- mindspore/train/callback/_history.py +1 -1
- mindspore/train/callback/_lambda_callback.py +13 -13
- mindspore/train/callback/_landscape.py +21 -17
- mindspore/train/callback/_loss_monitor.py +9 -10
- mindspore/train/callback/_on_request_exit.py +16 -33
- mindspore/train/callback/_reduce_lr_on_plateau.py +21 -24
- mindspore/train/callback/_summary_collector.py +44 -30
- mindspore/train/callback/_time_monitor.py +62 -12
- mindspore/train/data_sink.py +10 -16
- mindspore/train/dataset_helper.py +154 -86
- mindspore/train/loss_scale_manager.py +14 -9
- mindspore/train/metrics/__init__.py +10 -2
- mindspore/train/metrics/accuracy.py +1 -1
- mindspore/train/metrics/auc.py +1 -1
- mindspore/train/metrics/bleu_score.py +2 -2
- mindspore/train/metrics/confusion_matrix.py +14 -14
- mindspore/train/metrics/cosine_similarity.py +3 -3
- mindspore/train/metrics/dice.py +1 -1
- mindspore/train/metrics/fbeta.py +1 -1
- mindspore/train/metrics/hausdorff_distance.py +8 -6
- mindspore/train/metrics/mean_surface_distance.py +5 -4
- mindspore/train/metrics/metric.py +49 -17
- mindspore/train/metrics/occlusion_sensitivity.py +4 -4
- mindspore/train/metrics/perplexity.py +1 -1
- mindspore/train/metrics/precision.py +2 -2
- mindspore/train/metrics/recall.py +2 -3
- mindspore/train/metrics/roc.py +7 -7
- mindspore/train/metrics/root_mean_square_surface_distance.py +5 -4
- mindspore/train/metrics/topk.py +7 -4
- mindspore/train/mind_ir_pb2.py +193 -48
- mindspore/train/model.py +377 -133
- mindspore/train/serialization.py +697 -245
- mindspore/train/summary/_summary_adapter.py +5 -2
- mindspore/train/summary/_writer_pool.py +4 -3
- mindspore/train/summary/summary_record.py +25 -23
- mindspore/train/train_thor/convert_utils.py +39 -23
- mindspore/train/train_thor/dataset_helper.py +4 -3
- mindspore/train/train_thor/model_thor.py +8 -8
- mindspore/version.py +1 -1
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/METADATA +7 -8
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/RECORD +647 -818
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/entry_points.txt +0 -1
- mindspore/_akg/akg/tvm/contrib/debugger/__init__.py +0 -16
- mindspore/_akg/akg/tvm/contrib/debugger/debug_result.py +0 -274
- mindspore/_akg/akg/tvm/contrib/debugger/debug_runtime.py +0 -259
- mindspore/_akg/akg/tvm/contrib/peak.py +0 -341
- mindspore/_akg/akg/tvm/contrib/rpc.py +0 -25
- mindspore/_akg/akg/tvm/contrib/xcode.py +0 -257
- mindspore/_akg/akg/tvm/exec/__init__.py +0 -17
- mindspore/_akg/akg/tvm/exec/autotvm_log_editor.py +0 -60
- mindspore/_akg/akg/tvm/exec/measure_peak.py +0 -48
- mindspore/_akg/akg/tvm/exec/query_rpc_tracker.py +0 -48
- mindspore/_akg/akg/tvm/exec/rpc_proxy.py +0 -98
- mindspore/_akg/akg/tvm/exec/rpc_server.py +0 -88
- mindspore/_akg/akg/tvm/exec/rpc_tracker.py +0 -62
- mindspore/_akg/akg/tvm/rpc/__init__.py +0 -29
- mindspore/_akg/akg/tvm/rpc/base.py +0 -182
- mindspore/_akg/akg/tvm/rpc/client.py +0 -436
- mindspore/_akg/akg/tvm/rpc/proxy.py +0 -595
- mindspore/_akg/akg/tvm/rpc/server.py +0 -413
- mindspore/_akg/akg/tvm/rpc/tornado_util.py +0 -121
- mindspore/_akg/akg/tvm/rpc/tracker.py +0 -431
- mindspore/_extends/graph_kernel/expander.py +0 -80
- mindspore/_extends/graph_kernel/expanders/__init__.py +0 -57
- mindspore/_extends/graph_kernel/expanders/_utils.py +0 -269
- mindspore/_extends/graph_kernel/expanders/addn.py +0 -33
- mindspore/_extends/graph_kernel/expanders/batchnorm.py +0 -152
- mindspore/_extends/graph_kernel/expanders/batchnorm_grad.py +0 -105
- mindspore/_extends/graph_kernel/expanders/bias_add_grad.py +0 -49
- mindspore/_extends/graph_kernel/expanders/clip_by_norm_no_div_sum.py +0 -33
- mindspore/_extends/graph_kernel/expanders/complex/abs.py +0 -30
- mindspore/_extends/graph_kernel/expanders/complex/add.py +0 -44
- mindspore/_extends/graph_kernel/expanders/complex/div.py +0 -62
- mindspore/_extends/graph_kernel/expanders/complex/mul.py +0 -52
- mindspore/_extends/graph_kernel/expanders/complex/real_div.py +0 -62
- mindspore/_extends/graph_kernel/expanders/complex/sub.py +0 -45
- mindspore/_extends/graph_kernel/expanders/conv2d.py +0 -200
- mindspore/_extends/graph_kernel/expanders/dropout_grad.py +0 -30
- mindspore/_extends/graph_kernel/expanders/equal_count.py +0 -50
- mindspore/_extends/graph_kernel/expanders/erfc.py +0 -35
- mindspore/_extends/graph_kernel/expanders/expand_dims.py +0 -50
- mindspore/_extends/graph_kernel/expanders/fused_adam.py +0 -44
- mindspore/_extends/graph_kernel/expanders/fused_adam_weight_decay.py +0 -47
- mindspore/_extends/graph_kernel/expanders/fused_mul_add.py +0 -28
- mindspore/_extends/graph_kernel/expanders/gather.py +0 -43
- mindspore/_extends/graph_kernel/expanders/gelu_grad.py +0 -70
- mindspore/_extends/graph_kernel/expanders/gkdropout.py +0 -40
- mindspore/_extends/graph_kernel/expanders/identity.py +0 -25
- mindspore/_extends/graph_kernel/expanders/layernorm.py +0 -93
- mindspore/_extends/graph_kernel/expanders/layernorm_grad.py +0 -113
- mindspore/_extends/graph_kernel/expanders/logsoftmax.py +0 -46
- mindspore/_extends/graph_kernel/expanders/logsoftmax_grad.py +0 -36
- mindspore/_extends/graph_kernel/expanders/matmul.py +0 -80
- mindspore/_extends/graph_kernel/expanders/maximum_grad.py +0 -59
- mindspore/_extends/graph_kernel/expanders/minimum_grad.py +0 -80
- mindspore/_extends/graph_kernel/expanders/oneslike.py +0 -26
- mindspore/_extends/graph_kernel/expanders/reduce_mean.py +0 -43
- mindspore/_extends/graph_kernel/expanders/relu_grad.py +0 -32
- mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits_grad.py +0 -35
- mindspore/_extends/graph_kernel/expanders/sigmoid_grad.py +0 -31
- mindspore/_extends/graph_kernel/expanders/slice.py +0 -35
- mindspore/_extends/graph_kernel/expanders/softmax_cross_entropy_with_logits.py +0 -42
- mindspore/_extends/graph_kernel/expanders/softmax_grad_ext.py +0 -41
- mindspore/_extends/graph_kernel/expanders/softsign.py +0 -28
- mindspore/_extends/graph_kernel/expanders/sqrt_grad.py +0 -29
- mindspore/_extends/graph_kernel/expanders/square_sum_all.py +0 -44
- mindspore/_extends/graph_kernel/expanders/square_sum_v1.py +0 -37
- mindspore/_extends/graph_kernel/expanders/squared_difference.py +0 -43
- mindspore/_extends/graph_kernel/expanders/tanh_grad.py +0 -31
- mindspore/_extends/graph_kernel/expanders/tile.py +0 -54
- mindspore/_extends/graph_kernel/model/op_infer.py +0 -506
- mindspore/_extends/parse/jit_fallback_modules.py +0 -51
- mindspore/dataset/datapreprocess/preprocess_imagenet_validate_dataset.py +0 -54
- mindspore/dataset/engine/graphdata.py +0 -1586
- mindspore/include/api/net.h +0 -142
- mindspore/ops/_grad/grad_array_ops.py +0 -1347
- mindspore/ops/_grad/grad_clip_ops.py +0 -84
- mindspore/ops/_grad/grad_debug_ops.py +0 -68
- mindspore/ops/_grad/grad_inner_ops.py +0 -235
- mindspore/ops/_grad/grad_math_ops.py +0 -1684
- mindspore/ops/_grad/grad_nn_ops.py +0 -1529
- mindspore/ops/_grad/grad_other_ops.py +0 -89
- mindspore/ops/_grad/grad_sequence_ops.py +0 -296
- mindspore/ops/_grad/grad_sparse.py +0 -323
- mindspore/ops/_grad_experimental/grad_image_ops.py +0 -249
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +0 -195
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +0 -112
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ApproximateEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Argmax_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/Argmin_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/AssignSub_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Assign_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +0 -150
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +0 -66
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchToSpaceND_bprop.mindir +0 -28
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +0 -33
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +0 -306
- mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +0 -13
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Concat_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +0 -240
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +0 -247
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +0 -247
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +0 -315
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +0 -278
- mindspore/ops/bprop_mindir/DType_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +0 -58
- mindspore/ops/bprop_mindir/Depend_bprop.mindir +0 -13
- mindspore/ops/bprop_mindir/DepthToSpace_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +0 -138
- mindspore/ops/bprop_mindir/DiagPart_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +0 -25
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +0 -18
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +0 -27
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicShape_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Equal_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +0 -58
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +0 -54
- mindspore/ops/bprop_mindir/FloorDiv_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/GatherD_bprop.mindir +0 -26
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +0 -57
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/GreaterEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Greater_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/IOU_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/IsFinite_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/IsInf_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/IsNan_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +0 -126
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +0 -30
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +0 -43
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LessEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Less_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LinSpace_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/Load_bprop.mindir +0 -13
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/LogicalAnd_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LogicalNot_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/MaskedSelect_bprop.mindir +0 -21
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +0 -74
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +0 -74
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +0 -75
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +0 -65
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Maximum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Minimum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +0 -27
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +0 -35
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NonZero_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/NotEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +0 -26
- mindspore/ops/bprop_mindir/OnesLike_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +0 -82
- mindspore/ops/bprop_mindir/Range_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/Rank_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReduceAll_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/ReduceAny_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +0 -60
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +0 -89
- mindspore/ops/bprop_mindir/ReverseSequence_bprop.mindir +0 -52
- mindspore/ops/bprop_mindir/ReverseV2_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/Round_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/ScatterMax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterMin_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterNdUpdate_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/ScatterNd_bprop.mindir +0 -24
- mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/ScatterUpdate_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +0 -21
- mindspore/ops/bprop_mindir/Select_bprop.mindir +0 -31
- mindspore/ops/bprop_mindir/Shape_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +0 -21
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Sign_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/Slice_bprop.mindir +0 -26
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +0 -36
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +0 -33
- mindspore/ops/bprop_mindir/Sort_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SpaceToBatchND_bprop.mindir +0 -28
- mindspore/ops/bprop_mindir/SpaceToDepth_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Split_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +0 -54
- mindspore/ops/bprop_mindir/StridedSliceGrad_bprop.mindir +0 -95
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +0 -98
- mindspore/ops/bprop_mindir/Switch_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +0 -66
- mindspore/ops/bprop_mindir/TensorScatterAdd_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/TensorScatterUpdate_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/TensorShape_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TransShape_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/TruncateDiv_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/Unique_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Unstack_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +0 -32
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +0 -38
- mindspore/ops/bprop_mindir/ZerosLike_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/generate_mindir.py +0 -114
- mindspore/rewrite/node_visitor.py +0 -44
- mindspore/rewrite/topological_manager.py +0 -203
- mindspore/scipy/sparse/linalg.py +0 -192
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/WHEEL +0 -0
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/top_level.txt +0 -0
mindspore/common/tensor.py
CHANGED
|
@@ -74,32 +74,69 @@ class _TensorMeta(type(Tensor_), abc.ABCMeta):
|
|
|
74
74
|
"""
|
|
75
75
|
|
|
76
76
|
|
|
77
|
+
def tensor(input_data=None, dtype=None, shape=None, init=None, internal=False, const_arg=False):
|
|
78
|
+
"""
|
|
79
|
+
Create a new Tensor in Cell.construct() or function decorated by @jit.
|
|
80
|
+
|
|
81
|
+
In graph mode, MindSpore would create a new Tensor object at runtime dynamically,
|
|
82
|
+
based on the `dtype` argument.
|
|
83
|
+
|
|
84
|
+
Please refer to `Creating and Using Tensor
|
|
85
|
+
<https://www.mindspore.cn/docs/en/r2.2/note/static_graph_syntax_support.html#mindspore-user-defined-data-types>`_ .
|
|
86
|
+
|
|
87
|
+
The difference between it and the Tensor class is that it adds
|
|
88
|
+
`Annotation
|
|
89
|
+
<https://www.mindspore.cn/docs/en/r2.2/design/dynamic_graph_and_static_graph.html?#annotation-type>`_
|
|
90
|
+
which can prevent the generation of AnyType compared to the Tensor class.
|
|
91
|
+
|
|
92
|
+
The arguments and return values are the same as the Tensor class. Also see: :class:`mindspore.Tensor`.
|
|
93
|
+
internally to indicate the type of the Tensor currently being created,
|
|
94
|
+
|
|
95
|
+
Supported Platforms:
|
|
96
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
97
|
+
|
|
98
|
+
Examples:
|
|
99
|
+
>>> import mindspore as ms
|
|
100
|
+
>>> from mindspore import jit, tensor
|
|
101
|
+
>>> @jit
|
|
102
|
+
... def func(x):
|
|
103
|
+
... return tensor(x.asnumpy(), dtype=ms.float32)
|
|
104
|
+
>>> x = tensor([1, 2, 3])
|
|
105
|
+
>>> y = func(x)
|
|
106
|
+
>>> print(y)
|
|
107
|
+
[1. 2. 3.]
|
|
108
|
+
"""
|
|
109
|
+
return Tensor(input_data, dtype, shape, init, internal, const_arg) # @jit.typing: () -> tensor_type[{dtype}]
|
|
110
|
+
|
|
111
|
+
|
|
77
112
|
class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
78
113
|
"""
|
|
79
114
|
Tensor is a data structure that stores an n-dimensional array.
|
|
80
115
|
|
|
116
|
+
Note:
|
|
117
|
+
If 'init' interface is used to initialize Tensor, the `Tensor.init_data` API needs to be called to load the
|
|
118
|
+
actual data to `Tensor`.
|
|
119
|
+
|
|
81
120
|
Args:
|
|
82
121
|
input_data (Union[Tensor, float, int, bool, tuple, list, numpy.ndarray]): The data to be stored. It can be
|
|
83
|
-
another Tensor, Python number or NumPy ndarray. Default: None.
|
|
122
|
+
another Tensor, Python number or NumPy ndarray. Default: ``None`` .
|
|
84
123
|
dtype (:class:`mindspore.dtype`): Used to indicate the data type of the output Tensor. The argument should
|
|
85
|
-
be defined in `mindspore.dtype`. If it is None, the data type of the output Tensor will be the same
|
|
86
|
-
as the `input_data`. Default: None.
|
|
124
|
+
be defined in `mindspore.dtype`. If it is ``None`` , the data type of the output Tensor will be the same
|
|
125
|
+
as the `input_data`. Default: ``None`` .
|
|
87
126
|
shape (Union[tuple, list, int]): Used to indicate the shape of the output Tensor. The argument should be
|
|
88
127
|
a list of integers, a tuple of integers or an integer. If `input_data` is available,
|
|
89
128
|
`shape` doesn't need to be set. If None in shape, a tensor of dynamic shape is created, `input_data`
|
|
90
129
|
doesn't need to be set; if None not in shape, a tensor of static shape is created, `input_data` or `init`
|
|
91
|
-
must be set. Default: None.
|
|
130
|
+
must be set. Default: ``None`` .
|
|
92
131
|
init (Initializer): The information of init data.
|
|
93
|
-
'init' is used for delayed initialization in parallel mode
|
|
94
|
-
|
|
95
|
-
Tensor, the `Tensor.init_data` API needs to be called to convert `Tensor` to the actual data.
|
|
96
|
-
Default: None.
|
|
132
|
+
'init' is used for delayed initialization in parallel mode, when using init, `dtype` and `shape` must be
|
|
133
|
+
set. Default: ``None`` .
|
|
97
134
|
internal (bool): Whether it is created by the framework.
|
|
98
|
-
'True' means that the tensor is created by framework.
|
|
99
|
-
'False' means that the tensor is created by user.
|
|
100
|
-
Default: False
|
|
135
|
+
``'True'`` means that the tensor is created by framework.
|
|
136
|
+
``'False'`` means that the tensor is created by user.
|
|
137
|
+
Default: ``False`` .
|
|
101
138
|
const_arg (bool): Whether the tensor is a constant when it is used for the argument of a network.
|
|
102
|
-
Default: False.
|
|
139
|
+
Default: ``False`` .
|
|
103
140
|
|
|
104
141
|
Outputs:
|
|
105
142
|
Tensor.
|
|
@@ -107,6 +144,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
107
144
|
Note:
|
|
108
145
|
The default value None of `input_data` works as a placeholder, it does not mean that we can create a NoneType
|
|
109
146
|
Tensor.
|
|
147
|
+
Tensor with shape contains 0 is not fully tested and supported.
|
|
110
148
|
|
|
111
149
|
Examples:
|
|
112
150
|
>>> import numpy as np
|
|
@@ -169,35 +207,42 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
169
207
|
if input_data is not None:
|
|
170
208
|
Tensor_.__init__(self, input_data)
|
|
171
209
|
else:
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
210
|
+
if input_data is None and shape is None and init is None and dtype is not None:
|
|
211
|
+
validator.check_type_name('dtype', dtype, mstype.number_type +
|
|
212
|
+
(mstype.bool_, mstype.string), "Tensor")
|
|
213
|
+
Tensor_.__init__(self, dtype, [-2])
|
|
214
|
+
logger.warning(f"For 'Tensor', if 'dtype' is not None, 'input_data', 'shape' "
|
|
215
|
+
f"or 'init' must not be None.")
|
|
216
|
+
else:
|
|
217
|
+
# If input data is numpy number, convert it to np array
|
|
218
|
+
if isinstance(input_data, np_types):
|
|
219
|
+
input_data = np.array(input_data)
|
|
175
220
|
|
|
176
|
-
|
|
177
|
-
|
|
221
|
+
if isinstance(shape, numbers.Number):
|
|
222
|
+
shape = (shape,)
|
|
178
223
|
|
|
179
|
-
|
|
224
|
+
_check_tensor_input(input_data, dtype, shape, init)
|
|
180
225
|
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
else:
|
|
186
|
-
_check_input_data_type(input_data)
|
|
187
|
-
if dtype is not None:
|
|
188
|
-
validator.check_type_name('dtype', dtype, mstype.number_type +
|
|
189
|
-
(mstype.bool_, mstype.string), "Tensor")
|
|
226
|
+
# If input_data is tuple/list/numpy.ndarray, it's support in check_type method.
|
|
227
|
+
if (isinstance(shape, (list, tuple)) and None in shape) or init is not None:
|
|
228
|
+
shape = _check_tensor_dynamic_shape(dtype, shape, init)
|
|
229
|
+
Tensor_.__init__(self, dtype, shape)
|
|
190
230
|
else:
|
|
191
|
-
|
|
231
|
+
_check_input_data_type(input_data)
|
|
232
|
+
if dtype is not None:
|
|
233
|
+
validator.check_type_name('dtype', dtype, mstype.number_type +
|
|
234
|
+
(mstype.bool_, mstype.string), "Tensor")
|
|
235
|
+
else:
|
|
236
|
+
dtype = self._set_default_dtype(input_data, dtype)
|
|
192
237
|
|
|
193
|
-
|
|
194
|
-
|
|
238
|
+
if isinstance(input_data, np.ndarray) and (not input_data.flags['FORC']):
|
|
239
|
+
input_data = np.ascontiguousarray(input_data)
|
|
195
240
|
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
241
|
+
if dtype is not None:
|
|
242
|
+
Tensor_.__init__(self, input_data, dtype)
|
|
243
|
+
else:
|
|
244
|
+
Tensor_.__init__(self, input_data)
|
|
245
|
+
validator.check_value_type('const_arg', const_arg, bool, 'Tensor')
|
|
201
246
|
|
|
202
247
|
self.const_arg = const_arg
|
|
203
248
|
self.virtual_flag = False
|
|
@@ -230,7 +275,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         if np.array(input_data).dtype == np.float64:
             return mstype.float32
         if isinstance(input_data, (int, list, tuple)):
-            if np.array(input_data).dtype
+            if np.array(input_data).dtype in (np.int32, np.int64):
                 return mstype.int64
         return dtype

@@ -306,11 +351,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

     def __index__(self):
         data = self.asnumpy()
-        if not (data.dtype == "int8"
-                or data.dtype == "int16"
-                or data.dtype == "int32"
-                or data.dtype == "int64"
-                or data.dtype == "bool"):
+        if data.dtype not in ["int8", "int16", "int32", "int64", "bool"]:
             raise ValueError("Only integer tensors of a single element can be converted to an index.")
         return self._convert_scalar_(data, int,
                                      "Only integer tensors of a single element can be converted to an index.")
@@ -319,10 +360,8 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         return self

     def __abs__(self):
-
-
-        data = np.array(data)
-        return Tensor(data)
+        self._init_check()
+        return tensor_operator_registry.get('abs')(self)

     def __add__(self, other):
         return tensor_operator_registry.get('__add__')(self, other)
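The `__index__` check is simplified and `__abs__` now routes through the functional registry instead of a hand-rolled NumPy conversion. A small behavior sketch (assuming stock mindspore 2.2 semantics):

import mindspore as ms

t = ms.Tensor([-1, 2, -3])
print(abs(t))             # __abs__ dispatches to the registered 'abs' op

idx = ms.Tensor(1)        # 0-d integer tensor, accepted by __index__
print([10, 20, 30][idx])  # 20; a float tensor here raises ValueError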
@@ -422,6 +461,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

     def __setitem__(self, index, value):
         out = tensor_operator_registry.get('__setitem__')(self, index, value)
+        if isinstance(out, tuple):
+            if self.parent_tensor_ is not None and self.index_of_parent_ is not None:
+                self.parent_tensor_.__setitem__(self.index_of_parent_, out[0])
+                return self
+            return self
         self.assign_value(out)
         if self.parent_tensor_ is not None and self.index_of_parent_ is not None:
             self.parent_tensor_.__setitem__(self.index_of_parent_, self)
@@ -444,8 +488,20 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     def __str__(self):
         if self.dtype == mstype.type_none:
             return "Unknown Tensor type!"
+        if self.dtype == mstype.bfloat16:
+            return str(self.float().asnumpy())
         return str(self.asnumpy())

+    def __getstate__(self):
+        state = self.__dict__.copy()
+        state["value"] = Tensor_.__getstate__(self)
+        return state
+
+    def __setstate__(self, state):
+        value = state.pop("value")
+        Tensor_.__setstate__(self, value)
+        self.__dict__.update(state)
+
     @property
     def shape(self):
         """
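The new __getstate__/__setstate__ pair lets Tensor round-trip through the standard pickle protocol, storing the backing Tensor_ state alongside the Python __dict__. A minimal sketch:

import pickle
import numpy as np
import mindspore as ms

t = ms.Tensor(np.arange(6).reshape(2, 3), ms.float32)
buf = pickle.dumps(t)           # __getstate__ stores Tensor_ state under "value"
restored = pickle.loads(buf)    # __setstate__ rebuilds the backing tensor first
print(np.array_equal(restored.asnumpy(), t.asnumpy()))  # True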
@@ -462,12 +518,30 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     def size(self):
         """
         For details, please refer to :func:`mindspore.ops.size`.
+
+        Examples:
+            >>> from mindspore import Tensor
+            >>> import numpy as np
+            >>> x = Tensor(np.array([[1, 2], [3, 4]]))
+            >>> output = x.size
+            >>> print(output)
+            4
         """
         return self._size

     @property
     def ndim(self):
-        """Return the number of tensor dimensions."""
+        """
+        Return the number of tensor dimensions.
+
+        Examples:
+            >>> from mindspore import Tensor
+            >>> import numpy as np
+            >>> x = Tensor(np.array([[1, 2], [3, 4]]))
+            >>> output = x.ndim
+            >>> print(output)
+            2
+        """
         return len(self._shape)

     @property
@@ -476,6 +550,18 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Returns a view of a matrix (2-D tensor) conjugated and transposed.
         x.H is equivalent to `mindspore.Tensor.swapaxes(0, 1).conj()` for complex matrices and
         `mindspore.Tensor.swapaxes(0, 1)` for real matrices.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> from mindspore import Tensor
+            >>> import numpy as np
+            >>> x = Tensor(np.array([[1, 2], [3, 4]]))
+            >>> output = x.H
+            >>> print(output)
+            [[1 3]
+             [2 4]]
         """
         if self.ndim != 2:
             raise ValueError(f"For tensor.H only support 2-D Tensor, but got {self.ndim}-D.")
@@ -486,27 +572,78 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

     @property
     def has_init(self):
-        """Whether tensor is initialized."""
+        """
+        Whether tensor is initialized.
+
+        Examples:
+            >>> from mindspore import Tensor
+            >>> import numpy as np
+            >>> x = Tensor(np.array([[1, 2], [3, 4]]))
+            >>> output = x.has_init
+            >>> print(output)
+            False
+        """
         return self.init is not None

     @property
     def itemsize(self):
-        """Return the length of one tensor element in bytes."""
+        """
+        Return the length of one tensor element in bytes.
+
+        Examples:
+            >>> from mindspore import Tensor
+            >>> import numpy as np
+            >>> x = Tensor(np.array([[1, 2], [3, 4]]))
+            >>> output = x.itemsize
+            >>> print(output)
+            8
+        """
         return self._itemsize

     @property
     def strides(self):
-        """Return the tuple of bytes to step in each dimension when traversing a tensor."""
+        """
+        Return the tuple of bytes to step in each dimension when traversing a tensor.
+
+        Examples:
+            >>> from mindspore import Tensor
+            >>> import numpy as np
+            >>> x = Tensor(np.array([[1, 2], [3, 4]]))
+            >>> output = x.strides
+            >>> print(output)
+            (16, 8)
+        """
         return self._strides

     @property
     def nbytes(self):
-        """Return the total number of bytes taken by the tensor."""
+        """
+        Return the total number of bytes taken by the tensor.
+
+        Examples:
+            >>> from mindspore import Tensor
+            >>> import numpy as np
+            >>> x = Tensor(np.array([[1, 2], [3, 4]]))
+            >>> output = x.nbytes
+            >>> print(output)
+            32
+        """
         return self._nbytes

     @property
     def T(self):
-        """Return the transposed tensor."""
+        """
+        Return the transposed tensor.
+
+        Examples:
+            >>> from mindspore import Tensor
+            >>> import numpy as np
+            >>> x = Tensor(np.array([[1, 2], [3, 4]]))
+            >>> output = x.T
+            >>> print(output)
+            [[1 3]
+             [2 4]]
+        """
         return self.transpose()

     @staticmethod
@@ -548,7 +685,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

         Args:
             const_arg (bool): Whether the tensor is a constant when it is used for the argument of a network.
-                Default: True.
+                Default: ``True`` .

         Returns:
             Tensor, has been specified whether to be a const network argument.
@@ -597,6 +734,80 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         self._init_check()
         return tensor_operator_registry.get('atan2')(self, other)

+    def cauchy(self, median=0.0, sigma=1.0):
+        r"""
+        Fills the tensor with numbers drawn from the Cauchy distribution. It is
+        defined as follows:
+
+        .. math::
+            f(x)= \frac{1}{\pi} \frac{\sigma}{(x-median)^2 +\sigma^2}
+
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
+
+        Args:
+            median (float, optional): the location parameter, specifying the location
+                of the peak of the distribution. Default: 0.0.
+            sigma (float, optional): the scale parameter which specifies the half-width
+                at half-maximum. Default: 1.0.
+
+        Returns:
+            Tensor. A Tensor with the same type and shape of input.
+
+        Supported Platforms:
+            ``Ascend`` ``CPU``
+
+        Examples:
+            >>> import mindspore
+            >>> import numpy as np
+            >>> x = mindspore.Tensor(np.zeros((1, 2)), dtype=mindspore.float32)
+            >>> x.cauchy()
+            Tensor(shape=[1, 2], dtype=Float32, value=
+            [[8.79836142e-01, 9.37541723e-01]])
+
+        """
+        self._init_check()
+        out = tensor_operator_registry.get('cauchy')(list(self.shape), median, sigma)()
+        return out.astype(self.dtype)
+
+    def log_normal(self, mean=1.0, std=2.0):
+        r"""
+        Fills the elements of the input tensor with log normal values initialized by
+        given mean and std:
+
+        .. math::
+            \text{f}(x;1.0,2.0)=\frac{1}{x\delta \sqrt[]{2\pi} }e^{-\frac{(\ln x-\mu )^2}{2\delta ^2} }
+
+        where :math:`\mu`, :math:`\delta` is mean and standard deviation of lognormal distribution respectively.
+
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
+
+        Args:
+            mean (float, optional): the mean of normal distribution. With float data type.
+                Default: 1.0.
+            std (float, optional): the std of normal distribution. With float data type.
+                Default: 2.0.
+
+        Returns:
+            Tensor. A Tensor with the same type and shape of input.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import mindspore
+            >>> import numpy as np
+            >>> x = mindspore.Tensor(np.array([[1, 2], [3, 4]]), dtype=mindspore.float32)
+            >>> output = x.log_normal()
+            >>> print(output)
+            [[1.2788825 2.3305743]
+             [14.944194 0.16303174]]
+        """
+        self._init_check()
+        return tensor_operator_registry.get('log_normal')(mean, std)(self)
+
+    @jit_forbidden_register
     def assign_value(self, value):
         """
         Assign another tensor value to this tensor.
@@ -606,6 +817,16 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

         Returns:
             Tensor, Tensor that's been assigned.
+
+        Examples:
+            >>> from mindspore import Tensor
+            >>> import numpy as np
+            >>> x = Tensor([1, 2, 3, 4])
+            >>> y = Tensor(np.array([[1, 2], [3, 4]]))
+            >>> output = x.assign_value(y)
+            >>> print(x)
+            [[1 2]
+             [3 4]]
         """
         if is_stub_tensor(value):
             value = value.stub_sync()
@@ -631,10 +852,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Get the item at the specified index of the tensor.

         Note:
-            Tensor.item returns a Tensor scalar instead of a Python scalar.
+            Tensor.item returns a Tensor scalar instead of a Python scalar. And if the tensor is a Tensor scalar,
+            Tensor.item will return the numpy.ndarray.

         Args:
-            index (Union[None, int, tuple(int)]): The index in Tensor. Default: None
+            index (Union[None, int, tuple(int)]): The index in Tensor. Default: ``None``.

         Returns:
             A Tensor scalar, dtype is the same with the original Tensor.
@@ -646,12 +868,14 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         ``Ascend`` ``GPU`` ``CPU``

         Examples:
-            >>> import
+            >>> import mindspore as ms
             >>> from mindspore import Tensor
-            >>> x = Tensor(
-            >>> x
-            >>> print(x)
+            >>> x = Tensor([[1, 2, 3], [4, 5, 6]], ms.float32)
+            >>> print(x.item((0, 1)))
             2.0
+            >>> x = Tensor(1.2, ms.float32)
+            >>> print(x.item())
+            1.2
         """
         output = tensor_operator_registry.get('item')(self, index)
         return output
@@ -695,6 +919,26 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         output = tensor_operator_registry.get('itemset')(self, *args)
         return output

+    def get_bytes(self):
+        r"""
+        Get raw data of tensor with type of bytes.
+
+        Supported Platforms:
+            ``CPU`` ``GPU`` ``Ascend``
+
+        Returns:
+            Bytes of tensor.
+
+        Examples:
+            >>> import mindspore as ms
+            >>> from mindspore import Tensor
+            >>> x = ms.Tensor([1, 2, 3], ms.int16)
+            >>> print(x.get_bytes())
+            b'\x01\x00\x02\x00\x03\x00'
+        """
+        self._init_check()
+        return Tensor_.get_bytes(self)
+
     def asnumpy(self):
         """
         Convert tensor to numpy array. Returns self tensor as a NumPy ndarray. This tensor and the returned ndarray
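The bytes returned by get_bytes follow the tensor's dtype and the host byte order; a round-trip sketch with NumPy (little-endian host assumed, matching the docstring output):

import numpy as np
import mindspore as ms

x = ms.Tensor([1, 2, 3], ms.int16)
raw = x.get_bytes()                        # b'\x01\x00\x02\x00\x03\x00'
back = np.frombuffer(raw, dtype=np.int16)  # rebuild the values from the raw buffer
print(back)                                # [1 2 3]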
@@ -715,6 +959,8 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [11. 2.]
         """
         self._init_check()
+        if self.dtype == mstype.bfloat16:
+            raise TypeError(f"For asnumpy, the type of tensor cannot be BFloat16, but got {self.dtype}.")
         return Tensor_.asnumpy(self)

     def numpy(self):
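With this guard, bfloat16 tensors must be cast before conversion, mirroring what __str__ now does internally. A sketch (assuming a build where bfloat16 tensors can be constructed, e.g. on Ascend):

import mindspore as ms

x = ms.Tensor([1.5, 2.5], ms.bfloat16)
# x.asnumpy() now raises TypeError: NumPy has no native bfloat16
arr = x.float().asnumpy()  # cast to float32 first, then convert
print(arr.dtype)           # float32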
@@ -754,6 +1000,20 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return self.slice_num_of_persistent_data_

+    def slice_scatter(self, src, axis=0, start=None, end=None, step=1):
+        """
+        For details, please refer to :func:`mindspore.ops.slice_scatter`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('slice_scatter')(self, src, axis, start, end, step)
+
+    def select_scatter(self, src, axis, index):
+        """
+        For details, please refer to :func:`mindspore.ops.select_scatter`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('select_scatter')(self, src, axis, index)
+
     def histc(self, bins=100, min=0., max=0.):
         """
         For details, please refer to :func:`mindspore.ops.histc`.
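A usage sketch for the newly delegated scatter helpers; the shapes are hypothetical, and the semantics follow mindspore.ops.slice_scatter (the src shape must match the selected slice):

import mindspore as ms

base = ms.ops.zeros((4, 6), ms.float32)
src = ms.ops.ones((4, 3), ms.float32)
# embed src into every second column of base along axis 1
out = base.slice_scatter(src, axis=1, start=0, end=6, step=2)
print(out.shape)  # (4, 6)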
@@ -796,6 +1056,46 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return self

+    def contiguous(self):
+        """
+        Converts a Tensor into a continuous-memory Tensor that contains the same data as the original Tensor.
+
+        Returns:
+            A contiguous in memory tensor containing the same data as self tensor.
+
+        Examples:
+            >>> import mindspore as ms
+            >>> import numpy as np
+            >>> from mindspore import Tensor, ops
+            >>> x = Tensor([[1, 2, 3], [4, 5, 6]], dtype=ms.float32)
+            >>> y = ops.transpose(x, (1, 0))
+            >>> y.contiguous()
+            >>> y[:, 1] = 1
+            >>> print(x)
+            [[1. 2. 3.]
+             [4. 5. 6.]]
+        """
+        Tensor_.contiguous(self)
+        return self
+
+    def is_contiguous(self):
+        """
+        Determines whether the memory of tensor is contiguous.
+
+        Returns:
+            Bool, True if tensor memory is contiguous, False otherwise.
+
+        Examples:
+            >>> import mindspore as ms
+            >>> import numpy as np
+            >>> from mindspore import Tensor, ops
+            >>> x = Tensor([[1, 2, 3], [4, 5, 6]], dtype=ms.float32)
+            >>> y = ops.transpose(x, (1, 0))
+            >>> print(y.is_contiguous())
+            False
+        """
+        return Tensor_.is_contiguous(self)
+
     def flush_from_cache(self):
         """
         Flush cache data to host if tensor is cache enable.
@@ -850,6 +1150,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     def triu(self, diagonal=0):
         r"""
         For details, please refer to :func:`mindspore.ops.triu`.
+
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
+
         """
         self._init_check()
         validator.check_value_type('diagonal', diagonal, [int], 'triu')
@@ -888,9 +1192,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         For details, please refer to :func:`mindspore.ops.all`.
         """
         self._init_check()
-        if axis is None:
-            axis = ()
-        return tensor_operator_registry.get('all')(keep_dims)(self, axis)
+        return tensor_operator_registry.get('all')(self, axis, keep_dims)

     def angle(self):
         r"""
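The reduction now passes axis and keep_dims straight into the functional op instead of currying a primitive with keep_dims first. Behavior sketch:

import mindspore as ms

x = ms.Tensor([[True, False], [True, True]])
print(x.all())        # reduce over every axis -> False
print(x.all(axis=1))  # per-row reduction -> [False  True]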
@@ -950,7 +1252,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         if len(shape) != 1:
             raise ValueError(f"Only one tuple is needed, but got {shape}")
         shape = shape[0]
-        return tensor_operator_registry.get('reshape')(
+        return tensor_operator_registry.get('reshape')(self, shape)

     def view_as(self, other):
         r"""
@@ -969,6 +1271,8 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         ``Ascend`` ``GPU`` ``CPU``

         Examples:
+            >>> from mindspore import Tensor
+            >>> from mindspore import dtype as mstype
             >>> a = Tensor([[1, 2, 3], [2, 3, 4]], mstype.float32)
             >>> b = Tensor([1, 1, 1, 1, 1, 1], mstype.float32)
             >>> output = a.view_as(b)
@@ -1103,7 +1407,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         For details, please refer to :func:`mindspore.ops.exp`.
         """
         self._init_check()
-        return tensor_operator_registry.get('exp')(
+        return tensor_operator_registry.get('exp')(self)

     def real(self):
         r"""
@@ -1214,7 +1518,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         For details, please refer to :func:`mindspore.ops.abs`.
         """
         self._init_check()
-        return tensor_operator_registry.get('abs')(
+        return tensor_operator_registry.get('abs')(self)

     def absolute(self):
         """
@@ -1236,6 +1540,16 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         self._init_check()
         return tensor_operator_registry.get('floor')(self)

+    def floor_divide(self, other):
+        """
+        For details, please refer to :func:`mindspore.ops.floor_divide`.
+
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('floor_divide')(self, other)
+
     def lerp(self, end, weight):
         """
         For details, please refer to :func:`mindspore.ops.lerp`.
@@ -1275,7 +1589,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         self._init_check()
         input_x = self.copy() if self.dtype == mstype.float32 else self.astype(mstype.float16)
         input_y = other.copy() if other.dtype == mstype.float32 else other.astype(mstype.float16)
-        return tensor_operator_registry.get('__lt__')(tensor_operator_registry.get('abs')(
+        return tensor_operator_registry.get('__lt__')(tensor_operator_registry.get('abs')(
             tensor_operator_registry.get('__sub__')(input_x, input_y)
         ), tolerance)

@@ -1310,6 +1624,16 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         self._init_check()
         return tensor_operator_registry.get('logaddexp2')(self, other)

+    def logcumsumexp(self, axis):
+        r"""
+        For details, please refer to :func:`mindspore.ops.logcumsumexp`.
+
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('logcumsumexp')(self, axis)
+
     def logsumexp(self, axis, keepdims=False):
         r"""
         For details, please refer to :func:`mindspore.ops.logsumexp`.
@@ -1427,9 +1751,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         For details, please refer to :func:`mindspore.ops.mean`.
         """
         self._init_check()
-        if axis is None:
-            axis = ()
-        return tensor_operator_registry.get('mean')(keep_dims)(self, axis)
+        return tensor_operator_registry.get('mean')(self, axis, keep_dims)

     def amin(self, axis=None, keepdims=False, *, initial=None, where=None):
         """
@@ -1456,6 +1778,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             axis = ()
         return tensor_operator_registry.get('amax')(self, axis, keepdims, initial=initial, where=where)

+    def aminmax(self, *, axis=0, keepdims=False):
+        r"""
+        For details, please refer to :func:`mindspore.ops.aminmax`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('aminmax')(self, axis=axis, keepdims=keepdims)
+
     def reverse_sequence(self, seq_lengths, seq_dim=0, batch_dim=0):
         """
         For details, please refer to :func:`mindspore.ops.reverse_sequence`.
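The new aminmax wrapper takes keyword-only arguments, per its signature above, and returns the (min, max) pair in one pass. A small sketch:

import mindspore as ms

x = ms.Tensor([[1., 5.], [3., 2.]])
mn, mx = x.aminmax(axis=0)
print(mn)  # [1. 2.]
print(mx)  # [3. 5.]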
@@ -1517,7 +1846,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         self._init_check()
         new_shape = validator.check_reshape_shp(shape)
-        return tensor_operator_registry.get('reshape')(
+        return tensor_operator_registry.get('reshape')(self, new_shape)

     def reshape_as(self, other):
         """
@@ -1545,7 +1874,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [ 0.5 -3.2]]
         """
         self._init_check()
-        return tensor_operator_registry.get('reshape')(
+        return tensor_operator_registry.get('reshape')(self, other.shape)

     def ravel(self):
         """
@@ -1571,7 +1900,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         (24,)
         """
         self._init_check()
-        reshape_op = tensor_operator_registry.get('reshape')
+        reshape_op = tensor_operator_registry.get('reshape')
         return reshape_op(self, (-1,))

     def round(self):
@@ -1609,6 +1938,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         self._init_check()
         return tensor_operator_registry.get('dot')(self, other)

+    def outer(self, vec2):
+        r"""
+        For details, please refer to :func:`mindspore.ops.outer`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('outer')(self, vec2)
+
     def rad2deg(self):
         r"""
         For details, please refer to :func:`mindspore.ops.rad2deg`.
@@ -1673,6 +2009,20 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         self._init_check()
         return tensor_operator_registry.get('float_power')(self, other)

+    def fmax(self, other):
+        r"""
+        For details, please refer to :func:`mindspore.ops.fmax`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('fmax')(self, other)
+
+    def fmin(self, other):
+        r"""
+        For details, please refer to :func:`mindspore.ops.fmin`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('fmin')(self, other)
+
     def fmod(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.fmod`.
@@ -1706,10 +2056,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         For details, please refer to :func:`mindspore.ops.squeeze`.
         """
         self._init_check()
-        if axis is None:
-            return tensor_operator_registry.get('squeeze')(self)
-        new_shape = validator.prepare_shape_for_squeeze(self.shape, axis)
-        return tensor_operator_registry.get('reshape')()(self, new_shape)
+        return tensor_operator_registry.get('squeeze')(self, axis)

     def slogdet(self):
         """
@@ -1751,8 +2098,8 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         dtype (Union[:class:`mindspore.dtype`, numpy.dtype, str]): Designated tensor dtype, can be in
             format of `mindspore.dtype.float32` or `numpy.float32` or `float32`.
         copy (bool, optional): By default, astype always returns a newly allocated
-            tensor. If this is set to false, the input tensor is returned instead
-            of a copy. Default:
+            tensor. If this is set to ``false`` , the input tensor is returned instead
+            of a copy. Default: ``True`` .

     Returns:
         Tensor, with the designated dtype.
@@ -1781,37 +2128,16 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.argmax`.
         """
-
-
-        a = self
-        is_axis_none = False
-        if axis is None:
-            a = a.ravel()
-            axis = 0
-            is_axis_none = True
-        out = tensor_operator_registry.get('argmax')(axis, mstype.int64)(a)
-        if keepdims and not is_axis_none:
-            out = out.expand_dims(axis)
+        self._init_check()
+        out = tensor_operator_registry.get('argmax')(self, axis, keepdims)
         return out

     def argmin(self, axis=None, keepdims=False):
         """
         For details, please refer to :func:`mindspore.ops.argmin`.
         """
-
-
-        # P.Argmin only supports float
-        is_axis_none = False
-        a = self.astype(mstype.float32)
-        if axis is None:
-            a = a.ravel()
-            axis = 0
-        else:
-            axis = validator.check_axis_in_range(axis, a.ndim)
-        # P.Argmin is currently not supported
-        out = tensor_operator_registry.get('argmin')(axis)(a)
-        if keepdims and not is_axis_none:
-            out = out.expand_dims(axis)
+        self._init_check()
+        out = tensor_operator_registry.get('argmin')(self, axis, keepdims)
        return out

     def argmax_with_value(self, axis=0, keep_dims=False):
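Both index reductions now defer entirely to the functional ops, which handle axis=None flattening and keepdims themselves. A behavior sketch:

import mindspore as ms

x = ms.Tensor([[0.1, 0.9], [0.7, 0.2]])
print(x.argmax())        # index into the flattened tensor -> 1
print(x.argmin(axis=1))  # [0 1]
print(x.argmax(axis=0, keepdims=True).shape)  # (1, 2)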
@@ -1821,23 +2147,21 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Compute the max value of input Tensor on the specified axis, and return the max value and index.

         Note:
-            In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
-
-        .. warning::
+            - In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
             - If there are multiple maximum values, the index of the first maximum value is used.
             - The value range of `axis` is [-dims, dims - 1]. `dims` is the dimension length of this tensor.

         Args:
-            axis (int): The dimension to reduce. Default: 0.
-            keep_dims (bool): Whether to reduce dimension, if true the output will keep the same dimension as the
-                the output will reduce dimension if false. Default: False.
+            axis (int): The dimension to reduce. Default: ``0`` .
+            keep_dims (bool): Whether to reduce dimension, if ``true`` the output will keep the same dimension as the
+                input, the output will reduce dimension if ``false`` . Default: ``False`` .

         Returns:
             tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the maximum value of the input
             tensor.

             - **index** (Tensor) - The index for the maximum value of the input tensor.
-              If `keep_dims` is true, the shape of
+              If `keep_dims` is ``true`` , the shape of
               output tensors is :math:`(x_1, x_2, ..., x_{axis-1}, 1, x_{axis+1}, ..., x_N)`. Otherwise, the shape is
               :math:`(x_1, x_2, ..., x_{axis-1}, x_{axis+1}, ..., x_N)` .
             - **value** (Tensor) - The maximum value of input tensor, with the same shape as index.
@@ -1850,6 +2174,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         ``Ascend`` ``GPU`` ``CPU``

         Examples:
+            >>> import numpy as np
+            >>> import mindspore
+            >>> from mindspore import Tensor
             >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
             >>> output, index = x.argmax_with_value()
             >>> print(output, index)
@@ -1868,16 +2195,14 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Returns the minimum value with corresponding index.

         Note:
-            In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
-
-        .. warning::
+            - In auto_parallel and semi_auto_parallel mode, the first output index can not be used.
             - If there are multiple minimum values, the index of the first minimum value is used.
             - The value range of `axis` is [-dims, dims - 1]. `dims` is the dimension length of this tensor.

         Args:
             axis (int): The dimension to reduce. Default: 0.
             keep_dims (bool): Whether to reduce dimension, if true the output will keep the same dimension as the input,
-                the output will reduce dimension if false. Default: False
+                the output will reduce dimension if false. Default: ``False``.

         Returns:
             tuple (Tensor), tuple of 2 tensors, containing the corresponding index and the minimum value of the input
@@ -1897,9 +2222,12 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         ``Ascend`` ``GPU`` ``CPU``

         Examples:
+            >>> import numpy as np
+            >>> import mindspore
+            >>> from mindspore import Tensor
             >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
             >>> output, index = x.argmin_with_value()
-            >>> print(output, index
+            >>> print(output, index)
             0.0 0
             >>> output, index = x.argmin_with_value(keep_dims=True)
             >>> print(output, index)
@@ -1999,29 +2327,33 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         Return the maximum of a tensor or maximum along an axis.

+        Note:
+            When `axis` is ``None``, `keepdims` and subsequent parameters
+            have no effect. At the same time, the index is fixed to return 0.
+
         Args:
             axis (Union[None, int, list, tuple of ints], optional): Axis or
                 axes along which to operate. By default, flattened input is used. If
                 this is a tuple of ints, the maximum is selected over multiple axes,
-                instead of a single axis or all the axes as before. Default: None.
+                instead of a single axis or all the axes as before. Default: ``None`` .
             keepdims (bool, optional):
-                If this is set to True, the axes which are reduced are left in the
+                If this is set to ``True`` , the axes which are reduced are left in the
                 result as dimensions with size one. With this option, the result will
-                broadcast correctly against the input array. Default: False.
+                broadcast correctly against the input array. Default: ``False`` .

         Keyword Args:
             initial (scalar, optional):
                 The minimum value of an output element. Must be present to allow
-                computation on empty slice. Default: None.
-            where (Tensor
+                computation on empty slice. Default: ``None`` .
+            where (bool Tensor, optional):
                 A boolean tensor which is broadcasted to match the dimensions of array,
                 and selects elements to include in the reduction. If non-default value
-                is passed, initial must also be provided. Default: True.
-            return_indices (bool, optional): Whether to return the index of the maximum value.
-                If `axis` is a list or tuple of ints, it must be False.
+                is passed, initial must also be provided. Default: ``True`` .
+            return_indices (bool, optional): Whether to return the index of the maximum value.
+                Default: ``False`` . If `axis` is a list or tuple of ints, it must be ``False`` .

         Returns:
-            Tensor or scalar, maximum of input tensor. If `axis` is None, the result is a scalar
+            Tensor or scalar, maximum of input tensor. If `axis` is ``None`` , the result is a scalar
             value. If `axis` is given, the result is a tensor of dimension ``self.ndim - 1``.

         Raises:
@@ -2066,29 +2398,33 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         Return the minimum of a tensor or minimum along an axis.

+        Note:
+            When `axis` is ``None``, `keepdims` and subsequent parameters
+            have no effect. At the same time, the index is fixed to return 0.
+
         Args:
             axis (Union[None, int, list, tuple of ints], optional): An axis or
                 axes along which to operate. By default, flattened input is used. If
                 `axis` is a tuple of ints, the minimum is selected over multiple axes,
-                instead of a single axis or all the axes as before. Default: None.
+                instead of a single axis or all the axes as before. Default: ``None`` .
             keepdims (bool, optional):
-                If True, the axes which are reduced are left in the
+                If ``True`` , the axes which are reduced are left in the
                 result as dimensions with size one. With this option, the result will
-                broadcast correctly against the input array. Default: False.
+                broadcast correctly against the input array. Default: ``False`` .

         Keyword Args:
             initial (scalar, optional):
                 The minimum value of an output element. Must be present to allow
-                computation on empty slice. Default: None.
-            where (bool
+                computation on empty slice. Default: ``None`` .
+            where (Tensor[bool], optional):
                 A boolean tensor which is broadcasted to match the dimensions of array,
                 and selects elements to include in the reduction. If non-default value
-                is passed, initial must also be provided. Default: True.
-            return_indices (bool, optional): Whether to return the index of the minimum value. Default: False.
-                If `axis` is a list or tuple of ints, it must be False.
+                is passed, initial must also be provided. Default: ``True`` .
+            return_indices (bool, optional): Whether to return the index of the minimum value. Default: ``False`` .
+                If `axis` is a list or tuple of ints, it must be ``False`` .

         Returns:
-            Tensor or scalar, minimum of input tensor. If `axis` is None, the result is a scalar
+            Tensor or scalar, minimum of input tensor. If `axis` is ``None`` , the result is a scalar
             value. If `axis` is given, the result is a tensor of dimension ``self.ndim - 1``.

         Raises:
@@ -2211,15 +2547,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         `Tensor.fill` is deprecated, please use `ops.fill` instead.
         """
+        self._init_check()
         if value is None:
             if self.dtype not in (mstype.float16, mstype.float32, mstype.float64):
                 raise TypeError("For 'Tensor.fill', if the argument 'value' is None, the type of the original "
                                 "tensor must be float, but got {}.".format(self.dtype))
             value = Tensor(float('nan')).astype("float32")
             return tensor_operator_registry.get("tile")()(value, self.shape).astype(self.dtype)
-        if not isinstance(value, (int, float, bool)):
-            raise TypeError("For 'Tensor.fill', the type of the argument 'value' must be int, float or bool, "
-                            "but got {}.".format(type(value)))
         return tensor_operator_registry.get("fill")(self.dtype, self.shape, value)

     def fills(self, value):
@@ -2229,6 +2563,48 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         self._init_check()
         return tensor_operator_registry.get('fills')(self, value)

+    def fill_diagonal(self, fill_value, wrap=False):
+        """
+        Fills the main diagonal of a Tensor with a specified value and returns the result.
+        The input has at least 2 dimensions, and all dimensions of input must be equal in length
+        when the dimension of input is greater than 2.
+
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
+
+        Args:
+            fill_value (float): The value to fill with the diagonal of `self`.
+            wrap (bool, optional): Controls whether the diagonal elements continue onto the
+                remaining rows in case of a tall matrix(a matrix has more rows than columns). Default: ``False``.
+
+        Returns:
+            - **y** (Tensor) - Tensor, has the same shape and data type as `self`.
+
+        Raises:
+            TypeError: If data type of `self` is not one of the following: float32, int32, int64.
+            ValueError: If the dimension of `self` is not greater than 1.
+            ValueError: If the size of each dimension is not equal, when the dimension is greater than 2.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import numpy as np
+            >>> import mindspore
+            >>> from mindspore import Tensor
+            >>> x = Tensor(np.ones((6, 3)), mindspore.float32)
+            >>> output = x.fill_diagonal(5.0, wrap=True)
+            >>> print(output)
+            [[5. 1. 1.]
+             [1. 5. 1.]
+             [1. 1. 5.]
+             [1. 1. 1.]
+             [5. 1. 1.]
+             [1. 5. 1.]]
+        """
+        self._init_check()
+        return tensor_operator_registry.get('fill_diagonal')(fill_value, wrap)(self)
+
     def masked_fill(self, mask, value):
         """
         For details, please refer to :func:`mindspore.ops.masked_fill`.
@@ -2252,10 +2628,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

         Args:
             axis (Union[None, int, tuple(int)]): Axis or axes along which the range is computed.
-                The default is to compute the variance of the flattened tensor. Default: None.
-            keepdims (bool): If this is set to True, the axes which are reduced are left in the result as
+                The default is to compute the variance of the flattened tensor. Default: ``None`` .
+            keepdims (bool): If this is set to ``True`` , the axes which are reduced are left in the result as
                 dimensions with size one. With this option, the result will broadcast correctly against the tensor.
-                Default is False.
+                Default is ``False`` .

         Returns:
             Tensor.
@@ -2319,10 +2695,14 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Args:
             slice_index (int): Slice index of a parameter's slices.
                 It is used when initialize a slice of a parameter, it guarantees that devices
-                using the same slice can generate the same tensor. Default: None
-            shape (list[int]): Shape of the slice, it is used when initialize a slice of the parameter.
+                using the same slice can generate the same tensor. Default: ``None``.
+            shape (list[int]): Shape of the slice, it is used when initialize a slice of the parameter.
+                Default: ``None``.
             opt_shard_group(str): Optimizer shard group which is used in auto or semi auto parallel mode
-                to get one shard of a parameter's slice.
+                to get one shard of a parameter's slice. For more information about optimizer parallel, please refer to:
+                `Optimizer Parallel
+                <https://www.mindspore.cn/tutorials/experts/en/r2.2/parallel/optimizer_parallel.html>`_.
+                Default: ``None``.

         Returns:
             Initialized Tensor.
@@ -2391,14 +2771,20 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             self.init.seed, _ = self.seed

         with seed_context(self.init):
-            self.init(data)
+            if slice_num_of_persistent_data == 1:
+                self.init(data)
         self.init = None

         # At embedding cache scenes. When size of tensor is out of range, we store data to persistent storage
         if slice_num_of_persistent_data > 1:
             self.assign_value(Tensor_.persistent_data_from_numpy(data, slice_num_of_persistent_data))
         else:
-            self.
+            if self.dtype == mstype.bfloat16:
+                # The dtype of data is np.float32 when mstype is bfloat16,
+                # so we create tensor_ by init func instead of asnumpy
+                self.assign_value(Tensor_(data, self.dtype))
+            else:
+                self.assign_value(Tensor_.from_numpy(data))
         return self

     def resize(self, *new_shape):
@@ -2518,54 +2904,15 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         For details, please refer to :func:`mindspore.ops.diagonal`.
         """
-
-
-            raise ValueError("For 'Tensor.diagonal', the original tensor requires at least two dimensions, "
-                             "but got {}.".format(ndim))
-        dtype = self.dtype
+        self._init_check()
+        return tensor_operator_registry.get('diagonal')(self, offset, axis1, axis2)

-
-
-
-
-
-
-        a = self.transpose(perm)
-
-        shape = a.shape
-        n, m = shape[-2:]
-
-        e = tensor_operator_registry.get('eye')(n, m, dtype)
-        if offset >= m or offset <= -n:
-            e = tensor_operator_registry.get('fill')(dtype, (n, m), 0)
-        elif offset != 0:
-            e = e.astype(mstype.float32)
-            if offset > 0:
-                e_left = tensor_operator_registry.get('fill')(mstype.float32, (n, offset), 0)
-                e_right = e[..., 0:m - offset:1]
-                e = tensor_operator_registry.get('concatenate')(1)((e_left, e_right)).astype(dtype)
-            elif offset < 0:
-                e_upper = tensor_operator_registry.get('fill')(mstype.float32, (-offset, m), 0)
-                e_lower = e[0:n + offset:1, ...]
-                e = tensor_operator_registry.get('concatenate')(0)((e_upper, e_lower)).astype(dtype)
-        e = tensor_operator_registry.get('broadcast_to')(shape)(e)
-
-        prod = tensor_operator_registry.get('__mul__')(a, e)
-        res = tensor_operator_registry.get('reduce_sum')(prod.astype(mstype.float32), -1)
-
-        begin = ()
-        for _ in range(ndim - 2):
-            begin += (0,)
-        last_dim_begin = max(0, -offset)
-        begin += (last_dim_begin,)
-        size = res.shape[:-1]
-        last_dim_end = min(
-            shape[-2], max(0, shape[-1] - offset)) - last_dim_begin
-        if last_dim_end <= 0:
-            return Tensor([])
-        size += (last_dim_end,)
-        res = tensor_operator_registry.get('tensor_slice')(res, begin, size)
-        return res.astype(dtype)
+    def diagonal_scatter(self, src, offset=0, dim1=0, dim2=1):
+        r"""
+        For details, please refer to :func:`mindspore.ops.diagonal_scatter`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('diagonal_scatter')(self, src, offset, dim1, dim2)

     def trace(self, offset=0, axis1=0, axis2=1, dtype=None):
         """
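The hand-rolled eye/mask/slice implementation above is replaced by a single call into the functional diagonal op; the user-facing behavior is unchanged. A quick sketch:

import mindspore as ms

x = ms.Tensor([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
print(x.diagonal())          # [0 4 8]
print(x.diagonal(offset=1))  # [1 5]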
@@ -2619,20 +2966,20 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         Takes elements from a tensor along an axis.

         Args:
-            indices (Tensor): The indices with shape
+            indices (Tensor): The indices with shape :math:`(Nj...)` of the values to extract.
             axis (int, optional): The axis over which to select values. By default,
-                the flattened input tensor is used. Default:
-            mode ('raise'
+                the flattened input tensor is used. Default: ``None`` .
+            mode (str, optional): Support ``'raise'``, ``'wrap'``, ``'clip'``.

-                - raise
+                - ``raise``: Raises an error;

-                - wrap
+                - ``wrap``: Wraps around;

-                - clip
+                - ``clip``: Clips to the range. ``'clip'`` mode means that all indices that are
                   too large are replaced by the index that addresses the last element
                   along that axis. Note that this disables indexing with negative numbers.

-                Default: 'clip'.
+                Default: ``'clip'`` .

         Returns:
             Tensor, the indexed result.
@@ -2690,17 +3037,17 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             `choices` must be broadcasted to the same shape. If `choices` is itself a tensor,
             then its outermost dimension (i.e., the one corresponding to ``choices.shape[0]``)
             is taken as defining the "sequence".
-            mode (
-                ``[0, n-1]`` will be treated
+            mode (str, optional): Specifies how indices outside
+                ``[0, n-1]`` will be treated. Support ``'raise'``, ``'wrap'``, ``'clip'``.

-                - raise
+                - ``raise``: Raises an error;

-                - wrap
+                - ``wrap``: Wraps around;

-                - clip
-                  Note that this disables indexing with negative numbers.
+                - ``clip``: Clips to the range. The values greater than n-1 will be mapped to n-1.
+                  Note that this mode disables indexing with negative numbers.

-                Default: 'clip'
+                Default: ``'clip'``.

         Returns:
             Tensor, the merged result.
@@ -2768,10 +3115,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             side (str, optional): If 'left', the index of the first suitable
                 location found is given. If 'right', return the last such index. If there is
                 no suitable index, return either 0 or N (where N is the length of the tensor).
-                Default:
+                Default: ``left`` .
             sorter (Union[int, float, bool, list, tuple, Tensor]): 1-D optional tensor of
                 integer indices that sort the tensor into ascending order. They are typically
-                the result of argsort. Default: None.
+                the result of argsort. Default: ``None`` .

         Returns:
             Tensor, array of insertion points with the same shape as `v`.
@@ -2850,10 +3197,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):

         Args:
             axis (Union[None, int, tuple(int)]): Axis or axes along which the variance is computed.
-                The default is to compute the variance of the flattened array. Default:
-            ddof (int): Means Delta Degrees of Freedom. Default: 0.
+                The default is to compute the variance of the flattened array. Default: ``None`` .
+            ddof (int): Means Delta Degrees of Freedom. Default: ``0`` .
                 The divisor used in calculations is :math:`N - ddof`, where :math:`N` represents the number of elements.
-            keepdims (bool): Default:
+            keepdims (bool): Default: ``False`` .

         Returns:
             Variance tensor.
@@ -2887,10 +3234,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             axis = ()
         else:
             axis = validator.check_and_canonicalize_axes(axis, self.ndim)
-        x_mean = tensor_operator_registry.get('mean')(
+        x_mean = tensor_operator_registry.get('mean')(self, axis, True)
         x_sub = tensor_operator_registry.get('__sub__')(self, x_mean)
         x_pow = tensor_operator_registry.get('__pow__')(x_sub, 2)
-        x_sum = tensor_operator_registry.get('
+        x_sum = tensor_operator_registry.get('reducesum')(bool(keepdims))(x_pow, axis)
         nums = 1
         if axis == ():
             nums = self.size
@@ -2915,26 +3262,27 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         `extobj` are not supported.

         Args:
-            axis (Union[None, int, tuple(int)]): Axis or axes along which a sum is performed.
+            axis (Union[None, int, tuple(int), list(int)]): Axis or axes along which a sum is performed.
+                Default: ``None`` .
                 If None, sum all the elements of the input tensor.
                 If the axis is negative, it counts from the last to the first axis.
-                If the axis is a tuple of ints, a sum is performed on all the axes specified in the tuple
-                instead of a single axis or all the axes as before.
-            dtype (:class:`mindspore.dtype`, optional): defaults to None. Overrides the dtype of the
+                If the axis is a tuple or list of ints, a sum is performed on all the axes specified in the tuple
+                or list instead of a single axis or all the axes as before.
+            dtype (:class:`mindspore.dtype`, optional): defaults to ``None`` . Overrides the dtype of the
                 output Tensor.
-            keepdims (bool): If this is set to True, the axes which are reduced are left in the result as
-                dimensions with size one. With this option, the result will broadcast correctly against the input
-                If the default value is passed, then keepdims will not be passed through to the sum method
-                sub-classes of ndarray, however any non-default value will be. If the sub-class method does not
-                implement keepdims any exceptions will be raised. Default:
-            initial (scalar): Starting value for the sum. Default:
+            keepdims (bool): If this is set to ``True`` , the axes which are reduced are left in the result as
+                dimensions with size one. With this option, the result will broadcast correctly against the input
+                array. If the default value is passed, then keepdims will not be passed through to the sum method
+                of sub-classes of ndarray, however any non-default value will be. If the sub-class method does not
+                implement keepdims any exceptions will be raised. Default: ``False`` .
+            initial (scalar): Starting value for the sum. Default: ``None`` .

         Returns:
             Tensor. A tensor with the same shape as input, with the specified axis removed.
-            If the input tensor is a 0-d array, or if the axis is None, a scalar is returned.
+            If the input tensor is a 0-d array, or if the axis is ``None`` , a scalar is returned.

         Raises:
-            TypeError: If input is not array_like, or `axis` is not int or
+            TypeError: If input is not array_like, or `axis` is not int, tuple of ints or list of ints,
                 or `keepdims` is not integer, or `initial` is not scalar.
             ValueError: If any axis is out of range or duplicate axes exist.
@@ -2954,27 +3302,14 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         >>> print(input_x.sum(axis=1))
         [10. 35.]
         """
-        input_x = self.astype(mstype.int32) if self.dtype == mstype.bool_ else self
-        dtype = input_x.dtype if dtype is None else dtype
-        if not isinstance(keepdims, int):
-            raise TypeError("For 'Tensor.sum', the type of the argument 'keepdims' must be int, but "
-                            "got {}.".format(type(keepdims)))
         if initial is not None and not isinstance(initial, (int, float, bool)):
-            raise TypeError("For
-
-        if axis is None:
-            axis = ()
-        else:
-            axis = validator.check_and_canonicalize_axes(axis, self.ndim)
-
-        if not validator.check_type_support(input_x.dtype, 'GPU', (mstype.float64, mstype.float32, mstype.float16)):
-            input_x = input_x.astype(mstype.float32)
-        if 0 in self.shape:
-            input_x = tensor_operator_registry.get('make_tensor')([0], self.dtype)
-        res = tensor_operator_registry.get('sum')(bool(keepdims))(input_x, axis)
+            raise TypeError(f"For Tensor.sum, initial must be int, float or bool, but got {type(initial)}.")
+        res = tensor_operator_registry.get("sum")(self, axis, keepdims)
         if initial is not None:
             res += initial
-
+        if dtype is not None:
+            res = res.astype(dtype)
+        return res

     def sum_to_size(self, *size):
         r"""
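The rewritten sum keeps only the initial check, delegates the reduction to the functional op, then applies initial and dtype to the result. A sketch of the call patterns this supports (parameter names follow the docstring above):

import mindspore as ms

x = ms.Tensor([[1, 2, 3], [4, 5, 6]], ms.float32)
print(x.sum())                        # 21.0
print(x.sum(axis=1, keepdims=True))   # [[ 6.] [15.]]
print(x.sum(axis=1, dtype=ms.int32))  # cast applied after the reduction
print(x.sum(initial=100))             # 121.0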
@@ -2993,6 +3328,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
2993
3328
|
``Ascend`` ``GPU`` ``CPU``
|
|
2994
3329
|
|
|
2995
3330
|
Examples:
|
|
3331
|
+
>>> import numpy as np
|
|
3332
|
+
>>> import mindspore
|
|
3333
|
+
>>> from mindspore import Tensor
|
|
2996
3334
|
>>> x = Tensor(np.random.randn(3, 3, 3, 3, 3, 3), mindspore.float32)
|
|
2997
3335
|
>>> output = x.sum_to_size((1, 3, 1, 3))
|
|
2998
3336
|
>>> print(output.shape)
|
|
@@ -3025,6 +3363,20 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
|
|
|
3025
3363
|
self._init_check()
|
|
3026
3364
|
return tensor_operator_registry.get('nansum')(self, axis=axis, keepdims=keepdims, dtype=dtype)
|
|
3027
3365
|
|
|
3366
|
+
def nanmean(self, axis=None, keepdims=False, *, dtype=None):
|
|
3367
|
+
r"""
|
|
3368
|
+
For details, please refer to :func:`mindspore.ops.nanmean`.
|
|
3369
|
+
"""
|
|
3370
|
+
self._init_check()
|
|
3371
|
+
return tensor_operator_registry.get('nanmean')(self, axis, keepdims, dtype=dtype)
|
|
3372
|
+
|
|
3373
|
+
def nanmedian(self, axis=-1, keepdims=False):
|
|
3374
|
+
r"""
|
|
3375
|
+
For details, please refer to :func:`mindspore.ops.nanmedian`.
|
|
3376
|
+
"""
|
|
3377
|
+
self._init_check()
|
|
3378
|
+
return tensor_operator_registry.get('nanmedian')(self, axis, keepdims)
|
|
3379
|
+
|
|
3028
3380
|
def repeat(self, repeats, axis=None):
|
|
3029
3381
|
"""
|
|
3030
3382
|
Repeat elements of a tensor.
|
|
@@ -3033,7 +3385,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
             repeats (Union[int, tuple, list]): The number of repetitions for each element.
                 `repeats` is broadcasted to fit the shape of the given axis.
             axis (int, optional): The axis along which to repeat values. By default,
-                use the flattened input tensor, and return a flat output tensor. Default: None
+                use the flattened input tensor, and return a flat output tensor. Default: ``None``.
 
         Returns:
             Tensor, has the same shape as input tensor except along the given axis.
@@ -3111,7 +3463,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         For details, please refer to :func:`mindspore.ops.bernoulli`.
         """
         self._init_check()
-        validator.check_is_int(seed, 'seed')
         return tensor_operator_registry.get('bernoulli')(self, p, seed)
 
     def random_categorical(self, num_sample, seed=0, dtype=mstype.int64):
@@ -3265,6 +3616,29 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         self._init_check()
         return tensor_operator_registry.get('dense_to_sparse_csr')(self)
 
+    def tolist(self):
+        r"""
+        Convert a Tensor to a List. If the input is a Tensor scalar, a Python scalar will be returned.
+
+        Returns:
+            List or Python scalar.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import mindspore as ms
+            >>> x = ms.Tensor([[1, 2, 3], [4, 5, 6]])
+            >>> out1 = x.tolist()
+            >>> print(out1)
+            [[1, 2, 3], [4, 5, 6]]
+            >>> out2 = x[0][0].tolist()
+            >>> print(out2)
+            1
+        """
+        self._init_check()
+        return self.asnumpy().tolist()
+
     def unbind(self, dim=0):
         r"""
         For details, please refer to :func:`mindspore.ops.unbind`.
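`tolist` is implemented as a thin wrapper over `asnumpy().tolist()`, so it synchronizes the tensor to host memory and produces ordinary Python objects. A one-line sanity check of that equivalence:

```python
import mindspore as ms

x = ms.Tensor([[1, 2, 3], [4, 5, 6]])
assert x.tolist() == x.asnumpy().tolist()  # identical by construction
```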
@@ -3376,6 +3750,15 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get("xlogy")()(self, y)
 
+    def eigvals(self):
+        r"""
+        For details, please refer to :func:`mindspore.ops.eigvals`.
+
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
+        """
+        return tensor_operator_registry.get("eigvals")()(self)
+
     def erf(self):
         r"""
         For details, please refer to :func:`mindspore.ops.erf`.
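A minimal sketch of the new `eigvals` method; the diagonal input makes the expected eigenvalues obvious. The complex output dtype noted in the comment is my assumption from general eigenvalue semantics, not something this hunk states:

```python
import numpy as np
import mindspore as ms

m = ms.Tensor(np.diag([2., 3.]).astype(np.float32))
print(m.eigvals())  # eigenvalues of a diagonal matrix are its diagonal: 2 and 3 (as a complex tensor)
```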
@@ -3388,11 +3771,11 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         """
         return tensor_operator_registry.get("erfc")()(self)
 
-    def tile(self, multiples):
+    def tile(self, reps):
         r"""
         For details, please refer to :func:`mindspore.ops.tile`.
         """
-        return tensor_operator_registry.get('tile')(self, multiples)
+        return tensor_operator_registry.get('tile')(self, reps)
 
     def topk(self, k, dim=None, largest=True, sorted=True):
         r"""
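The hunk renames `tile`'s parameter to `reps` and forwards both arguments explicitly; positional usage is unchanged:

```python
import mindspore as ms

x = ms.Tensor([[1, 2], [3, 4]])
print(x.tile((2, 1)))  # stack two copies along axis 0 -> shape (4, 2)
```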
@@ -3490,6 +3873,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
+            >>> import numpy as np
+            >>> import mindspore
+            >>> from mindspore import Tensor
             >>> input_np = np.random.randn(2, 3, 4, 5).astype(np.float32)
             >>> input_x = Tensor(input_np)
             >>> dtype = mindspore.int32
@@ -3500,6 +3886,60 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         self._init_check()
         return tensor_operator_registry.get('to')()(self, dtype)
 
+    def type(self, dtype=None):
+        r"""
+        Change the dtype of the Tensor to the `dtype` . Return the type if `dtype` is ``None`` .
+
+        Args:
+            dtype (mindspore.dtype, optional): The specified dtype of output tensor. Default: ``None``.
+
+        Returns:
+            Tensor or str. If `dtype` is ``None`` , return a str, which describes the dtype of Tensor.
+            If `dtype` is not ``None`` , then return a Tensor, and the dtype of returned Tensor is `dtype` .
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import mindspore
+            >>> from mindspore import Tensor
+            >>> x = Tensor([[1.2, 2], [3.4, 4]], dtype=mindspore.float32)
+            >>> print(x.type())
+            Float32
+            >>> print(x.type(dtype=mindspore.int32))
+            [[1 2]
+             [3 4]]
+        """
+        self._init_check()
+        if dtype is None:
+            return str(self.dtype)
+        return self.astype(dtype)
+
+    def type_as(self, other):
+        r"""
+        Change the dtype of the Tensor to the dtype of `other`.
+
+        Args:
+            other (Tensor): The return tensor has the same dtype as `other`.
+
+        Returns:
+            Tensor, has the same dtype as `other`.
+
+        Supported Platforms:
+            ``Ascend`` ``GPU`` ``CPU``
+
+        Examples:
+            >>> import mindspore
+            >>> from mindspore import Tensor
+            >>> x = Tensor([[1, 2], [3, 4]], dtype=mindspore.float32)
+            >>> y = Tensor([[1, 2], [3, 4]], dtype=mindspore.int32)
+            >>> x = x.type_as(y)
+            >>> print(x.dtype)
+            Int32
+        """
+        self._init_check()
+        return self.astype(other.dtype)
+
     def bool(self):
         r"""
         Converts input tensor dtype to `bool`.
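Both additions reduce to `astype`, so they are convenience aliases rather than new functionality; a quick sketch of the equivalences the method bodies above imply:

```python
import mindspore as ms

x = ms.Tensor([[1.2, 2.0]], ms.float32)
y = ms.Tensor([1, 2], ms.int32)
print(x.type())                           # 'Float32' (string form of the dtype)
print(x.type(ms.int32).dtype == y.dtype)  # True: same as x.astype(ms.int32)
print(x.type_as(y).dtype)                 # Int32: adopts y's dtype
```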
@@ -3512,6 +3952,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
+            >>> import numpy as np
+            >>> import mindspore
+            >>> from mindspore import Tensor
             >>> input_x = Tensor(np.ones([2,2]), mindspore.float32)
             >>> output = input_x.bool()
             >>> print(output.dtype)
@@ -3531,6 +3974,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
+            >>> import numpy as np
+            >>> import mindspore
+            >>> from mindspore import Tensor
             >>> input_x = Tensor(np.ones([2,2]), mindspore.int32)
             >>> output = input_x.float()
             >>> print(output.dtype)
@@ -3550,6 +3996,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
+            >>> import numpy as np
+            >>> import mindspore
+            >>> from mindspore import Tensor
             >>> input_x = Tensor(np.ones([2,2]), mindspore.int32)
             >>> output = input_x.half()
             >>> print(output.dtype)
@@ -3569,6 +4018,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
+            >>> import numpy as np
+            >>> import mindspore
+            >>> from mindspore import Tensor
             >>> input_x = Tensor(np.ones([2,2]), mindspore.float32)
             >>> output = input_x.int()
             >>> print(output.dtype)
@@ -3588,6 +4040,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
+            >>> import numpy as np
+            >>> import mindspore
+            >>> from mindspore import Tensor
             >>> input_x = Tensor(np.ones([2,2]), mindspore.int32)
             >>> output = input_x.long()
             >>> print(output.dtype)
@@ -3633,6 +4088,16 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         self._init_check()
         return tensor_operator_registry.get('cholesky_inverse')(upper=upper)(self)
 
+    def cholesky_solve(self, input2, upper=False):
+        r"""
+        For details, please refer to :func:`mindspore.ops.cholesky_solve`.
+
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('cholesky_solve')(self, input2, upper)
+
     def conj(self):
         r"""
         For details, please refer to :func:`mindspore.ops.conj`.
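A sketch of the new `cholesky_solve` wrapper, assuming it follows the usual convention of solving `A @ res = self` given a triangular Cholesky factor of `A` (the convention is my assumption; the hunk only shows the delegation to the registry):

```python
import numpy as np
import mindspore as ms

a = np.array([[4., 2.], [2., 3.]], dtype=np.float32)
factor = np.linalg.cholesky(a)              # lower-triangular L with A = L @ L.T
b = ms.Tensor(np.array([[1.], [1.]], dtype=np.float32))
res = b.cholesky_solve(ms.Tensor(factor))   # should approximate np.linalg.solve(a, b)
print(res)
```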
@@ -3640,6 +4105,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         self._init_check()
         return tensor_operator_registry.get('conj')(self)
 
+    def count_nonzero(self, axis=(), keep_dims=False, dtype=mstype.int32):
+        r"""
+        For details, please refer to :func:`mindspore.ops.count_nonzero`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('count_nonzero')(self, axis, keep_dims, dtype)
+
     def cross(self, other, dim=None):
         r"""
         For details, please refer to :func:`mindspore.ops.cross`.
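The new `count_nonzero` keeps the functional op's defaults (`axis=()` meaning all axes, an int32 result). A short usage sketch:

```python
import numpy as np
import mindspore as ms

x = ms.Tensor(np.array([[0, 1, 0], [2, 0, 3]], dtype=np.int32))
print(x.count_nonzero())        # all axes -> 3
print(x.count_nonzero(axis=1))  # per row  -> [1 2]
```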
@@ -3685,13 +4157,17 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     def unfold(self, kernel_size, dilation=1, padding=0, stride=1):
         r"""
         For details, please refer to :func:`mindspore.ops.unfold`.
+
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
+
         """
         self._init_check()
         return tensor_operator_registry.get('unfold')(self, kernel_size, dilation, padding, stride)
 
     def expand(self, size):
         r"""
-        For details, please refer to :func:`mindspore.ops.expand`.
+        For details, please refer to :func:`mindspore.ops.broadcast_to`.
         """
         self._init_check()
         return tensor_operator_registry.get('expand')(self, size)
@@ -3724,6 +4200,13 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         self._init_check()
         return tensor_operator_registry.get('div')(self, value, rounding_mode=rounding_mode)
 
+    def eq(self, other):
+        r"""
+        For details, please refer to :func:`mindspore.ops.eq`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('equal')(self, other)
+
     def equal(self, other):
         r"""
         For details, please refer to :func:`mindspore.ops.equal`.
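Note that `eq` is registered against the same `'equal'` entry as the existing `equal` method, so the two are interchangeable element-wise comparisons:

```python
import mindspore as ms

x = ms.Tensor([1, 2, 3])
y = ms.Tensor([1, 0, 3])
print(x.eq(y))     # [ True False  True]
print(x.equal(y))  # same result; both route to the 'equal' registry entry
```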
@@ -3825,7 +4308,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
         Examples:
             >>> import mindspore as ms
-            >>> from mindspore import Tensor
             >>> x = ms.Tensor([1, 2, 3], ms.int64)
             >>> y = ms.Tensor([1, 2, 3], ms.uint64)
             >>> output = x.is_signed()
@@ -3940,6 +4422,10 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
     def matrix_power(self, n):
         r"""
         For details, please refer to :func:`mindspore.ops.matrix_power`.
+
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
+
         """
         self._init_check()
         return tensor_operator_registry.get('matrix_power')(self, n)
@@ -4008,7 +4494,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
         Keyword Args:
             dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned tensor has
-                the same dtype as `self`. Default: None
+                the same dtype as `self`. Default: ``None``.
 
         Returns:
             Tensor, the shape and dtype is defined above and filled with zeros.
@@ -4020,6 +4506,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
+            >>> import numpy as np
+            >>> import mindspore
+            >>> from mindspore import Tensor
             >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
             >>> output = x.new_zeros((2, 2))
             >>> print(output)
@@ -4042,7 +4531,7 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
 
         Keyword Args:
             dtype (mindspore.dtype, optional): The desired dtype of the output tensor. If None, the returned
-                tensor has the same dtype as `self`. Default: None
+                tensor has the same dtype as `self`. Default: ``None``.
 
         Returns:
             Tensor, the shape and dtype is defined above and filled with ones.
@@ -4054,6 +4543,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         ``Ascend`` ``GPU`` ``CPU``
 
         Examples:
+            >>> import numpy as np
+            >>> import mindspore
+            >>> from mindspore import Tensor
             >>> x = Tensor(np.array([1, 2, 3]), mindspore.float32)
             >>> output = x.new_ones((2, 2))
             >>> print(output)
@@ -4149,6 +4641,9 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         ``GPU`` ``CPU``
 
         Examples:
+            >>> import numpy as np
+            >>> import mindspore
+            >>> from mindspore import Tensor
             >>> x = Tensor(np.asarray(np.complex(1.3 + 0.4j)), mindspore.complex64)
             >>> output = x.imag()
             >>> print(output)
@@ -4157,6 +4652,37 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         self._init_check()
         return tensor_operator_registry.get('imag')(self)
 
+    def quantile(self, q, axis=None, keepdims=False):
+        r"""
+        For details, please refer to :func:`mindspore.ops.quantile`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('quantile')(self, q, axis, keepdims)
+
+    def nanquantile(self, q, axis=None, keepdims=False):
+        """
+        For details, please refer to :func:`mindspore.ops.nanquantile`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('nanquantile')(self, q, axis, keepdims)
+
+    def orgqr(self, input2):
+        r"""
+        For details, please refer to :func:`mindspore.ops.orgqr`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('orgqr')(self, input2)
+
+    def lu_solve(self, LU_data, LU_pivots):
+        r"""
+        For details, please refer to :func:`mindspore.ops.lu_solve`.
+
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('lu_solve')(self, LU_data, LU_pivots)
+
 
     def nextafter(self, other):
         r"""
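A sketch of the quantile wrappers; the linear-interpolation behavior shown in the comment is my assumption (it matches the common default), since the hunk only shows the delegation:

```python
import numpy as np
import mindspore as ms

x = ms.Tensor(np.array([1., 2., 3., 4.], dtype=np.float32))
print(x.quantile(0.5))     # median with linear interpolation -> 2.5
print(x.nanquantile(0.5))  # identical here because the input has no NaNs
```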
@@ -4165,7 +4691,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         self._init_check()
         return tensor_operator_registry.get('nextafter')(self, other)
 
-
     def qr(self, some=True):
         r"""
         For details, please refer to :func:`mindspore.ops.qr`.
@@ -4175,12 +4700,143 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         return tensor_operator_registry.get('qr')(self, 'reduced' if some else 'complete')
 
 
+    def ormqr(self, input2, input3, left=True, transpose=False):
+        r"""
+        For details, please refer to :func:`mindspore.ops.ormqr`.
+        Args `input2` and `input3` correspond to the args `tau` and `other` of :func:`mindspore.ops.ormqr`.
+        """
+        self._init_check()
+        return tensor_operator_registry.get('ormqr')(self, input2, input3, left, transpose)
+
+
+    def masked_scatter(self, mask, x):
+        r"""
+        Returns a Tensor. Updates the values in the "self Tensor" with the values of `x` according to the mask.
+        The shape of `mask` and the "self Tensor" must be the same, or `mask` must be broadcastable.
+
+        .. warning::
+            This is an experimental API that is subject to change or deletion.
+
+        Args:
+            mask (Tensor[bool]): A bool tensor with a shape broadcastable to the "self Tensor".
+            x (Tensor): A tensor with the same data type as the "self Tensor". The number
+                of elements must be greater than or equal to the number of True's in `mask`.
+
+        Returns:
+            Tensor, with the same type and shape as the "self Tensor".
+
+        Raises:
+            TypeError: If `mask` or `x` is not a Tensor.
+            TypeError: If the data type of the "self Tensor" is not supported.
+            TypeError: If the dtype of `mask` is not bool.
+            TypeError: If the dim of the "self Tensor" is less than the dim of `mask`.
+            ValueError: If `mask` cannot be broadcast to the "self Tensor".
+            ValueError: If the number of elements in `x` is less than the number required for the updates.
+
+        Supported Platforms:
+            ``Ascend`` ``CPU``
+
+        Examples:
+            >>> import numpy as np
+            >>> import mindspore
+            >>> from mindspore import Tensor
+            >>> x = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
+            >>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)
+            >>> tensor = Tensor(np.array([5., 6., 7.]), mindspore.float32)
+            >>> output = x.masked_scatter(mask, tensor)
+            >>> print(output)
+            [5. 6. 3. 7.]
+        """
+        self._init_check()
+        return tensor_operator_registry.get('masked_scatter')()(self, mask, x)
+
+
+    def index_put(self, indices, values, accumulate=False):
+        r"""
+        Returns a Tensor. According to the index numbers in `indices` ,
+        replaces the corresponding values of the "self Tensor" with the values in `values`.
+
+        Args:
+            indices (tuple[Tensor], list[Tensor]): the indices of type int32 or int64, used to index into the "self
+                Tensor". The rank of tensors in indices should be 1-D, size of indices should <= "self Tensor".rank
+                and the tensors in indices should be broadcastable.
+            values (Tensor): 1-D Tensor of the same type as "self Tensor". If its size is 1, it will be broadcast.
+            accumulate (bool): If `accumulate` is True, the elements in `values` are added to "self Tensor",
+                else the elements in `values` replace the corresponding elements in the "self Tensor".
+                Default: ``False``.
+
+        Returns:
+            Tensor, with the same type and shape as the "self Tensor".
+
+        Raises:
+            TypeError: If the dtype of the "self Tensor" is not equal to the dtype of `values`.
+            TypeError: If the dtype of `indices` is not tuple[Tensor], list[Tensor].
+            TypeError: If the dtype of tensors in `indices` is not int32 or int64.
+            TypeError: If the dtypes of tensors in `indices` are inconsistent.
+            TypeError: If the dtype of `accumulate` is not bool.
+            ValueError: If rank(`values`) is not 1-D.
+            ValueError: If size(`values`) is not 1 or max size of the tensors in `indices` when
+                rank("self Tensor") == size(`indices`).
+            ValueError: If size(`values`) is not 1 or "self Tensor".shape[-1] when
+                rank("self Tensor") > size(`indices`).
+            ValueError: If the rank of tensors in `indices` is not 1-D.
+            ValueError: If the tensors in `indices` are not broadcastable.
+            ValueError: If size(`indices`) > rank("self Tensor").
+
+        Supported Platforms:
+            ``Ascend`` ``CPU``
+
+        Examples:
+            >>> import numpy as np
+            >>> import mindspore
+            >>> from mindspore import Tensor
+            >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6]]).astype(np.int32))
+            >>> values = Tensor(np.array([3]).astype(np.int32))
+            >>> indices = [Tensor(np.array([0, 1, 1]).astype(np.int32)), Tensor(np.array([1, 2, 1]).astype(np.int32))]
+            >>> accumulate = True
+            >>> output = x.index_put(indices, values, accumulate)
+            >>> print(output)
+            [[1 5 3]
+             [4 8 9]]
+        """
+        self._init_check()
+        validator.check_value_type('accumulate', accumulate, bool, 'Tensor.index_put')
+        _index_put = tensor_operator_registry.get('index_put')(0 if accumulate is False else 1)
+        return _index_put(self, values, indices)
+
+
+    def _offload(self):
+        r"""
+        Offload tensor parameter to host. Currently, this is only supported in PyNative mode.
+
+        Supported Platforms:
+            ``Ascend``
+
+        Examples:
+            >>> import mindspore as ms
+            >>> from mindspore import Tensor
+            >>> x = ms.Tensor([1, 2, 3], ms.int64)
+            >>> x._offload()
+        """
+        self._init_check()
+        return Tensor_._offload(self)
+
+
 def _vm_compare(*args):
     """Implement `vm_compare` for tensor."""
-    obj_str = args[-1]
+    if args:
+        obj_str = args[-1]
+    else:
+        raise ValueError("_vm_compare does not receive any input.")
     if obj_str == "shape":
         fn = getattr(args[0].asnumpy(), obj_str)
         return fn
+    if obj_str == "__setitem__":
+        fn = getattr(args[0].asnumpy(), obj_str)
+        index = args[1].asnumpy() if isinstance(args[1], Tensor) else args[1]
+        value = args[2].asnumpy() if isinstance(args[2], Tensor) else args[2]
+        fn(index, value)
+        return args[0]
     if len(args) == 2:
         fn = getattr(args[0].asnumpy(), obj_str)
         return Tensor(fn())