mindspore-2.0.0rc1-cp38-cp38-manylinux1_x86_64.whl → mindspore-2.2.0-cp38-cp38-manylinux1_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +2 -2
- mindspore/__init__.py +5 -2
- mindspore/_akg/akg/build_module.py +5 -6
- mindspore/_akg/akg/composite/build_module.py +49 -16
- mindspore/_akg/akg/composite/split_stitch.py +10 -11
- mindspore/_akg/akg/config/repository.json +195 -0
- mindspore/_akg/akg/global_configs.py +5 -1
- mindspore/_akg/akg/ms/info_version_adapt.py +67 -1
- mindspore/_akg/akg/tvm/api.py +4 -3
- mindspore/_akg/akg/tvm/autotvm/__init__.py +1 -2
- mindspore/_akg/akg/tvm/autotvm/graph_tuner/base_graph_tuner.py +1 -5
- mindspore/_akg/akg/tvm/autotvm/measure/__init__.py +1 -1
- mindspore/_akg/akg/tvm/autotvm/measure/measure.py +1 -10
- mindspore/_akg/akg/tvm/autotvm/measure/measure_methods.py +1 -372
- mindspore/_akg/akg/tvm/build_module.py +16 -1
- mindspore/_akg/akg/tvm/contrib/graph_runtime.py +0 -53
- mindspore/_akg/akg/tvm/hybrid/parser.py +7 -6
- mindspore/_akg/akg/tvm/ir_builder.py +1 -1
- mindspore/_akg/akg/tvm/module.py +1 -2
- mindspore/_akg/akg/tvm/stmt.py +2 -2
- mindspore/_akg/akg/utils/composite_op_helper.py +9 -10
- mindspore/_akg/akg/utils/kernel_exec.py +58 -260
- mindspore/_akg/akg/utils/op_dsl.py +17 -1
- mindspore/_akg/akg/utils/result_analysis.py +4 -24
- mindspore/_akg/akg/utils/tbe_codegen_utils.py +198 -0
- mindspore/_c_dataengine.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_check_jit_forbidden_api.py +5 -1
- mindspore/_checkparam.py +79 -62
- mindspore/_extends/graph_kernel/__init__.py +0 -1
- mindspore/_extends/graph_kernel/model/graph_split.py +2 -0
- mindspore/_extends/graph_kernel/model/model_builder.py +9 -50
- mindspore/_extends/graph_kernel/splitter.py +1 -9
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +128 -21
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +2 -2
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +4 -2
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +18 -13
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +13 -9
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +1 -1
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -1
- mindspore/_extends/parse/__init__.py +19 -17
- mindspore/_extends/parse/namespace.py +7 -36
- mindspore/_extends/parse/parser.py +375 -189
- mindspore/_extends/parse/resources.py +36 -41
- mindspore/_extends/parse/standard_method.py +350 -245
- mindspore/_extends/parse/trope.py +2 -12
- mindspore/_extends/remote/kernel_build_server.py +24 -7
- mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
- mindspore/_install_custom.py +43 -0
- mindspore/_mindspore_offline_debug.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/amp.py +85 -19
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/base.py +2 -2
- mindspore/boost/boost.py +27 -32
- mindspore/boost/boost_cell_wrapper.py +37 -13
- mindspore/boost/grad_accumulation.py +1 -1
- mindspore/boost/grad_freeze.py +34 -6
- mindspore/boost/group_loss_scale_manager.py +15 -14
- mindspore/boost/less_batch_normalization.py +28 -3
- mindspore/common/__init__.py +15 -11
- mindspore/common/_auto_dynamic.py +68 -0
- mindspore/common/_jit_fallback_utils.py +111 -0
- mindspore/common/_register_for_adapter.py +17 -5
- mindspore/common/_register_for_tensor.py +2 -2
- mindspore/common/_stub_tensor.py +18 -15
- mindspore/common/_utils.py +31 -7
- mindspore/common/api.py +269 -101
- mindspore/common/auto_dynamic_shape.py +498 -0
- mindspore/common/dtype.py +61 -21
- mindspore/common/dump.py +9 -7
- mindspore/common/initializer.py +106 -76
- mindspore/common/jit_config.py +35 -14
- mindspore/common/lazy_inline.py +187 -0
- mindspore/common/mindir_util.py +101 -0
- mindspore/common/mutable.py +10 -13
- mindspore/common/parameter.py +246 -55
- mindspore/common/seed.py +13 -7
- mindspore/common/sparse_tensor.py +29 -33
- mindspore/common/tensor.py +907 -251
- mindspore/communication/__init__.py +7 -4
- mindspore/communication/_comm_helper.py +84 -4
- mindspore/communication/management.py +160 -88
- mindspore/config/op_info.config +99 -75
- mindspore/config/super_bar_config.json +36 -4
- mindspore/context.py +526 -219
- mindspore/dataset/__init__.py +9 -46
- mindspore/dataset/audio/__init__.py +4 -19
- mindspore/dataset/audio/transforms.py +545 -233
- mindspore/dataset/audio/utils.py +21 -18
- mindspore/dataset/callback/ds_callback.py +42 -13
- mindspore/dataset/core/config.py +158 -100
- mindspore/dataset/core/validator_helpers.py +1 -63
- mindspore/dataset/debug/debug_hook.py +45 -13
- mindspore/dataset/debug/pre_defined_hook.py +5 -5
- mindspore/dataset/engine/__init__.py +0 -5
- mindspore/dataset/engine/cache_client.py +38 -15
- mindspore/dataset/engine/datasets.py +615 -278
- mindspore/dataset/engine/datasets_audio.py +154 -283
- mindspore/dataset/engine/datasets_standard_format.py +104 -116
- mindspore/dataset/engine/datasets_text.py +443 -326
- mindspore/dataset/engine/datasets_user_defined.py +251 -164
- mindspore/dataset/engine/datasets_vision.py +839 -1443
- mindspore/dataset/engine/iterators.py +11 -4
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +7 -3
- mindspore/dataset/engine/obs/util.py +3 -0
- mindspore/dataset/engine/offload.py +6 -6
- mindspore/dataset/engine/queue.py +15 -14
- mindspore/dataset/engine/samplers.py +39 -23
- mindspore/dataset/engine/serializer_deserializer.py +22 -6
- mindspore/dataset/engine/validators.py +21 -331
- mindspore/dataset/text/__init__.py +5 -33
- mindspore/dataset/text/transforms.py +334 -165
- mindspore/dataset/text/utils.py +215 -145
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/c_transforms.py +3 -2
- mindspore/dataset/transforms/py_transforms_util.py +40 -12
- mindspore/dataset/transforms/transforms.py +174 -71
- mindspore/dataset/utils/browse_dataset.py +25 -17
- mindspore/dataset/utils/line_reader.py +24 -21
- mindspore/dataset/vision/__init__.py +5 -26
- mindspore/dataset/vision/c_transforms.py +177 -165
- mindspore/dataset/vision/py_transforms.py +114 -119
- mindspore/dataset/vision/py_transforms_util.py +54 -51
- mindspore/dataset/vision/transforms.py +1127 -381
- mindspore/dataset/vision/utils.py +54 -38
- mindspore/dataset/vision/validators.py +12 -2
- mindspore/experimental/map_parameter.py +38 -4
- mindspore/{dataset/datapreprocess → experimental/optim}/__init__.py +14 -4
- mindspore/experimental/optim/adam.py +192 -0
- mindspore/experimental/optim/adamw.py +181 -0
- mindspore/experimental/optim/lr_scheduler.py +1427 -0
- mindspore/experimental/optim/optimizer.py +252 -0
- mindspore/experimental/optim/sgd.py +147 -0
- mindspore/gen_ops.py +273 -0
- mindspore/include/OWNERS +1 -2
- mindspore/include/api/context.h +21 -1
- mindspore/include/api/data_type.h +2 -1
- mindspore/include/api/graph.h +0 -15
- mindspore/include/api/kernel.h +2 -0
- mindspore/include/api/kernel_api.h +37 -12
- mindspore/include/api/model.h +29 -42
- mindspore/include/api/model_group.h +14 -3
- mindspore/include/api/model_parallel_runner.h +18 -2
- mindspore/include/api/serialization.h +26 -0
- mindspore/include/api/status.h +1 -0
- mindspore/include/api/types.h +38 -4
- mindspore/include/c_api/ms/abstract.h +67 -0
- mindspore/include/c_api/ms/attribute.h +197 -0
- mindspore/include/c_api/ms/base/handle_types.h +43 -0
- mindspore/include/c_api/ms/base/macros.h +32 -0
- mindspore/include/c_api/ms/base/status.h +33 -0
- mindspore/include/c_api/ms/base/types.h +282 -0
- mindspore/include/c_api/ms/context.h +102 -0
- mindspore/include/c_api/ms/graph.h +160 -0
- mindspore/include/c_api/ms/node.h +606 -0
- mindspore/include/c_api/ms/tensor.h +161 -0
- mindspore/include/c_api/ms/value.h +84 -0
- mindspore/include/c_api/status_c.h +3 -0
- mindspore/include/dataset/constants.h +6 -12
- mindspore/include/dataset/execute.h +23 -13
- mindspore/include/dataset/text.h +26 -26
- mindspore/include/dataset/transforms.h +25 -31
- mindspore/include/dataset/vision.h +60 -60
- mindspore/include/dataset/vision_ascend.h +5 -6
- mindspore/include/dataset/vision_lite.h +17 -17
- mindspore/include/mindapi/base/format.h +0 -1
- mindspore/include/mindapi/base/type_id.h +2 -1
- mindspore/include/mindapi/base/types.h +5 -1
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libjemalloc.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +9000 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/libakg.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/cpu/libakg.so +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
- mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu10.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.6/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
- mindspore/log.py +9 -6
- mindspore/mindrecord/filereader.py +33 -4
- mindspore/mindrecord/filewriter.py +70 -35
- mindspore/mindrecord/mindpage.py +40 -34
- mindspore/mindrecord/shardreader.py +1 -1
- mindspore/mindrecord/shardsegment.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +25 -18
- mindspore/mindrecord/tools/cifar10_to_mr.py +25 -18
- mindspore/mindrecord/tools/csv_to_mr.py +29 -13
- mindspore/mindrecord/tools/imagenet_to_mr.py +24 -10
- mindspore/mindrecord/tools/mnist_to_mr.py +24 -11
- mindspore/mindrecord/tools/tfrecord_to_mr.py +31 -26
- mindspore/nn/cell.py +463 -169
- mindspore/nn/dynamic_lr.py +47 -43
- mindspore/nn/layer/activation.py +225 -82
- mindspore/nn/layer/basic.py +121 -79
- mindspore/nn/layer/channel_shuffle.py +21 -21
- mindspore/nn/layer/combined.py +33 -26
- mindspore/nn/layer/container.py +277 -22
- mindspore/nn/layer/conv.py +441 -304
- mindspore/nn/layer/dense.py +19 -13
- mindspore/nn/layer/embedding.py +62 -49
- mindspore/nn/layer/flash_attention.py +264 -0
- mindspore/nn/layer/image.py +50 -39
- mindspore/nn/layer/math.py +62 -51
- mindspore/nn/layer/normalization.py +219 -167
- mindspore/nn/layer/padding.py +58 -70
- mindspore/nn/layer/pooling.py +334 -287
- mindspore/nn/layer/rnn_cells.py +53 -38
- mindspore/nn/layer/rnns.py +59 -56
- mindspore/nn/layer/thor_layer.py +52 -44
- mindspore/nn/layer/timedistributed.py +6 -4
- mindspore/nn/layer/transformer.py +284 -164
- mindspore/nn/learning_rate_schedule.py +34 -25
- mindspore/nn/loss/__init__.py +3 -2
- mindspore/nn/loss/loss.py +554 -311
- mindspore/nn/optim/ada_grad.py +12 -9
- mindspore/nn/optim/adadelta.py +14 -11
- mindspore/nn/optim/adafactor.py +19 -16
- mindspore/nn/optim/adam.py +62 -47
- mindspore/nn/optim/adamax.py +13 -10
- mindspore/nn/optim/adasum.py +12 -8
- mindspore/nn/optim/asgd.py +10 -9
- mindspore/nn/optim/ftrl.py +20 -17
- mindspore/nn/optim/lamb.py +16 -12
- mindspore/nn/optim/lars.py +8 -6
- mindspore/nn/optim/lazyadam.py +25 -20
- mindspore/nn/optim/momentum.py +10 -7
- mindspore/nn/optim/optimizer.py +61 -9
- mindspore/nn/optim/proximal_ada_grad.py +14 -13
- mindspore/nn/optim/rmsprop.py +17 -13
- mindspore/nn/optim/rprop.py +30 -17
- mindspore/nn/optim/sgd.py +40 -23
- mindspore/nn/optim/thor.py +24 -26
- mindspore/nn/probability/bijector/bijector.py +11 -11
- mindspore/nn/probability/bijector/exp.py +1 -1
- mindspore/nn/probability/bijector/gumbel_cdf.py +3 -3
- mindspore/nn/probability/bijector/invert.py +1 -1
- mindspore/nn/probability/bijector/power_transform.py +29 -29
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +5 -5
- mindspore/nn/probability/bnn_layers/bnn_cell_wrapper.py +4 -2
- mindspore/nn/probability/bnn_layers/conv_variational.py +13 -13
- mindspore/nn/probability/bnn_layers/dense_variational.py +12 -12
- mindspore/nn/probability/bnn_layers/layer_distribution.py +9 -8
- mindspore/nn/probability/distribution/_utils/custom_ops.py +19 -3
- mindspore/nn/probability/distribution/_utils/utils.py +1 -1
- mindspore/nn/probability/distribution/bernoulli.py +9 -9
- mindspore/nn/probability/distribution/beta.py +8 -8
- mindspore/nn/probability/distribution/categorical.py +23 -15
- mindspore/nn/probability/distribution/cauchy.py +5 -6
- mindspore/nn/probability/distribution/distribution.py +3 -3
- mindspore/nn/probability/distribution/exponential.py +4 -4
- mindspore/nn/probability/distribution/gamma.py +10 -10
- mindspore/nn/probability/distribution/geometric.py +8 -8
- mindspore/nn/probability/distribution/gumbel.py +8 -9
- mindspore/nn/probability/distribution/half_normal.py +5 -5
- mindspore/nn/probability/distribution/laplace.py +5 -5
- mindspore/nn/probability/distribution/log_normal.py +12 -11
- mindspore/nn/probability/distribution/logistic.py +8 -8
- mindspore/nn/probability/distribution/normal.py +6 -5
- mindspore/nn/probability/distribution/poisson.py +10 -11
- mindspore/nn/probability/distribution/student_t.py +8 -9
- mindspore/nn/probability/distribution/transformed_distribution.py +5 -5
- mindspore/nn/probability/distribution/uniform.py +11 -11
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +9 -9
- mindspore/nn/wrap/cell_wrapper.py +188 -63
- mindspore/nn/wrap/grad_reducer.py +21 -12
- mindspore/nn/wrap/loss_scale.py +136 -49
- mindspore/numpy/__init__.py +4 -4
- mindspore/numpy/array_creations.py +55 -56
- mindspore/numpy/array_ops.py +134 -35
- mindspore/numpy/logic_ops.py +66 -20
- mindspore/numpy/math_ops.py +142 -139
- mindspore/numpy/utils_const.py +2 -2
- mindspore/offline_debug/convert_async.py +2 -2
- mindspore/ops/_grad_experimental/__init__.py +7 -5
- mindspore/ops/_grad_experimental/grad_array_ops.py +231 -348
- mindspore/ops/{_grad → _grad_experimental}/grad_base.py +1 -33
- mindspore/ops/{_grad → _grad_experimental}/grad_comm_ops.py +25 -13
- mindspore/ops/{_grad/__init__.py → _grad_experimental/grad_debug_ops.py} +15 -7
- mindspore/ops/{_grad → _grad_experimental}/grad_implementations.py +17 -11
- mindspore/ops/_grad_experimental/grad_inner_ops.py +33 -52
- mindspore/ops/_grad_experimental/grad_math_ops.py +151 -1224
- mindspore/ops/_grad_experimental/grad_nn_ops.py +141 -414
- mindspore/ops/{_grad → _grad_experimental}/grad_quant_ops.py +10 -6
- mindspore/ops/_grad_experimental/grad_sparse.py +317 -2
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -13
- mindspore/ops/{_grad → _grad_experimental}/taylor_rule.py +1 -1
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/flash_attention/__init__.py +0 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/attention.py +406 -0
- mindspore/{_extends/graph_kernel/expanders/complex/__init__.py → ops/_op_impl/_custom_op/flash_attention/constants.py} +27 -8
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_bwd.py +467 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_fwd.py +563 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_impl.py +193 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tik_ops_utils.py +435 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/__init__.py +0 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/sparse_tiling.py +45 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/strategy.py +67 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/wukong_tiling.py +62 -0
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/aicpu/__init__.py +41 -1
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d.py +37 -0
- mindspore/ops/_op_impl/aicpu/bias_add_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/cast.py +52 -0
- mindspore/ops/_op_impl/aicpu/coalesce.py +2 -0
- mindspore/ops/_op_impl/aicpu/col2im.py +3 -1
- mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
- mindspore/ops/_op_impl/aicpu/dropout_genmask.py +6 -0
- mindspore/ops/_op_impl/aicpu/eps.py +32 -0
- mindspore/ops/_op_impl/aicpu/eye.py +4 -4
- mindspore/ops/_op_impl/aicpu/fft_with_size.py +6 -0
- mindspore/ops/_op_impl/aicpu/fill_diagonal.py +5 -0
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -2
- mindspore/ops/_op_impl/aicpu/im2col.py +3 -5
- mindspore/ops/_op_impl/aicpu/lgamma.py +1 -0
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +6 -3
- mindspore/ops/_op_impl/aicpu/lu.py +39 -0
- mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/masked_scatter.py +1 -0
- mindspore/ops/_op_impl/aicpu/masked_select_grad.py +3 -0
- mindspore/ops/_op_impl/aicpu/matrix_band_part.py +59 -0
- mindspore/ops/_op_impl/aicpu/matrix_power.py +6 -1
- mindspore/ops/_op_impl/aicpu/median.py +1 -0
- mindspore/ops/_op_impl/aicpu/multinomial.py +9 -9
- mindspore/ops/_op_impl/aicpu/not_equal.py +0 -5
- mindspore/ops/_op_impl/aicpu/pad_v3.py +3 -1
- mindspore/ops/_op_impl/aicpu/pad_v3_grad.py +2 -0
- mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +15 -7
- mindspore/ops/_op_impl/aicpu/random_categorical.py +39 -19
- mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +5 -2
- mindspore/ops/_op_impl/aicpu/random_poisson.py +103 -52
- mindspore/ops/_op_impl/aicpu/random_shuffle.py +17 -15
- mindspore/ops/_op_impl/aicpu/resize_bilinear_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2.py +0 -6
- mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2_grad.py +0 -7
- mindspore/ops/_op_impl/aicpu/scatter_nd.py +2 -0
- mindspore/ops/_op_impl/aicpu/sequence_concat.py +40 -0
- mindspore/ops/_op_impl/aicpu/sequence_stack.py +40 -0
- mindspore/ops/_op_impl/aicpu/{sparseaddmm.py → sparse_addmm.py} +2 -2
- mindspore/ops/_op_impl/aicpu/{sparsesparsemaximum.py → sparse_sparse_maximum.py} +4 -4
- mindspore/ops/_op_impl/aicpu/standard_laplace.py +5 -4
- mindspore/ops/_op_impl/aicpu/standard_normal.py +5 -4
- mindspore/ops/_op_impl/aicpu/truncated_normal.py +9 -7
- mindspore/ops/_op_impl/aicpu/uniform.py +5 -3
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +8 -4
- mindspore/ops/_op_impl/aicpu/uniform_int.py +5 -5
- mindspore/ops/_op_impl/aicpu/uniform_real.py +4 -4
- mindspore/ops/_op_impl/aicpu/upsample_nearest_3d.py +14 -6
- mindspore/ops/_op_impl/aicpu/upsample_nearest_3d_grad.py +22 -8
- mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d.py +11 -6
- mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d_grad.py +21 -10
- mindspore/ops/_op_impl/tbe/__init__.py +6 -4
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool.py +2 -2
- mindspore/ops/_op_impl/tbe/avg_pool_3d.py +3 -3
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +4 -4
- mindspore/ops/_op_impl/tbe/avg_pool_ds.py +2 -2
- mindspore/ops/_op_impl/tbe/avg_pool_grad.py +3 -3
- mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +3 -3
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +2 -2
- mindspore/ops/_op_impl/tbe/bn_infer.py +2 -2
- mindspore/ops/_op_impl/tbe/bn_infer_ds.py +3 -2
- mindspore/ops/_op_impl/tbe/broadcast_to.py +1 -1
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +3 -3
- mindspore/ops/_op_impl/tbe/expand_dims.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_v2.py +56 -0
- mindspore/ops/_op_impl/tbe/im2col.py +4 -4
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +7 -3
- mindspore/ops/_op_impl/tbe/mem_set.py +38 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +3 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_d.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +2 -2
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +2 -0
- mindspore/ops/_primitive_cache.py +1 -1
- mindspore/ops/_tracefunc.py +241 -0
- mindspore/ops/_utils/utils.py +10 -2
- mindspore/ops/_vmap/vmap_array_ops.py +5 -3
- mindspore/ops/_vmap/vmap_base.py +5 -4
- mindspore/ops/_vmap/vmap_convolution_ops.py +1 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +6 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +11 -6
- mindspore/ops/_vmap/vmap_math_ops.py +5 -2
- mindspore/ops/_vmap/vmap_nn_ops.py +135 -11
- mindspore/ops/arg_dtype_cast.py +54 -0
- mindspore/ops/composite/__init__.py +7 -5
- mindspore/ops/composite/base.py +78 -34
- mindspore/ops/composite/math_ops.py +5 -695
- mindspore/ops/composite/multitype_ops/_compile_utils.py +403 -97
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +28 -22
- mindspore/ops/composite/multitype_ops/add_impl.py +69 -7
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +48 -10
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/mod_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +10 -7
- mindspore/ops/composite/multitype_ops/sub_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/uadd_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +9 -0
- mindspore/ops/deprecated.py +304 -0
- mindspore/ops/function/__init__.py +41 -4
- mindspore/ops/function/array_func.py +1108 -467
- mindspore/ops/function/clip_func.py +94 -27
- mindspore/ops/function/debug_func.py +3 -1
- mindspore/ops/function/grad/grad_func.py +82 -73
- mindspore/ops/function/image_func.py +28 -12
- mindspore/ops/function/linalg_func.py +135 -39
- mindspore/ops/function/math_func.py +3779 -894
- mindspore/ops/function/nn_func.py +1584 -657
- mindspore/ops/function/parameter_func.py +13 -3
- mindspore/ops/function/random_func.py +247 -153
- mindspore/ops/function/sparse_func.py +14 -11
- mindspore/ops/function/sparse_unary_func.py +173 -47
- mindspore/ops/function/spectral_func.py +8 -4
- mindspore/ops/function/vmap_func.py +8 -7
- mindspore/ops/functional.py +47 -16
- mindspore/ops/op_info_register.py +346 -86
- mindspore/ops/operations/__init__.py +38 -22
- mindspore/ops/operations/_grad_ops.py +145 -149
- mindspore/ops/operations/_inner_ops.py +298 -56
- mindspore/ops/operations/_ms_kernel.py +3 -3
- mindspore/ops/operations/_quant_ops.py +24 -28
- mindspore/ops/operations/_rl_inner_ops.py +9 -7
- mindspore/ops/operations/_scalar_ops.py +115 -0
- mindspore/ops/operations/_sequence_ops.py +148 -10
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/_thor_ops.py +2 -2
- mindspore/ops/operations/array_ops.py +1239 -561
- mindspore/ops/operations/comm_ops.py +166 -90
- mindspore/ops/operations/control_ops.py +3 -3
- mindspore/ops/operations/custom_ops.py +124 -102
- mindspore/ops/operations/debug_ops.py +24 -11
- mindspore/ops/operations/image_ops.py +86 -71
- mindspore/ops/operations/inner_ops.py +18 -13
- mindspore/ops/operations/linalg_ops.py +30 -11
- mindspore/ops/operations/math_ops.py +1730 -435
- mindspore/ops/operations/nn_ops.py +1953 -943
- mindspore/ops/operations/other_ops.py +65 -43
- mindspore/ops/operations/random_ops.py +258 -98
- mindspore/ops/operations/rl_ops.py +4 -36
- mindspore/ops/operations/sparse_ops.py +38 -33
- mindspore/ops/operations/spectral_ops.py +8 -4
- mindspore/ops/primitive.py +66 -44
- mindspore/ops/signature.py +5 -5
- mindspore/parallel/_auto_parallel_context.py +80 -19
- mindspore/parallel/_cost_model_context.py +42 -0
- mindspore/parallel/_offload_context.py +162 -72
- mindspore/parallel/_parallel_serialization.py +2 -2
- mindspore/parallel/_ps_context.py +16 -4
- mindspore/parallel/_recovery_context.py +2 -1
- mindspore/parallel/_tensor.py +15 -13
- mindspore/parallel/_transformer/layers.py +8 -6
- mindspore/parallel/_transformer/loss.py +1 -0
- mindspore/parallel/_transformer/moe.py +7 -7
- mindspore/parallel/_transformer/op_parallel_config.py +12 -1
- mindspore/parallel/_transformer/transformer.py +34 -14
- mindspore/parallel/_utils.py +36 -14
- mindspore/parallel/algo_parameter_config.py +114 -20
- mindspore/parallel/checkpoint_transform.py +16 -18
- mindspore/parallel/shard.py +16 -13
- mindspore/profiler/__init__.py +1 -1
- mindspore/profiler/common/struct_type.py +3 -3
- mindspore/profiler/common/util.py +3 -2
- mindspore/profiler/envprofiling.py +11 -4
- mindspore/profiler/parser/aicpu_data_parser.py +5 -3
- mindspore/profiler/parser/ascend_flops_generator.py +94 -0
- mindspore/profiler/parser/ascend_fpbp_generator.py +76 -0
- mindspore/profiler/parser/ascend_hccl_generator.py +288 -0
- mindspore/profiler/parser/ascend_msprof_exporter.py +213 -0
- mindspore/profiler/parser/ascend_msprof_generator.py +199 -0
- mindspore/profiler/parser/ascend_op_generator.py +276 -0
- mindspore/profiler/parser/ascend_steptrace_generator.py +94 -0
- mindspore/profiler/parser/ascend_timeline_generator.py +110 -54
- mindspore/profiler/parser/base_timeline_generator.py +11 -7
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +45 -46
- mindspore/profiler/parser/flops_parser.py +15 -11
- mindspore/profiler/parser/framework_parser.py +92 -73
- mindspore/profiler/parser/hccl_parser.py +16 -12
- mindspore/profiler/parser/integrator.py +22 -11
- mindspore/profiler/parser/memory_usage_parser.py +36 -11
- mindspore/profiler/parser/minddata_analyzer.py +12 -14
- mindspore/profiler/parser/minddata_pipeline_parser.py +1 -1
- mindspore/profiler/parser/msadvisor_parser.py +8 -4
- mindspore/profiler/parser/op_intermediate_parser.py +5 -2
- mindspore/profiler/parser/optime_parser.py +1 -1
- mindspore/profiler/parser/profiler_info.py +4 -5
- mindspore/profiler/parser/step_trace_parser.py +11 -14
- mindspore/profiler/profiling.py +678 -377
- mindspore/rewrite/api/node.py +211 -54
- mindspore/rewrite/api/node_type.py +5 -0
- mindspore/rewrite/api/pattern_engine.py +22 -23
- mindspore/rewrite/api/scoped_value.py +20 -17
- mindspore/rewrite/api/symbol_tree.py +252 -106
- mindspore/rewrite/api/tree_node_helper.py +3 -0
- mindspore/rewrite/ast_helpers/__init__.py +2 -1
- mindspore/rewrite/ast_helpers/ast_finder.py +129 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +116 -104
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +97 -46
- mindspore/rewrite/common/rewrite_elog.py +5 -1
- mindspore/rewrite/namer.py +51 -51
- mindspore/rewrite/namespace.py +14 -5
- mindspore/{ops/bprop_mindir → rewrite/node}/__init__.py +9 -4
- mindspore/rewrite/node/call_function.py +79 -0
- mindspore/rewrite/node/cell_container.py +135 -0
- mindspore/rewrite/node/control_flow.py +88 -0
- mindspore/rewrite/{node.py → node/node.py} +313 -247
- mindspore/rewrite/node/node_manager.py +254 -0
- mindspore/rewrite/node/node_topological_manager.py +243 -0
- mindspore/rewrite/parsers/arguments_parser.py +22 -21
- mindspore/rewrite/parsers/assign_parser.py +225 -239
- mindspore/rewrite/parsers/attribute_parser.py +9 -7
- mindspore/rewrite/parsers/class_def_parser.py +179 -218
- mindspore/rewrite/parsers/constant_parser.py +9 -6
- mindspore/rewrite/parsers/container_parser.py +9 -7
- mindspore/rewrite/parsers/for_parser.py +36 -15
- mindspore/rewrite/parsers/function_def_parser.py +23 -20
- mindspore/rewrite/parsers/if_parser.py +28 -24
- mindspore/rewrite/parsers/module_parser.py +202 -25
- mindspore/rewrite/{parser.py → parsers/parser.py} +4 -2
- mindspore/rewrite/{parser_register.py → parsers/parser_register.py} +1 -1
- mindspore/rewrite/parsers/return_parser.py +6 -6
- mindspore/rewrite/sparsify/sparse_transformer.py +12 -3
- mindspore/rewrite/sparsify/sparsify.py +4 -1
- mindspore/rewrite/sparsify/utils.py +11 -5
- mindspore/rewrite/symbol_tree.py +577 -732
- mindspore/rewrite/symbol_tree_builder.py +9 -175
- mindspore/rewrite/symbol_tree_dumper.py +2 -2
- mindspore/run_check/_check_version.py +46 -39
- mindspore/run_check/run_check.py +3 -2
- mindspore/{scipy/sparse → safeguard}/__init__.py +4 -5
- mindspore/safeguard/rewrite_obfuscation.py +517 -0
- mindspore/scipy/__init__.py +1 -1
- mindspore/scipy/linalg.py +67 -61
- mindspore/scipy/ops.py +5 -41
- mindspore/scipy/ops_grad.py +3 -2
- mindspore/scipy/ops_wrapper.py +5 -5
- mindspore/scipy/optimize/line_search.py +8 -8
- mindspore/scipy/optimize/linear_sum_assignment.py +4 -4
- mindspore/scipy/optimize/minimize.py +16 -12
- mindspore/scipy/utils.py +1 -52
- mindspore/scipy/utils_const.py +4 -4
- mindspore/train/__init__.py +4 -4
- mindspore/train/_utils.py +13 -5
- mindspore/train/amp.py +410 -148
- mindspore/train/anf_ir_pb2.py +16 -4
- mindspore/train/callback/_backup_and_restore.py +8 -11
- mindspore/train/callback/_callback.py +80 -3
- mindspore/train/callback/_checkpoint.py +82 -51
- mindspore/train/callback/_early_stop.py +12 -15
- mindspore/train/callback/_history.py +1 -1
- mindspore/train/callback/_lambda_callback.py +13 -13
- mindspore/train/callback/_landscape.py +21 -17
- mindspore/train/callback/_loss_monitor.py +9 -10
- mindspore/train/callback/_on_request_exit.py +16 -33
- mindspore/train/callback/_reduce_lr_on_plateau.py +21 -24
- mindspore/train/callback/_summary_collector.py +44 -30
- mindspore/train/callback/_time_monitor.py +62 -12
- mindspore/train/data_sink.py +10 -16
- mindspore/train/dataset_helper.py +154 -86
- mindspore/train/loss_scale_manager.py +14 -9
- mindspore/train/metrics/__init__.py +10 -2
- mindspore/train/metrics/accuracy.py +1 -1
- mindspore/train/metrics/auc.py +1 -1
- mindspore/train/metrics/bleu_score.py +2 -2
- mindspore/train/metrics/confusion_matrix.py +14 -14
- mindspore/train/metrics/cosine_similarity.py +3 -3
- mindspore/train/metrics/dice.py +1 -1
- mindspore/train/metrics/fbeta.py +1 -1
- mindspore/train/metrics/hausdorff_distance.py +8 -6
- mindspore/train/metrics/mean_surface_distance.py +5 -4
- mindspore/train/metrics/metric.py +49 -17
- mindspore/train/metrics/occlusion_sensitivity.py +4 -4
- mindspore/train/metrics/perplexity.py +1 -1
- mindspore/train/metrics/precision.py +2 -2
- mindspore/train/metrics/recall.py +2 -3
- mindspore/train/metrics/roc.py +7 -7
- mindspore/train/metrics/root_mean_square_surface_distance.py +5 -4
- mindspore/train/metrics/topk.py +7 -4
- mindspore/train/mind_ir_pb2.py +193 -48
- mindspore/train/model.py +377 -133
- mindspore/train/serialization.py +697 -245
- mindspore/train/summary/_summary_adapter.py +5 -2
- mindspore/train/summary/_writer_pool.py +4 -3
- mindspore/train/summary/summary_record.py +25 -23
- mindspore/train/train_thor/convert_utils.py +39 -23
- mindspore/train/train_thor/dataset_helper.py +4 -3
- mindspore/train/train_thor/model_thor.py +8 -8
- mindspore/version.py +1 -1
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/METADATA +7 -8
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/RECORD +647 -818
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/entry_points.txt +0 -1
- mindspore/_akg/akg/tvm/contrib/debugger/__init__.py +0 -16
- mindspore/_akg/akg/tvm/contrib/debugger/debug_result.py +0 -274
- mindspore/_akg/akg/tvm/contrib/debugger/debug_runtime.py +0 -259
- mindspore/_akg/akg/tvm/contrib/peak.py +0 -341
- mindspore/_akg/akg/tvm/contrib/rpc.py +0 -25
- mindspore/_akg/akg/tvm/contrib/xcode.py +0 -257
- mindspore/_akg/akg/tvm/exec/__init__.py +0 -17
- mindspore/_akg/akg/tvm/exec/autotvm_log_editor.py +0 -60
- mindspore/_akg/akg/tvm/exec/measure_peak.py +0 -48
- mindspore/_akg/akg/tvm/exec/query_rpc_tracker.py +0 -48
- mindspore/_akg/akg/tvm/exec/rpc_proxy.py +0 -98
- mindspore/_akg/akg/tvm/exec/rpc_server.py +0 -88
- mindspore/_akg/akg/tvm/exec/rpc_tracker.py +0 -62
- mindspore/_akg/akg/tvm/rpc/__init__.py +0 -29
- mindspore/_akg/akg/tvm/rpc/base.py +0 -182
- mindspore/_akg/akg/tvm/rpc/client.py +0 -436
- mindspore/_akg/akg/tvm/rpc/proxy.py +0 -595
- mindspore/_akg/akg/tvm/rpc/server.py +0 -413
- mindspore/_akg/akg/tvm/rpc/tornado_util.py +0 -121
- mindspore/_akg/akg/tvm/rpc/tracker.py +0 -431
- mindspore/_extends/graph_kernel/expander.py +0 -80
- mindspore/_extends/graph_kernel/expanders/__init__.py +0 -57
- mindspore/_extends/graph_kernel/expanders/_utils.py +0 -269
- mindspore/_extends/graph_kernel/expanders/addn.py +0 -33
- mindspore/_extends/graph_kernel/expanders/batchnorm.py +0 -152
- mindspore/_extends/graph_kernel/expanders/batchnorm_grad.py +0 -105
- mindspore/_extends/graph_kernel/expanders/bias_add_grad.py +0 -49
- mindspore/_extends/graph_kernel/expanders/clip_by_norm_no_div_sum.py +0 -33
- mindspore/_extends/graph_kernel/expanders/complex/abs.py +0 -30
- mindspore/_extends/graph_kernel/expanders/complex/add.py +0 -44
- mindspore/_extends/graph_kernel/expanders/complex/div.py +0 -62
- mindspore/_extends/graph_kernel/expanders/complex/mul.py +0 -52
- mindspore/_extends/graph_kernel/expanders/complex/real_div.py +0 -62
- mindspore/_extends/graph_kernel/expanders/complex/sub.py +0 -45
- mindspore/_extends/graph_kernel/expanders/conv2d.py +0 -200
- mindspore/_extends/graph_kernel/expanders/dropout_grad.py +0 -30
- mindspore/_extends/graph_kernel/expanders/equal_count.py +0 -50
- mindspore/_extends/graph_kernel/expanders/erfc.py +0 -35
- mindspore/_extends/graph_kernel/expanders/expand_dims.py +0 -50
- mindspore/_extends/graph_kernel/expanders/fused_adam.py +0 -44
- mindspore/_extends/graph_kernel/expanders/fused_adam_weight_decay.py +0 -47
- mindspore/_extends/graph_kernel/expanders/fused_mul_add.py +0 -28
- mindspore/_extends/graph_kernel/expanders/gather.py +0 -43
- mindspore/_extends/graph_kernel/expanders/gelu_grad.py +0 -70
- mindspore/_extends/graph_kernel/expanders/gkdropout.py +0 -40
- mindspore/_extends/graph_kernel/expanders/identity.py +0 -25
- mindspore/_extends/graph_kernel/expanders/layernorm.py +0 -93
- mindspore/_extends/graph_kernel/expanders/layernorm_grad.py +0 -113
- mindspore/_extends/graph_kernel/expanders/logsoftmax.py +0 -46
- mindspore/_extends/graph_kernel/expanders/logsoftmax_grad.py +0 -36
- mindspore/_extends/graph_kernel/expanders/matmul.py +0 -80
- mindspore/_extends/graph_kernel/expanders/maximum_grad.py +0 -59
- mindspore/_extends/graph_kernel/expanders/minimum_grad.py +0 -80
- mindspore/_extends/graph_kernel/expanders/oneslike.py +0 -26
- mindspore/_extends/graph_kernel/expanders/reduce_mean.py +0 -43
- mindspore/_extends/graph_kernel/expanders/relu_grad.py +0 -32
- mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits_grad.py +0 -35
- mindspore/_extends/graph_kernel/expanders/sigmoid_grad.py +0 -31
- mindspore/_extends/graph_kernel/expanders/slice.py +0 -35
- mindspore/_extends/graph_kernel/expanders/softmax_cross_entropy_with_logits.py +0 -42
- mindspore/_extends/graph_kernel/expanders/softmax_grad_ext.py +0 -41
- mindspore/_extends/graph_kernel/expanders/softsign.py +0 -28
- mindspore/_extends/graph_kernel/expanders/sqrt_grad.py +0 -29
- mindspore/_extends/graph_kernel/expanders/square_sum_all.py +0 -44
- mindspore/_extends/graph_kernel/expanders/square_sum_v1.py +0 -37
- mindspore/_extends/graph_kernel/expanders/squared_difference.py +0 -43
- mindspore/_extends/graph_kernel/expanders/tanh_grad.py +0 -31
- mindspore/_extends/graph_kernel/expanders/tile.py +0 -54
- mindspore/_extends/graph_kernel/model/op_infer.py +0 -506
- mindspore/_extends/parse/jit_fallback_modules.py +0 -51
- mindspore/dataset/datapreprocess/preprocess_imagenet_validate_dataset.py +0 -54
- mindspore/dataset/engine/graphdata.py +0 -1586
- mindspore/include/api/net.h +0 -142
- mindspore/ops/_grad/grad_array_ops.py +0 -1347
- mindspore/ops/_grad/grad_clip_ops.py +0 -84
- mindspore/ops/_grad/grad_debug_ops.py +0 -68
- mindspore/ops/_grad/grad_inner_ops.py +0 -235
- mindspore/ops/_grad/grad_math_ops.py +0 -1684
- mindspore/ops/_grad/grad_nn_ops.py +0 -1529
- mindspore/ops/_grad/grad_other_ops.py +0 -89
- mindspore/ops/_grad/grad_sequence_ops.py +0 -296
- mindspore/ops/_grad/grad_sparse.py +0 -323
- mindspore/ops/_grad_experimental/grad_image_ops.py +0 -249
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +0 -195
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +0 -112
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ApproximateEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Argmax_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/Argmin_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/AssignSub_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Assign_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +0 -150
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +0 -66
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchToSpaceND_bprop.mindir +0 -28
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +0 -33
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +0 -306
- mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +0 -13
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Concat_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +0 -240
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +0 -247
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +0 -247
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +0 -315
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +0 -278
- mindspore/ops/bprop_mindir/DType_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +0 -58
- mindspore/ops/bprop_mindir/Depend_bprop.mindir +0 -13
- mindspore/ops/bprop_mindir/DepthToSpace_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +0 -138
- mindspore/ops/bprop_mindir/DiagPart_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +0 -25
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +0 -18
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +0 -27
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicShape_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Equal_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +0 -58
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +0 -54
- mindspore/ops/bprop_mindir/FloorDiv_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/GatherD_bprop.mindir +0 -26
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +0 -57
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/GreaterEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Greater_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/IOU_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/IsFinite_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/IsInf_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/IsNan_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +0 -126
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +0 -30
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +0 -43
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LessEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Less_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LinSpace_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/Load_bprop.mindir +0 -13
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/LogicalAnd_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LogicalNot_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/MaskedSelect_bprop.mindir +0 -21
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +0 -74
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +0 -74
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +0 -75
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +0 -65
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Maximum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Minimum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +0 -27
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +0 -35
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NonZero_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/NotEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +0 -26
- mindspore/ops/bprop_mindir/OnesLike_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +0 -82
- mindspore/ops/bprop_mindir/Range_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/Rank_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReduceAll_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/ReduceAny_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +0 -60
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +0 -89
- mindspore/ops/bprop_mindir/ReverseSequence_bprop.mindir +0 -52
- mindspore/ops/bprop_mindir/ReverseV2_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/Round_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/ScatterMax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterMin_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterNdUpdate_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/ScatterNd_bprop.mindir +0 -24
- mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/ScatterUpdate_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +0 -21
- mindspore/ops/bprop_mindir/Select_bprop.mindir +0 -31
- mindspore/ops/bprop_mindir/Shape_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +0 -21
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Sign_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/Slice_bprop.mindir +0 -26
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +0 -36
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +0 -33
- mindspore/ops/bprop_mindir/Sort_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SpaceToBatchND_bprop.mindir +0 -28
- mindspore/ops/bprop_mindir/SpaceToDepth_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Split_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +0 -54
- mindspore/ops/bprop_mindir/StridedSliceGrad_bprop.mindir +0 -95
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +0 -98
- mindspore/ops/bprop_mindir/Switch_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +0 -66
- mindspore/ops/bprop_mindir/TensorScatterAdd_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/TensorScatterUpdate_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/TensorShape_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TransShape_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/TruncateDiv_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/Unique_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Unstack_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +0 -32
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +0 -38
- mindspore/ops/bprop_mindir/ZerosLike_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/generate_mindir.py +0 -114
- mindspore/rewrite/node_visitor.py +0 -44
- mindspore/rewrite/topological_manager.py +0 -203
- mindspore/scipy/sparse/linalg.py +0 -192
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/WHEEL +0 -0
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/top_level.txt +0 -0
@@ -24,20 +24,21 @@ from mindspore.common import mutable
|
|
|
24
24
|
import mindspore.common._monad as monad
|
|
25
25
|
from mindspore.common.sparse_tensor import RowTensorInner
|
|
26
26
|
from mindspore.ops.composite.base import _append, _insert, _pop, _list_clear, _reverse, \
|
|
27
|
-
_extend, _dict_clear, _haskey, _update, _fromkeys
|
|
27
|
+
_extend, _dict_setitem, _dict_clear, _haskey, _update, _fromkeys
|
|
28
28
|
|
|
29
29
|
from ... import _checkparam as validator
|
|
30
|
-
from ..._checkparam import check_is_number, check_reshape_shp,
|
|
31
|
-
|
|
30
|
+
from ..._checkparam import check_is_number, check_reshape_shp, check_axis_in_range, \
|
|
31
|
+
check_axis_valid, check_and_canonicalize_axes
|
|
32
32
|
from ...ops import functional as F
|
|
33
33
|
from ...ops import operations as P
|
|
34
34
|
from ...ops import composite
|
|
35
|
-
from ...ops.
|
|
35
|
+
from ...ops.operations import array_ops
|
|
36
|
+
from ...ops.composite import MultitypeFuncGraph, env_get, hyper_add, \
|
|
36
37
|
zeros_like, ones_like, repeat_elements
|
|
37
38
|
from ...ops.composite.multitype_ops import _constexpr_utils as const_utils
|
|
38
39
|
from ...ops.composite.multitype_ops import _compile_utils as compile_utils
|
|
39
40
|
from ...ops.operations.math_ops import Median
|
|
40
|
-
from ...ops.operations._inner_ops import Format
|
|
41
|
+
from ...ops.operations._inner_ops import Format
|
|
41
42
|
from ...ops.operations import _csr_ops
|
|
42
43
|
from ...ops.operations import _map_tensor_ops
|
|
43
44
|
from ...ops.primitive import constexpr, _primexpr
|
|
@@ -57,7 +58,6 @@ size_op_ = P.Size()
|
|
|
57
58
|
_format = Format()
|
|
58
59
|
_reduce_sum_default = P.ReduceSum()
|
|
59
60
|
_reduce_sum_keepdims = P.ReduceSum(True)
|
|
60
|
-
_mean_keepdims = P.ReduceMean(True)
|
|
61
61
|
_csr_mm = _csr_ops.CSRMM()
|
|
62
62
|
|
|
63
63
|
itemsize_map = {mstype.bool_: 1, mstype.int8: 1, mstype.uint8: 1,
|
|
@@ -91,10 +91,7 @@ def mean(x, axis=None, keep_dims=False):
|
|
|
91
91
|
>>> print(output)
|
|
92
92
|
2.0
|
|
93
93
|
"""
|
|
94
|
-
|
|
95
|
-
axis = ()
|
|
96
|
-
reduce_mean = P.ReduceMean(keep_dims)
|
|
97
|
-
return reduce_mean(x, axis)
|
|
94
|
+
return F.mean(x, axis, keep_dims)
|
|
98
95
|
|
|
99
96
|
|
|
100
97
|
def ndimension(x):
|
|
@@ -109,7 +106,7 @@ def prod(input, axis=None, keep_dims=False):
|
|
|
109
106
|
Args:
|
|
110
107
|
input (Tensor): Input Tensor.
|
|
111
108
|
axis (Union[None, int, tuple(int), list(int)]): Dimensions of reduction,
|
|
112
|
-
when axis is None or empty tuple, reduce all dimensions. Default: None
|
|
109
|
+
when axis is None or empty tuple, reduce all dimensions. Default: ``None``.
|
|
113
110
|
keep_dims (bool): Whether to keep the reduced dimensions. Default: False.
|
|
114
111
|
|
|
115
112
|
Returns:
|
|
@@ -175,11 +172,7 @@ def all_(x, axis=(), keep_dims=False):
|
|
|
175
172
|
Returns:
|
|
176
173
|
Tensor, has the same data type as x.
|
|
177
174
|
"""
|
|
178
|
-
|
|
179
|
-
if axis is None:
|
|
180
|
-
axis = ()
|
|
181
|
-
reduce_all = P.ReduceAll(keep_dims)
|
|
182
|
-
return reduce_all(x, axis)
|
|
175
|
+
return F.all(x, axis, keep_dims)
|
|
183
176
|
|
|
184
177
|
|
|
185
178
|
def angle(x):
|
|
@@ -314,6 +307,52 @@ def slogdet(x):
|
|
|
314
307
|
return F.slogdet(x)
|
|
315
308
|
|
|
316
309
|
|
|
310
|
+
def cauchy(x, median=0.0, sigma=1.0):
|
|
311
|
+
r"""
|
|
312
|
+
Fills the tensor with numbers drawn from the Cauchy distribution. It is
|
|
313
|
+
defined as follows:
|
|
314
|
+
|
|
315
|
+
.. math::
|
|
316
|
+
f(x)= \frac{1}{\pi} \frac{\sigma}{(x-median)^2 +\sigma^2}
|
|
317
|
+
|
|
318
|
+
Args:
|
|
319
|
+
x (Tensor): Input tensor.
|
|
320
|
+
median (float, optional): the location parameter, specifying the location
|
|
321
|
+
of the peak of the distribution. Default: 0.0.
|
|
322
|
+
sigma (float, optional): the scale parameter which specifies the half-width
|
|
323
|
+
at half-maximum. Default: 1.0.
|
|
324
|
+
|
|
325
|
+
Returns:
|
|
326
|
+
Tensor. A Tensor with the same type and shape of input.
|
|
327
|
+
"""
|
|
328
|
+
out = P.Cauchy(list(x.shape), median, sigma)()
|
|
329
|
+
return F.cast(out, x.dtype)
|
|
330
|
+
|
|
331
|
+
|
|
332
|
+
def log_normal(x, mean=1.0, std=2.0):
|
|
333
|
+
r"""
|
|
334
|
+
Fills the elements of the input tensor with log normal values initialized by
|
|
335
|
+
given mean and std:
|
|
336
|
+
|
|
337
|
+
.. math::
|
|
338
|
+
\text{f}(x;1.0,2.0)=\frac{1}{x\delta \sqrt[]{2\pi} }e^{-\frac{(\ln x-\mu )^2}{2\delta ^2} }
|
|
339
|
+
|
|
340
|
+
where \mu, \delta is mean and standard deviation of log normal distribution respectively.
|
|
341
|
+
|
|
342
|
+
Args:
|
|
343
|
+
x (Tensor): Input tensor.
|
|
344
|
+
mean (float, optional): the mean of normal distribution. With float data type.
|
|
345
|
+
Default: 1.0.
|
|
346
|
+
std (float, optional): the std of normal distribution. With float data type.
|
|
347
|
+
Default: 2.0.
|
|
348
|
+
|
|
349
|
+
Returns:
|
|
350
|
+
Tensor. A Tensor with the same type and shape of input.
|
|
351
|
+
"""
|
|
352
|
+
log_normal = P.LogNormalReverse(mean, std)
|
|
353
|
+
return log_normal(x)
|
|
354
|
+
|
|
355
|
+
|
|
317
356
|
def chunk(x, chunks, axis=0):
|
|
318
357
|
r"""
|
|
319
358
|
For details, please refer to :func:`mindspore.ops.chunk`.
|
|
@@ -340,7 +379,7 @@ def hasattr(x, attr): # pylint: disable=redefined-builtin
|
|
|
340
379
|
Boolean value, indicates whether the object x has attribute attr.
|
|
341
380
|
"""
|
|
342
381
|
out = getattr(x, attr, mstype._null)
|
|
343
|
-
return not isinstance(out, mstype.
|
|
382
|
+
return not isinstance(out, mstype._NullType)
|
|
344
383
|
|
|
345
384
|
|
|
346
385
|
def astype(x, dtype, copy=True): # pylint: disable=redefined-outer-name
|
|
@@ -397,25 +436,25 @@ def multinomial(input, num_samples, replacement=True, seed=None):
     return F.multinomial(input, num_samples, replacement, seed)
 
 
-def tile(x, multiples):
+def tile(x, reps):
     r"""
-    Replicates an input tensor with given
+    Replicates an input tensor with given reps times.
 
-    Creates a new tensor by replicating `input_x` `
-    output tensor has `input_x.shape[i] *
-    are replicated `
+    Creates a new tensor by replicating `input_x` `reps` times. The i'th dimension of
+    output tensor has `input_x.shape[i] * reps[i]` elements, and the values of `input_x`
+    are replicated `reps[i]` times along the i'th dimension.
 
     Note:
-        The length of `
+        The length of `reps` must be greater or equal to the length of dimension in `input_x`.
 
     Args:
-
+        reps (tuple[int]): The parameter that specifies the number of replications,
            the parameter type is tuple, and the data type is int, i.e., :math:`(y_1, y_2, ..., y_S)`.
-            The length of `
+            The length of `reps` cannot be smaller than the length of the shape of `input_x`.
            Only constant value is allowed.
 
     Returns:
-        Tensor, has the same data type as the `input_x`. Suppose the length of `
+        Tensor, has the same data type as the `input_x`. Suppose the length of `reps` is `d`,
        the dimension of `input_x` is `input_x.dim`, and the shape of `input_x` is :math:`(x_1, x_2, ..., x_S)`.
 
        - If `input_x.dim = d`, then the shape of their corresponding positions can be multiplied, and
@@ -426,9 +465,9 @@ def tile(x, multiples):
          :math:`(1*y_1, ..., x_S*y_R)`.
 
     Raises:
-        TypeError: If `
-        ValueError: If the elements of `
-        ValueError: If the length of `
+        TypeError: If `reps` is not a tuple or its elements are not all int.
+        ValueError: If the elements of `reps` are not all greater than 0.
+        ValueError: If the length of `reps` are smaller than the length of dimension in `input_x`.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -437,15 +476,15 @@ def tile(x, multiples):
        >>> import mindspore as ms
        >>> from mindspore import Tensor
        >>> input_x = Tensor(np.array([[1, 2], [3, 4]]), mindspore.float32)
-        >>>
-        >>> output = input_x.tile(
+        >>> reps = (2, 3)
+        >>> output = input_x.tile(reps)
        >>> print(output)
        [[1. 2. 1. 2. 1. 2.]
         [3. 4. 3. 4. 3. 4.]
         [1. 2. 1. 2. 1. 2.]
         [3. 4. 3. 4. 3. 4.]]
-        >>>
-        >>> output = input_x.tile(
+        >>> reps = (2, 3, 2)
+        >>> output = input_x.tile(reps)
        >>> print(output)
        [[[1. 2. 1. 2.]
          [3. 4. 3. 4.]
@@ -460,7 +499,7 @@ def tile(x, multiples):
         [1. 2. 1. 2.]
         [3. 4. 3. 4.]]]
     """
-    return F.tile(x,
+    return F.tile(x, reps)
 
 
 def short(x):
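The only behavioural change in these hunks is the rename of `tile`'s second parameter from `multiples` to `reps`; positional calls keep working. A minimal NumPy stand-in for the shape rule stated in the docstring (illustration only, not MindSpore code):

```python
import numpy as np

# Documented rule: output.shape[i] == input.shape[i] * reps[i]
# when len(reps) equals the input rank.
input_x = np.array([[1, 2], [3, 4]], dtype=np.float32)
reps = (2, 3)
out = np.tile(input_x, reps)
assert out.shape == (input_x.shape[0] * reps[0], input_x.shape[1] * reps[1])
```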
@@ -506,7 +545,7 @@ def transpose(x, *axis):
        (3, 2, 1)
     """
     ndim = F.rank(x)
-    perm =
+    perm = validator.check_transpose_axis(axis, ndim)
     return F.transpose(x, perm)
 
 
@@ -716,6 +755,20 @@ def scatter(self, axis, index, src):
     return F.scatter(self, axis, index, src)
 
 
+def slice_scatter(input, src, axis=0, start=None, end=None, step=1):
+    r"""
+    Embeds the src into the input Tensor according to `axis`.
+    """
+    return F.slice_scatter(input, src, axis, start, end, step)
+
+
+def select_scatter(input, src, axis, index):
+    r"""
+    On the specified dimension `axis` of `input` , `src` is scattered into `input` on the specified `index` of `input` .
+    """
+    return F.select_scatter(input, src, axis, index)
+
+
 def swapaxes(input, axis0, axis1):
     """
     Interchange two axes of a tensor.
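For readers new to the scatter variants added above, the following is a plain-NumPy model of the slice_scatter behaviour described in the docstring. It is an illustrative sketch only; the function name and keywords simply mirror the signature shown in the diff, not the MindSpore implementation.

```python
import numpy as np

def slice_scatter_ref(inp, src, axis=0, start=None, end=None, step=1):
    """Copy `src` into the slice of `inp` selected along `axis`."""
    out = inp.copy()
    index = [slice(None)] * inp.ndim
    index[axis] = slice(start, end, step)
    out[tuple(index)] = src
    return out

base = np.zeros((4, 3))
patch = np.ones((2, 3))
print(slice_scatter_ref(base, patch, axis=0, start=1, end=3))
```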
@@ -755,12 +808,7 @@ def squeeze(x, axis=None):
        >>> print(x.shape)
        (2, 2)
     """
-
-    if axis is None:
-        return F.squeeze(x)
-    # yield squeezed shape based on the axes
-    new_shape = prepare_shape_for_squeeze(shape, axis)
-    return F.reshape(x, new_shape)
+    return F.squeeze(x, axis)
 
 
 def unbind(input, dim=0):
@@ -775,7 +823,7 @@ def argmax(x, axis=None, keepdims=False):
     Args:
        axis (Union[int, None], optional): The dimension to reduce.
            If `axis` is None, the indices of the maximum value within the
-            flattened input will be returned. Default: None
+            flattened input will be returned. Default: ``None``.
        keepdims (bool, optional): Whether the output tensor retains the
            specified dimension. Ignored if `axis` is None. Default: False.
 
@@ -795,15 +843,7 @@ def argmax(x, axis=None, keepdims=False):
        >>> print(a.argmax())
        5
     """
-
-    if axis is None:
-        x = ravel(x)
-        axis = 0
-        is_axis_none = True
-    out = P.Argmax(axis, mstype.int64)(x)
-    if keepdims and not is_axis_none:
-        out = expand_dims(out, axis)
-    return out
+    return F.argmax(x, axis, keepdims)
 
 
 def argmin(x, axis=None, keepdims=False):
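As the docstring above states, the default `axis=None` returns an index into the flattened input. A short NumPy analogue of that convention (illustration only):

```python
import numpy as np

a = np.arange(2 * 3).reshape(2, 3)
print(a.argmax())        # 5, an index into a.ravel()
print(a.argmax(axis=1))  # [2 2], one index per row
```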
@@ -1169,6 +1209,13 @@ def logaddexp2(input, other):
     return F.logaddexp2(input, other)
 
 
+def logcumsumexp(input, axis):
+    """
+    Computes the logarithm of the sum of exponentiations of the inputs along specified dimension.
+    """
+    return F.logcumsumexp(input, axis)
+
+
 def logsumexp(input, axis, keepdims=False):
     """
     Reduces a dimension of a tensor by calculating exponential for all elements in the dimension,
@@ -1239,7 +1286,7 @@ def permute(input, *axis):
     Permutes the dimensions of the input tensor according to input permutation.
     """
     ndim = F.rank(input)
-    perm =
+    perm = validator.check_transpose_axis(axis, ndim)
     return F.permute(input, perm)
 
 
@@ -1373,8 +1420,9 @@ def diagonal(x, offset=0, axis1=0, axis2=1):
 
     e = F.eye(n, m, dtype)
     if offset >= m or offset <= -n:
-
-
+        zero_shape = shape[:-2] + (0,)
+        return F.zeros(zero_shape, dtype)
+    if offset != 0:
        e = e.astype(mstype.float32)
        if offset > 0:
            e_left = F.fill(dtype, (n, offset), 0)
@@ -1404,6 +1452,13 @@ def diagonal(x, offset=0, axis1=0, axis2=1):
     return res.astype(dtype)
 
 
+def diagonal_scatter(input, src, offset, dim1=0, dim2=1):
+    r"""
+    Embed `src` into the diagonal of `input` according to the `dim1` and `dim2`.
+    """
+    return F.diagonal_scatter(input, src, offset, dim1, dim2)
+
+
 def digamma(input):
     """
     Computes the logarithmic derivative of the gamma function on input.
@@ -1481,6 +1536,20 @@ def float_power(x, exponent):
     return F.float_power(x, exponent)
 
 
+def fmax(input, other):
+    """
+    For details, please refer to :func:`mindspore.ops.fmax`.
+    """
+    return F.fmax(input, other)
+
+
+def fmin(input, other):
+    """
+    For details, please refer to :func:`mindspore.ops.fmin`.
+    """
+    return F.fmin(input, other)
+
+
 def fmod(x, other):
     """
     For details, please refer to :func:`mindspore.ops.fmod`.
@@ -1639,6 +1708,22 @@ def take(x, indices, axis=None, mode='clip'):
     return res.reshape(shape_out)
 
 
+def ms_type(input, dtype=None):
+    r"""
+    Change the dtype of the Tensor to the `dtype` . Return the type if `dtype` is None.
+    """
+    if dtype is None:
+        return str(input.dtype)
+    return input.astype(dtype)
+
+
+def type_as(input, other):
+    r"""
+    Change the dtype of `input` to the dtype of `other`.
+    """
+    return input.astype(other.dtype)
+
+
 def _infer_out_shape(*shapes):
     """
     Returns shape of output after broadcasting. Raises ValueError if shapes cannot be broadcast.
@@ -1646,8 +1731,8 @@ def _infer_out_shape(*shapes):
     shape_out = list()
     max_len = ms_max([len(it) for it in shapes])
     for i in range(max_len):
-        items = [it[i-(max_len-len(it))] if i - (max_len - len(it))
-
+        items = [it[i - (max_len - len(it))] if i - (max_len - len(it))
+                 >= 0 else 1 for it in shapes]
        max_size = 0 if 0 in items else ms_max(items)
        shape_out.append(max_size)
     return tuple(shape_out)
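The hunk above only reflows the broadcast-shape computation; the logic is unchanged. A self-contained Python sketch of the same rule, using plain `max` instead of the in-graph `ms_max`:

```python
def infer_out_shape(*shapes):
    """Right-align the shapes, treat missing axes as size 1, take the per-axis
    maximum (or 0 when any operand is empty on that axis)."""
    max_len = max(len(s) for s in shapes)
    out = []
    for i in range(max_len):
        items = [s[i - (max_len - len(s))] if i - (max_len - len(s)) >= 0 else 1
                 for s in shapes]
        out.append(0 if 0 in items else max(items))
    return tuple(out)

assert infer_out_shape((2, 1, 3), (4, 3)) == (2, 4, 3)
```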
@@ -1758,12 +1843,14 @@ def searchsorted(x, v, side='left', sorter=None):
        >>> print(x.searchsorted(3))
        2
     """
+
     def get_log2_size(size):
        """Get log2 size"""
-        log2_res = F.log2(F.cast(
+        log2_res = F.log2(F.cast(size, mstype.float32))
        ceil_res = F.ceil(log2_res)
        cast_res = F.cast(ceil_res, mstype.int64)
        return cast_res
+
     if side not in ('left', 'right'):
        const_utils.raise_value_error('invalid value for keyword "side"')
     a = x.astype(mstype.float32)
@@ -1801,8 +1888,6 @@ def fill(x, value):
            const_utils.raise_type_error("If None is used as value, the original Tensor's dtype must be float.")
        value = nan_tensor
        return F.tile(value, x.shape).astype(x.dtype)
-    if not isinstance(value, (int, float, bool)):
-        const_utils.raise_type_error("input value must be a scalar.")
     return F.fill(x.dtype, x.shape, value)
 
 
@@ -1813,6 +1898,15 @@ def fills(x, value):
     return F.fills(x, value)
 
 
+def fill_diagonal(x, fill_value, wrap=False):
+    """
+    Fills the main diagonal of a Tensor with a specified value and returns the result. The input has at least
+    2 dimensions, and all dimensions of input must be equal in length when the dimension of input is greater than 2.
+    """
+
+    return P.FillDiagonal(fill_value, wrap)(x)
+
+
 def ptp(x, axis=None, keepdims=False):
     """
     The name of the function comes from the acronym for "peak to peak".
@@ -1823,7 +1917,7 @@ def ptp(x, axis=None, keepdims=False):
     Args:
        x (Tensor): Input tensor.
        axis (Union[None, int, tuple(int)]): Axis or axes along which the range is computed.
-            The default is to compute the variance of the flattened array. Default: None
+            The default is to compute the variance of the flattened array. Default: ``None``.
        keepdims (bool): Default is False.
 
     Returns:
@@ -1909,7 +2003,7 @@ def var(x, axis=None, ddof=0, keepdims=False):
        axis = ()
     else:
        axis = check_and_canonicalize_axes(axis, x.ndim)
-    x_mean =
+    x_mean = F.mean(x, axis, True)
     x_sub = F.tensor_sub(x, x_mean)
     x_pow = F.tensor_pow(x_sub, 2)
     if keepdims:
@@ -1982,7 +2076,7 @@ def sum(input, axis=None, dtype=None, keepdims=False, initial=None):  # pylint:
 
     Args:
        input (Union[int, float, bool, list, tuple, Tensor]): Elements to sum.
-        axis (Union[None, int, tuple(int)]): Axis or axes along which a sum is performed. Default: None
+        axis (Union[None, int, tuple(int)]): Axis or axes along which a sum is performed. Default: ``None``.
            If None, sum all of the elements of the input array.
            If axis is negative it counts from the last to the first axis.
            If axis is a tuple of ints, a sum is performed on all of the axes specified in the tuple
@@ -2017,27 +2111,34 @@ def sum(input, axis=None, dtype=None, keepdims=False, initial=None):  # pylint:
        >>> print(input_x.sum(axis=1))
        [10. 35.]
     """
-    input_x = input.astype(mstype.int32) if input.dtype == mstype.bool_ else input
-    dtype = input_x.dtype if dtype is None else dtype
-    dtype = check_astype_dtype_const(dtype)
-    if not isinstance(keepdims, int):
-        const_utils.raise_type_error("integer argument expected")
     if initial is not None and not isinstance(initial, (int, float, bool)):
-
-
-        axis = ()
-    else:
-        axis = check_and_canonicalize_axes(axis, input.ndim)
-
-    if not check_type_support(input_x.dtype, 'GPU', (mstype.float64, mstype.float32, mstype.float16)):
-        input_x = input_x.astype(mstype.float32)
-    if keepdims:
-        res = _reduce_sum_keepdims(input_x, axis)
-    else:
-        res = _reduce_sum_default(input_x, axis)
+        raise TypeError(f"For Tensor.sum, initial must be int, float or bool, but got {type(initial)}.")
+    res = F.sum(input, axis, keepdims)
     if initial is not None:
        res += initial
-
+    if dtype is not None:
+        res = res.astype(dtype)
+    return res
+
+
+@_primexpr
+def _check_sum_to_size(size, input_dim, shape_input):
+    """Check the length of size of sum_to_size."""
+    if len(size) > input_dim:
+        raise ValueError(f"For sum_to_size, size {size} is not expandable to the tensor size {shape_input}.")
+
+
+@_primexpr
+def _count_axes(size, input_shape, shape_input):
+    """Count the sum axes for sum_to_size."""
+    axes = []
+    for i in range(len(size)):
+        element = size[i]
+        if element != input_shape[i] and element == 1:
+            axes.append(i)
+        elif element != input_shape[i]:
+            raise ValueError(f"For sum_to_size, size {size} is not expandable to the tensor size {shape_input}.")
+    return axes
 
 
 def sum_to_size(input, *size):
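The rewritten body above delegates the reduction to `F.sum` and only keeps the `initial` and `dtype` handling in Python. A NumPy analogue of that flow (illustration only):

```python
import numpy as np

x = np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])
res = x.sum(axis=1)          # the delegated reduction
res = res + 10               # `initial`, added after the reduction
res = res.astype(np.int32)   # `dtype`, applied last
print(res)                   # [20 36]
```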
@@ -2047,17 +2148,12 @@ def sum_to_size(input, *size):
     if len(size) == 1 and isinstance(size[0], tuple):
        size = size[0]
     shape_input = input.shape
-
-        raise ValueError(f"For sum_to_size, size {size} is not expandable to the tensor size {shape_input}.")
+    _check_sum_to_size(size, input.ndim, shape_input)
     if len(size) < input.ndim:
        pre_axis = tuple(axis for axis in range(input.ndim - len(size)))
        input = input.sum(pre_axis)
-
-
-        if element != input.shape[i] and element == 1:
-            axes.append(i)
-        elif element != input.shape[i]:
-            raise ValueError(f"For sum_to_size, size {size} is not expandable to the tensor size {shape_input}.")
+
+    axes = _count_axes(size, input.shape, shape_input)
     if axes:
        return input.sum(tuple(axes), keepdims=True)
     return input
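The loop removed above now lives in the `_count_axes` helper introduced in the previous hunk. A plain-Python sketch of what it computes (not graph code):

```python
def count_axes(size, input_shape):
    """Axes where the requested size is 1 but the tensor is larger get summed;
    any other mismatch is rejected."""
    axes = []
    for i, element in enumerate(size):
        if element != input_shape[i] and element == 1:
            axes.append(i)
        elif element != input_shape[i]:
            raise ValueError(f"size {size} is not expandable to {input_shape}")
    return axes

assert count_axes((1, 3), (4, 3)) == [0]
```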
@@ -2070,6 +2166,21 @@ def nansum(input, axis=None, keepdims=False, *, dtype=None):
     return F.nansum(input, axis=axis, keepdims=keepdims, dtype=dtype)
 
 
+def nanmean(input, axis=None, keepdims=False, *, dtype=None):
+    r"""
+    Computes the mean of input tensor, ignoring NaN.
+    """
+    return F.nanmean(input, axis, keepdims, dtype=dtype)
+
+
+def nanmedian(input, axis=-1, keepdims=False):
+    r"""
+    Computes the median and indices of input tensor, ignoring NaN.
+    If all elements in the specified dimensions are NaN, the result will be NaN.
+    """
+    return F.nanmedian(input, axis, keepdims)
+
+
 def repeat(x, repeats, axis=None):
     """
     Repeat elements of an array.
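For orientation, the NumPy counterparts of the two NaN-ignoring reductions added above behave as follows (illustration only):

```python
import numpy as np

x = np.array([1.0, np.nan, 3.0])
print(np.nanmean(x))    # 2.0, the NaN is skipped
print(np.nanmedian(x))  # 2.0
```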
@@ -2233,7 +2344,7 @@ def itemset(data, *args):
 
 def ms_iter(xs):
     """Implementation of `iter`."""
-    return xs.__ms_iter__
+    return xs.__ms_iter__
 
 
 def ms_next(it):
@@ -2246,47 +2357,6 @@ def hasnext(it):
     return it.__ms_hasnext__()
 
 
-@constexpr
-def constant_abs(x):
-    """Returns the absolute value of the constant."""
-    if x is None:
-        raise ValueError("For abs(), the input should be a constant or Tensor type.")
-    return abs(x)
-
-
-def ms_abs(x):
-    """Implementation of `abs`."""
-    if isinstance(x, Tensor):
-        return abs_(x)
-    return constant_abs(x)
-
-
-@constexpr
-def constant_round(*data):
-    """Returns the rounded value of the constant."""
-    for x in data:
-        if x is None:
-            raise ValueError(
-                "For round(), the input should be a Tensor or 1-2 constants.")
-    return round(*data)
-
-
-def ms_round(*data):
-    """Implementation of `round`."""
-    len_data = len(data)
-    if len_data <= 0 or len_data > 2:
-        const_utils.raise_type_error("round() requires 1 or 2 arguments.")
-    if len_data == 1 or data[1] is None:
-        x = data[0]
-        if isinstance(x, Tensor):
-            return round_(x)
-        return constant_round(x)
-    if isinstance(data[0], Tensor) or isinstance(data[1], Tensor):
-        const_utils.raise_type_error(
-            "When applying round() to tensor, only one tensor is supported as input.")
-    return constant_round(*data)
-
-
 @constexpr
 def cast_to_str(data):
     return str(data)
@@ -2300,13 +2370,9 @@ def str_func(*data):
     if data_len == 0:
        return ''
     data = data[0]
-    if
-
-
-    if not F.isconstant(data):
-        const_utils.raise_type_error(
-            "str() does not support non-constant input.")
-    return cast_to_str(data)
+    if F.isconstant(data):
+        return cast_to_str(data)
+    return data.__str__()
 
 
 @constexpr
@@ -2322,22 +2388,20 @@ def bool_func(*data):
     if data_len == 0:
        return False
     data = data[0]
-    if isinstance(data, (CSRTensor, COOTensor, RowTensorInner)):
-        const_utils.raise_type_error(
-            "bool() does not support sparse tensor input.")
     if isinstance(data, (Tensor, Tensor_)):
        tensor_shape = F.shape(data)
        tensor_shape_len = len(tensor_shape)
        if tensor_shape_len == 0 or (tensor_shape_len == 1 and tensor_shape[0] == 1):
-            return data
-
-            "The truth value of an array with more than one element is ambiguous.")
+            return F.scalar_cast(data, mstype.bool_)
+        raise ValueError("The truth value of an array with more than one element is ambiguous.")
     if not F.isconstant(data):
        if hasattr(data, "__bool__"):
            return data.__bool__()
        if hasattr(data, "__len__"):
            return len(data) != 0
-        return
+        return F.scalar_cast(data, mstype.bool_)
+    if isinstance(data, (CSRTensor, COOTensor, RowTensorInner)):
+        raise TypeError("bool() does not support sparse tensor input.")
     return cast_to_bool(data)
 
 
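The rewritten `bool_func` keeps the usual truth-value rule for tensors: only a scalar or a single-element 1-D tensor converts cleanly; everything else is ambiguous. A NumPy sketch of that rule (illustration only):

```python
import numpy as np

def tensor_truth_value(arr):
    if arr.ndim == 0 or (arr.ndim == 1 and arr.shape[0] == 1):
        return bool(arr)
    raise ValueError("The truth value of an array with more than one element is ambiguous.")

print(tensor_truth_value(np.array(3)))    # True
print(tensor_truth_value(np.array([0])))  # False
```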
@@ -2362,11 +2426,10 @@ def int_func(*data):
     base = 10
     if data_len == 2:
        base = data[1]
-    if isinstance(target, (Tensor, Tensor_, int, float, bool)) and base == 10 and not F.isconstant(target):
-        return F.scalar_cast(target, mstype.int64)
     if not F.isconstant(target):
-
-            "int() does not support non-constant input.")
+        if base != 10:
+            const_utils.raise_type_error("int() does not support non-constant input when 'base' is specified.")
+        return F.scalar_cast(target, mstype.int64)
     if isinstance(target, (CSRTensor, COOTensor, RowTensorInner)):
        const_utils.raise_type_error(
            "int() does not support sparse tensor input.")
@@ -2388,11 +2451,8 @@ def float_func(*data):
     if data_len == 0:
        return 0.0
     data = data[0]
-    if isinstance(data, (Tensor, Tensor_, int, float, bool)) and not F.isconstant(data):
-        return F.scalar_cast(data, mstype.float32)
     if not F.isconstant(data):
-
-            "float() does not support non-constant input.")
+        return F.scalar_cast(data, mstype.float32)
     if isinstance(data, (CSRTensor, COOTensor, RowTensorInner)):
        const_utils.raise_type_error(
            "float() does not support sparse tensor input.")
@@ -2431,17 +2491,15 @@ def tuple_func(*data):
     """Implementation of `tuple`."""
     data_len = len(data)
     if data_len >= 2:
-
+        raise TypeError("tuple() requires 0 or 1 arguments.")
     if data_len == 0:
        return F.make_tuple()
     data = data[0]
     if isinstance(data, (CSRTensor, COOTensor, RowTensorInner)):
-
-            "tuple() does not support single sparse tensor input.")
+        raise TypeError("tuple() does not support single sparse tensor input.")
     if not isinstance(data, Tensor) and not hasattr(data, "__ms_iter__"):
        data_type = F.typeof(data)
-
-            str(data_type) + " object is not iterable.")
+        raise TypeError(str(data_type) + " object is not iterable.")
     if isinstance(data, dict):
        data = data.keys()
     if isinstance(data, (tuple, list)) and F.is_sequence_shape_unknown(data):
@@ -2704,52 +2762,23 @@ def ms_sum(*data):
     return result
 
 
-@constexpr
-def python_len(data):
-    """Return the result of python built-in len function"""
-    return len(data)
-
-
 def ms_len(data):
     """Implementation of `len`."""
-    if not isinstance(data, Tensor) and F.isconstant(data):
-        return python_len(data)
-    return data.__len__()
-
-
-@constexpr
-def python_len_with_check(data):
-    """Return the result of python built-in len function with iterable check"""
-    if not hasattr(data, "__iter__"):
-        raise TypeError(str(type(data)) +
-                        " object is not iterable in graph mode.")
-    return len(data)
-
-
-def ms_len_with_iterable_check(data):
-    """Implementation of `len` with iterable check, used in len of condition."""
-    if not isinstance(data, Tensor) and F.isconstant(data):
-        return python_len_with_check(data)
-    if not hasattr(data, "__len__"):
-        type_str = str(F.typeof(data))
-        const_utils.raise_type_error(
-            type_str + " object is not iterable in graph mode.")
     return data.__len__()
 
 
-def ms_next_with_dyn_input_check(it):
-    """Implementation of `next` with daynamic input check."""
-    if isinstance(it, (tuple, list)) and F.is_sequence_shape_unknown(it):
-        raise ValueError(f"For 'ListComprehension' syntax [i for i in x], "
-                         f"input x can not be dynamic length list/tuple in graph mode")
-    return it.__ms_hasnext__()
-
-
 def floor(x):
     """Rounds a tensor down to the closest integer element-wise."""
     return x.__floor__()
 
 
+def floor_divide(input, other):
+    r"""
+    Divides the first input tensor by the second input tensor element-wise and round down to the closest integer.
+    """
+    return F.floor_divide(input, other)
+
+
 def uadd(x):
     """Implementation of `uadd`."""
     return x.__pos__()
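The new `floor_divide` delegate rounds the element-wise quotient down, which matters for negative operands. NumPy uses the same convention (illustration only):

```python
import numpy as np

print(np.floor_divide(np.array([7, -7]), 2))  # [ 3 -4]: rounded toward -inf
```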
@@ -2775,19 +2804,25 @@ def bool_(x):
     return x.__bool__()
 
 
+def check_len_(x):
+    """Check length is not 0"""
+    return x.__len__() != 0
+
+
+def real_bool_(x):
+    """bool function to get truth value"""
+    return bool(x)
+
+
 def enumerate_(x, start=0):
     """Enumerate list or tuple or tensor."""
     x_type = F.typeof(x)
     ret = ()
     op_name = "enumerate"
-    if
-            check_is_const_int(start, op_name, "start"):
+    if check_is_const_int(start, op_name, "start"):
        if check_is_tensor(x_type):
            for i in range(x.shape[0]):
                ret += ((start + i, x[i]),)
-        elif F.is_sequence_shape_unknown(x):
-            const_utils.raise_value_error(
-                "For 'enumerate', the dynamic length input is unsupported in graph mode")
        else:
            ret = zip(range(start, start + len(x)), x)
     return ret
@@ -2857,12 +2892,19 @@ def to_coo(x):
     return F.dense_to_sparse_coo(x)
 
 
+def tolist(x):
+    """
+    Convert a Tensor to List, if the input is Tensor scalar, Python scalar will be returned.
+    """
+    return x.asnumpy().tolist()
+
+
 @constexpr
 def check_select_condition(cond_type):
     """
     Check select input condition.
     """
-    if isinstance(cond_type, mstype.
+    if isinstance(cond_type, mstype.TensorType):
        return
     raise TypeError(
        f"For select, the argument condition should be Tensor, but got {cond_type}.")
@@ -3015,15 +3057,6 @@ def ge(x, y):
     return F.ge(x, y)
 
 
-def while_cond(x):
-    """For while condition, if the condition is a tensor, the loop will not be unrolled"""
-    if issubclass_(F.typeof(x), F.typeof(mstype.tensor)):
-        is_cond = check_is_tensor_bool_cond(F.shape(x))
-        if is_cond:
-            return F.cast(x, mstype.bool_)
-    return x
-
-
 def tensor_scatter_add(x, indices, updates):
     """
     Creates a new tensor by adding the values from the positions in `x` indicated by
@@ -3216,7 +3249,7 @@ def get_itemsize(x_type):
 @constexpr(check=False)
 def check_is_tensor(x):
     """check whether x is tensor."""
-    if isinstance(x, mstype.
+    if isinstance(x, mstype.TensorType):
        return True
     return False
 
@@ -3224,13 +3257,12 @@ def check_is_tensor(x):
 @constexpr
 def check_is_tuple_or_list_or_tensor(x, op_name, arg_name):
     """check whether x is list or tuple or tensor."""
-    if isinstance(x, (mstype.List, mstype.Tuple, mstype.
+    if isinstance(x, (mstype.List, mstype.Tuple, mstype.TensorType)):
        return True
     raise TypeError(
        f"For '{op_name}', the '{arg_name}' should be tuple or list or tensor, but got {x}.")
 
 
-@constexpr
 def check_is_const_int(x, op_name, arg_name):
     """check whether x is const int."""
     if x is None:
@@ -3242,17 +3274,6 @@ def check_is_const_int(x, op_name, arg_name):
     return True
 
 
-@_primexpr
-def check_is_tensor_bool_cond(shp):
-    """check if tensor is a bool condition"""
-    if not shp or (len(shp) == 1 and shp[0] == 1):
-        return True
-    if None in shp:
-        raise ValueError(f"Only tensor which shape is () or (1,) can be converted to bool, but got tensor shape is "
-                         f"None")
-    raise ValueError(f"Only tensor which shape is () or (1,) can be converted to bool, but got tensor shape is {shp}")
-
-
 @constexpr
 def const_tensor_to_bool(x):
     """convert bool tensor to bool condition
@@ -3286,7 +3307,6 @@ def check_view_shape(x):
 
 
 check_astype_dtype_const = constexpr(validator.check_astype_dtype)
-check_transpose_axis_const = constexpr(validator.check_transpose_axis)
 max_ = constexpr(validator.max_)
 min_ = constexpr(validator.min_)
 expanded_shape = validator.expanded_shape
@@ -3308,7 +3328,7 @@ def empty_compile(dtype, shape):
 
 def tensor_bool(x):
     """tensor as condition, if is constant, return immediate bool value"""
-    is_cond =
+    is_cond = F.is_tensor_bool_cond(x)
     if is_cond and F.isconstant(x):
        return const_tensor_to_bool(x)
     return F.cast(x, mstype.bool_)
@@ -3433,21 +3453,6 @@ def sequence_index(sequence, target, start=None, end=None):
     return SequenceIndex()(sequence, target, start, end)
 
 
-def list_bool(x):
-    """Implementation of `tuple_bool`."""
-    return len(x) != 0
-
-
-def tuple_bool(x):
-    """Implementation of `tuple_bool`."""
-    return len(x) != 0
-
-
-def dict_bool(x):
-    """Implementation of `dict_bool`."""
-    return len(x) != 0
-
-
 def none_bool(x):
     """Implementation of `none_bool`."""
     return False
@@ -3517,7 +3522,7 @@ def triu(input, diagonal=0):
 
 def tuple_next(xs):
     """Next tuple."""
-    return xs[0],
+    return xs[0], xs[1:]
 
 
 def tuple_hasnext(xs):
@@ -3527,7 +3532,7 @@ def tuple_hasnext(xs):
 
 def list_next(xs):
     """Next list."""
-    return xs[0],
+    return xs[0], xs[1:]
 
 
 def list_hasnext(xs):
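The two fixes above restore the tail part of the head/tail pair that `next` is expected to yield for tuples and lists. A plain-Python sketch of that protocol:

```python
def tuple_next(xs):
    """Return the head element and the remaining tail."""
    return xs[0], xs[1:]

head, tail = tuple_next((1, 2, 3))
assert head == 1 and tail == (2, 3)
```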
@@ -3571,7 +3576,7 @@ def list_append(self_, list_item):
 
 def list_insert(self_, index, obj):
     """Insert into list"""
-    if F.is_sequence_shape_unknown(self_) or not F.isconstant(index)
+    if F.is_sequence_shape_unknown(self_) or not F.isconstant(index):
        return ListInsert()(self_, index, obj)
     return _insert(self_, index, obj)
 
@@ -3604,6 +3609,11 @@ def dict_get(self_, key_index, default_value=None):
     return F.dict_getitem(self_, key_index)
 
 
+def dict_setitem(self_, key, target):
+    """Dictionary setitem"""
+    return _dict_setitem(self_, key, target)
+
+
 def dict_clear(self_):
     """Clear the dict"""
     return _dict_clear(self_)
@@ -3733,7 +3743,6 @@ def bernoulli(input, p=0.5, seed=None):
     """
     Randomly draws binary numbers from a Bernoulli distribution.
     """
-    check_is_int(seed, 'bernoulli', 'seed')
     return F.bernoulli(input, p, seed)
 
 
@@ -3802,6 +3811,14 @@ def xlogy(x, y):
     return F.xlogy(x, y)
 
 
+def eigvals(x):
+    r"""
+    Computes the eigenvalues of a square matrix(batch square matrices).
+    Refer to :func:`mindspore.ops.eigvals` for more detail.
+    """
+    return F.eigvals(x)
+
+
 def erf(x):
     r"""
     Computes the Gauss error function of `x` element-wise.
@@ -3847,6 +3864,13 @@ def cos(x):
     return F.cos(x)
 
 
+def count_nonzero(x, axis=(), keep_dims=False, dtype=mstype.int32):
+    r"""
+    For details, please refer to :func:`mindspore.ops.count_nonzero`.
+    """
+    return F.count_nonzero(x, axis, keep_dims, dtype)
+
+
 def cov(x, *, correction=1, fweights=None, aweights=None):
     r"""
     For details, please refer to :func:`mindspore.ops.cov`.
@@ -3907,7 +3931,7 @@ def addmv(x, mat, vec, beta=1, alpha=1):
     r"""
     Multiplies matrix `mat` and vector `vec`. The vector `x` is added to the final result.
     """
-    return F.addmv(x, mat, vec, beta=beta, alpha=
+    return F.addmv(x, mat, vec, beta=beta, alpha=beta)
 
 
 def adjoint(x):
@@ -4015,6 +4039,14 @@ def cholesky_inverse(input_x, upper=False):
     return F.cholesky_inverse(input_x, upper=upper)
 
 
+def cholesky_solve(input, input2, upper=False):
+    r"""
+    Computes the solution of a set of linear equations with a positive definite matrix,
+    according to its Cholesky decomposition factor `input2` .
+    """
+    return F.cholesky_solve(input, input2, upper=upper)
+
+
 def map_tensor_get(map_tensor, key_tensor, insert_default_value=True):
     r"""
     Get or create value according the key tensor from a map tensor.
@@ -4117,7 +4149,8 @@ def expand(input, size):
     r"""
     Returns a new view of the self tensor with singleton dimensions expanded to a larger size.
     """
-
+    size = P.TensorToTuple()(size)
+    return F.broadcast_to(input, size)
 
 
 def cumprod(input, dim, dtype=None):
@@ -4139,6 +4172,13 @@ def div(input, value, *, rounding_mode=None):
     return F.div(input, value, rounding_mode=rounding_mode)
 
 
+def eq(input, other):
+    r"""
+    Computes the equivalence between the tensor `input` and the given input tensor `other` element-wise.
+    """
+    return F.equal(input, other)
+
+
 def equal(x, y):
     r"""
     Computes the equivalence between the tensor `x` and the given input tensor `y` element-wise.
@@ -4413,6 +4453,13 @@ def qr(input, some=True):
     return F.qr(input, 'reduced' if some else 'complete')
 
 
+def ormqr(input, input2, input3, left=True, transpose=False):
+    r"""
+    For details, please refer to :func:`mindspore.ops.ormqr`.
+    """
+    return F.ormqr(input, input2, input3, left, transpose)
+
+
 def amax(input, axis=None, keep_dims=False):
     r"""
     For details, please refer to :func:`mindspore.ops.amax`.
@@ -4425,3 +4472,61 @@ def amin(input, axis=None, keep_dims=False):
     For details, please refer to :func:`mindspore.ops.amin`.
     """
     return F.amin(input, axis, keep_dims)
+
+
+def lu_solve(b, LU_data, LU_pivots):
+    r"""
+    For details, please refer to :func:`mindspore.Tensor.lu_solve`
+    """
+    return F.lu_solve(b, LU_data, LU_pivots)
+
+
+def masked_scatter(input, mask, tensor):
+    r"""
+    For details, please refer to :func:`mindspore.Tensor.masked_scatter`
+    """
+    return array_ops.MaskedScatter()(input, mask, tensor)
+
+
+def index_put(input, indices, values, accumulate=False):
+    r"""
+    For details, please refer to :func:`mindspore.Tensor.index_put`
+    """
+    check_bool_type(accumulate, 'accumulate', 'Tensor.index_put')
+    _index_put = array_ops.IndexPut(0 if accumulate is False else 1)
+    return _index_put(input, values, indices)
+
+
+def aminmax(input, *, axis=0, keepdims=False):
+    r"""
+    For details, please refer to :func:`mindspore.ops.aminmax`.
+    """
+    return F.aminmax(input, axis=axis, keepdims=keepdims)
+
+
+def quantile(input, q, axis=None, keepdims=False):
+    r"""
+    For details, please refer to :func:`mindspore.ops.quantile`.
+    """
+    return F.quantile(input, q, axis, keepdims)
+
+
+def nanquantile(input, q, axis=None, keepdims=False):
+    r"""
+    For details, please refer to :func:`mindspore.ops.nanquantile`.
+    """
+    return F.nanquantile(input, q, axis, keepdims)
+
+
+def orgqr(input, input2):
+    r"""
+    For details, please refer to :func:`mindspore.ops.orgqr`.
+    """
+    return F.orgqr(input, input2)
+
+
+def outer(input, vec2):
+    r"""
+    For details, please refer to :func:`mindspore.ops.vec2`.
+    """
+    return F.outer(input, vec2)