mindspore-2.2.14-cp39-cp39-win_amd64.whl → mindspore-2.4.0-cp39-cp39-win_amd64.whl
This diff summarizes the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +8 -5
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +124 -25
- mindspore/_extends/builtin_operations.py +2 -1
- mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
- mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
- mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
- mindspore/_extends/parse/__init__.py +18 -14
- mindspore/_extends/parse/compile_config.py +299 -0
- mindspore/_extends/parse/namespace.py +2 -2
- mindspore/_extends/parse/parser.py +182 -68
- mindspore/_extends/parse/resources.py +45 -14
- mindspore/_extends/parse/standard_method.py +192 -252
- mindspore/{ops/_op_impl/tbe/atomic_addr_clean.py → _extends/pijit/__init__.py} +6 -16
- mindspore/_extends/pijit/pijit_func_white_list.py +669 -0
- mindspore/_extends/remote/kernel_build_server.py +2 -0
- mindspore/_profiler.py +30 -0
- mindspore/amp.py +67 -26
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/base.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +2 -2
- mindspore/boost/grad_freeze.py +2 -2
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/boost/less_batch_normalization.py +9 -6
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +20 -7
- mindspore/common/_jit_fallback_utils.py +2 -3
- mindspore/common/_pijit_context.py +190 -0
- mindspore/common/_register_for_adapter.py +7 -0
- mindspore/common/_register_for_recompute.py +48 -0
- mindspore/common/_register_for_tensor.py +10 -10
- mindspore/common/_stub_tensor.py +7 -1
- mindspore/common/_tensor_overload.py +139 -0
- mindspore/common/_utils.py +5 -17
- mindspore/common/api.py +449 -129
- mindspore/common/auto_dynamic_shape.py +27 -14
- mindspore/common/dtype.py +17 -10
- mindspore/common/dump.py +8 -11
- mindspore/common/file_system.py +48 -0
- mindspore/common/generator.py +254 -0
- mindspore/common/hook_handle.py +65 -30
- mindspore/common/initializer.py +1 -1
- mindspore/common/jit_config.py +34 -14
- mindspore/common/lazy_inline.py +72 -19
- mindspore/common/mindir_util.py +12 -2
- mindspore/common/mutable.py +79 -14
- mindspore/common/no_inline.py +54 -0
- mindspore/common/np_dtype.py +25 -0
- mindspore/common/parameter.py +73 -21
- mindspore/common/recompute.py +292 -0
- mindspore/common/seed.py +9 -9
- mindspore/common/sparse_tensor.py +276 -24
- mindspore/common/symbol.py +122 -0
- mindspore/common/tensor.py +668 -514
- mindspore/communication/__init__.py +6 -11
- mindspore/communication/_comm_helper.py +43 -3
- mindspore/communication/comm_func.py +1395 -0
- mindspore/communication/management.py +117 -104
- mindspore/config/op_info.config +22 -54
- mindspore/context.py +455 -71
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +6 -6
- mindspore/dataset/audio/transforms.py +711 -158
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/core/config.py +7 -0
- mindspore/dataset/core/validator_helpers.py +7 -0
- mindspore/dataset/engine/cache_client.py +2 -2
- mindspore/dataset/engine/datasets.py +201 -116
- mindspore/dataset/engine/datasets_audio.py +14 -14
- mindspore/dataset/engine/datasets_standard_format.py +83 -3
- mindspore/dataset/engine/datasets_text.py +39 -39
- mindspore/dataset/engine/datasets_user_defined.py +230 -141
- mindspore/dataset/engine/datasets_vision.py +78 -74
- mindspore/dataset/engine/iterators.py +29 -0
- mindspore/dataset/engine/obs/util.py +7 -0
- mindspore/dataset/engine/offload.py +5 -7
- mindspore/dataset/engine/queue.py +138 -66
- mindspore/dataset/engine/serializer_deserializer.py +2 -2
- mindspore/dataset/engine/validators.py +41 -15
- mindspore/dataset/text/__init__.py +2 -5
- mindspore/dataset/text/transforms.py +408 -121
- mindspore/dataset/text/utils.py +9 -9
- mindspore/dataset/transforms/__init__.py +0 -3
- mindspore/dataset/transforms/transforms.py +261 -76
- mindspore/dataset/utils/browse_dataset.py +9 -9
- mindspore/dataset/utils/line_reader.py +2 -0
- mindspore/dataset/vision/__init__.py +7 -10
- mindspore/dataset/vision/c_transforms.py +10 -10
- mindspore/dataset/vision/py_transforms_util.py +1 -1
- mindspore/dataset/vision/transforms.py +2844 -549
- mindspore/dataset/vision/utils.py +161 -10
- mindspore/dataset/vision/validators.py +16 -3
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/{rewrite/ast_creator_register.py → experimental/es/__init__.py} +5 -20
- mindspore/experimental/es/embedding_service.py +883 -0
- mindspore/experimental/es/embedding_service_layer.py +581 -0
- mindspore/experimental/llm_boost/__init__.py +21 -0
- mindspore/experimental/llm_boost/atb/__init__.py +23 -0
- mindspore/experimental/llm_boost/atb/boost_base.py +211 -0
- mindspore/experimental/llm_boost/atb/llama_boost.py +115 -0
- mindspore/experimental/llm_boost/atb/qwen_boost.py +101 -0
- mindspore/experimental/llm_boost/register.py +129 -0
- mindspore/experimental/llm_boost/utils.py +31 -0
- mindspore/experimental/optim/__init__.py +12 -2
- mindspore/experimental/optim/adadelta.py +161 -0
- mindspore/experimental/optim/adagrad.py +168 -0
- mindspore/experimental/optim/adam.py +35 -34
- mindspore/experimental/optim/adamax.py +170 -0
- mindspore/experimental/optim/adamw.py +124 -15
- mindspore/experimental/optim/asgd.py +153 -0
- mindspore/experimental/optim/lr_scheduler.py +66 -121
- mindspore/experimental/optim/nadam.py +157 -0
- mindspore/experimental/optim/optimizer.py +18 -8
- mindspore/experimental/optim/radam.py +194 -0
- mindspore/experimental/optim/rmsprop.py +154 -0
- mindspore/experimental/optim/rprop.py +164 -0
- mindspore/experimental/optim/sgd.py +28 -19
- mindspore/hal/__init__.py +40 -0
- mindspore/hal/_ascend.py +57 -0
- mindspore/hal/_base.py +57 -0
- mindspore/hal/_cpu.py +56 -0
- mindspore/hal/_gpu.py +57 -0
- mindspore/hal/contiguous_tensors_handle.py +175 -0
- mindspore/hal/device.py +356 -0
- mindspore/hal/event.py +179 -0
- mindspore/hal/memory.py +326 -0
- mindspore/hal/stream.py +357 -0
- mindspore/include/api/data_type.h +2 -2
- mindspore/include/api/dual_abi_helper.h +16 -3
- mindspore/include/api/model.h +4 -3
- mindspore/include/api/model_group.h +13 -1
- mindspore/include/api/status.h +14 -0
- mindspore/include/api/types.h +10 -10
- mindspore/include/c_api/model_c.h +173 -0
- mindspore/include/c_api/types_c.h +19 -0
- mindspore/include/dataset/config.h +2 -2
- mindspore/include/dataset/constants.h +2 -2
- mindspore/include/dataset/execute.h +3 -5
- mindspore/include/dataset/vision.h +58 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +3 -3
- mindspore/mindrecord/__init__.py +5 -1
- mindspore/mindrecord/config.py +809 -0
- mindspore/mindrecord/filereader.py +25 -0
- mindspore/mindrecord/filewriter.py +138 -103
- mindspore/mindrecord/mindpage.py +40 -6
- mindspore/mindrecord/shardutils.py +3 -2
- mindspore/mindrecord/shardwriter.py +7 -0
- mindspore/mindrecord/tools/cifar100_to_mr.py +8 -13
- mindspore/mindrecord/tools/cifar10_to_mr.py +9 -15
- mindspore/mindrecord/tools/csv_to_mr.py +4 -9
- mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
- mindspore/mindrecord/tools/mnist_to_mr.py +7 -12
- mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -6
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_np_dtype.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mint/__init__.py +1586 -0
- mindspore/mint/distributed/__init__.py +31 -0
- mindspore/mint/distributed/distributed.py +254 -0
- mindspore/{rewrite/ast_transformers → mint/linalg}/__init__.py +9 -4
- mindspore/mint/nn/__init__.py +757 -0
- mindspore/mint/nn/functional.py +679 -0
- mindspore/mint/nn/layer/__init__.py +39 -0
- mindspore/mint/nn/layer/activation.py +133 -0
- mindspore/mint/nn/layer/normalization.py +477 -0
- mindspore/mint/nn/layer/pooling.py +110 -0
- mindspore/mint/optim/__init__.py +24 -0
- mindspore/mint/optim/adamw.py +206 -0
- mindspore/mint/special/__init__.py +63 -0
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/multiprocessing/__init__.py +73 -0
- mindspore/nn/cell.py +461 -323
- mindspore/nn/dynamic_lr.py +2 -2
- mindspore/nn/layer/activation.py +292 -135
- mindspore/nn/layer/basic.py +288 -83
- mindspore/nn/layer/channel_shuffle.py +3 -16
- mindspore/nn/layer/container.py +3 -3
- mindspore/nn/layer/conv.py +75 -66
- mindspore/nn/layer/embedding.py +221 -45
- mindspore/nn/layer/image.py +4 -7
- mindspore/nn/layer/math.py +1 -1
- mindspore/nn/layer/normalization.py +150 -68
- mindspore/nn/layer/padding.py +64 -87
- mindspore/nn/layer/pooling.py +175 -12
- mindspore/nn/layer/rnn_cells.py +6 -16
- mindspore/nn/layer/rnns.py +6 -5
- mindspore/nn/layer/thor_layer.py +1 -2
- mindspore/nn/layer/timedistributed.py +1 -1
- mindspore/nn/layer/transformer.py +55 -53
- mindspore/nn/learning_rate_schedule.py +6 -5
- mindspore/nn/loss/__init__.py +2 -2
- mindspore/nn/loss/loss.py +145 -88
- mindspore/nn/optim/__init__.py +2 -1
- mindspore/nn/optim/ada_grad.py +4 -2
- mindspore/nn/optim/adadelta.py +4 -2
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +102 -181
- mindspore/nn/optim/adamax.py +4 -2
- mindspore/nn/optim/adasum.py +3 -3
- mindspore/nn/optim/asgd.py +4 -2
- mindspore/nn/optim/ftrl.py +31 -61
- mindspore/nn/optim/lamb.py +5 -3
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +6 -4
- mindspore/nn/optim/momentum.py +13 -25
- mindspore/nn/optim/optimizer.py +6 -3
- mindspore/nn/optim/proximal_ada_grad.py +4 -2
- mindspore/nn/optim/rmsprop.py +9 -3
- mindspore/nn/optim/rprop.py +4 -2
- mindspore/nn/optim/sgd.py +5 -3
- mindspore/nn/optim/tft_wrapper.py +127 -0
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
- mindspore/nn/probability/distribution/beta.py +2 -2
- mindspore/nn/probability/distribution/categorical.py +4 -6
- mindspore/nn/probability/distribution/cauchy.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -2
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/logistic.py +1 -1
- mindspore/nn/probability/distribution/poisson.py +2 -2
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +13 -1
- mindspore/nn/wrap/__init__.py +2 -1
- mindspore/nn/wrap/cell_wrapper.py +46 -12
- mindspore/nn/wrap/grad_reducer.py +148 -8
- mindspore/nn/wrap/loss_scale.py +44 -7
- mindspore/numpy/__init__.py +2 -0
- mindspore/numpy/array_creations.py +67 -68
- mindspore/numpy/array_ops.py +70 -66
- mindspore/numpy/dtypes.py +3 -3
- mindspore/numpy/fft.py +966 -0
- mindspore/numpy/logic_ops.py +11 -10
- mindspore/numpy/math_ops.py +147 -152
- mindspore/numpy/utils.py +3 -0
- mindspore/numpy/utils_const.py +4 -4
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +9 -6
- mindspore/ops/_grad_experimental/grad_array_ops.py +4 -129
- mindspore/ops/_grad_experimental/grad_comm_ops.py +135 -36
- mindspore/ops/_grad_experimental/grad_math_ops.py +61 -298
- mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
- mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
- mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
- mindspore/ops/_op_impl/__init__.py +0 -1
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
- mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +1 -1
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
- mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -3
- mindspore/ops/_op_impl/cpu/adam.py +2 -2
- mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
- mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
- mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
- mindspore/ops/_vmap/vmap_array_ops.py +162 -101
- mindspore/ops/_vmap/vmap_base.py +8 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +143 -58
- mindspore/ops/_vmap/vmap_image_ops.py +70 -13
- mindspore/ops/_vmap/vmap_math_ops.py +147 -59
- mindspore/ops/_vmap/vmap_nn_ops.py +292 -117
- mindspore/ops/_vmap/vmap_other_ops.py +1 -1
- mindspore/ops/auto_generate/__init__.py +31 -0
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +309 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +252 -0
- mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
- mindspore/ops/auto_generate/gen_extend_func.py +1701 -0
- mindspore/ops/auto_generate/gen_ops_def.py +8482 -0
- mindspore/ops/auto_generate/gen_ops_prim.py +16704 -0
- mindspore/ops/auto_generate/pyboost_inner_prim.py +549 -0
- mindspore/ops/composite/__init__.py +5 -2
- mindspore/ops/composite/base.py +201 -66
- mindspore/ops/composite/math_ops.py +10 -49
- mindspore/ops/composite/multitype_ops/_compile_utils.py +192 -618
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +25 -134
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +8 -3
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
- mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
- mindspore/ops/deprecated.py +14 -3
- mindspore/ops/function/__init__.py +53 -11
- mindspore/ops/function/array_func.py +1269 -1821
- mindspore/ops/function/clip_func.py +19 -31
- mindspore/ops/function/debug_func.py +114 -5
- mindspore/ops/function/fft_func.py +44 -0
- mindspore/ops/function/grad/grad_func.py +30 -22
- mindspore/ops/function/image_func.py +27 -21
- mindspore/ops/function/linalg_func.py +35 -68
- mindspore/ops/function/math_func.py +1170 -2697
- mindspore/ops/function/nn_func.py +2116 -1128
- mindspore/ops/function/other_func.py +8 -8
- mindspore/ops/function/parameter_func.py +5 -93
- mindspore/ops/function/random_func.py +435 -113
- mindspore/ops/function/reshard_func.py +104 -0
- mindspore/ops/function/sparse_func.py +4 -4
- mindspore/ops/function/sparse_unary_func.py +9 -16
- mindspore/ops/function/spectral_func.py +1 -1
- mindspore/ops/function/vmap_func.py +16 -15
- mindspore/ops/functional.py +355 -346
- mindspore/ops/op_info_register.py +18 -45
- mindspore/ops/operations/__init__.py +38 -24
- mindspore/ops/operations/_grad_ops.py +21 -927
- mindspore/ops/operations/_infer_ops.py +19 -0
- mindspore/ops/operations/_inner_ops.py +173 -607
- mindspore/ops/operations/_rl_inner_ops.py +2 -2
- mindspore/ops/operations/_scalar_ops.py +5 -480
- mindspore/ops/operations/_sequence_ops.py +6 -36
- mindspore/ops/operations/_tensor_array.py +8 -8
- mindspore/ops/operations/array_ops.py +106 -2837
- mindspore/ops/operations/comm_ops.py +799 -127
- mindspore/ops/operations/custom_ops.py +124 -119
- mindspore/ops/operations/debug_ops.py +142 -41
- mindspore/ops/operations/image_ops.py +1 -217
- mindspore/ops/operations/inner_ops.py +5 -40
- mindspore/ops/operations/linalg_ops.py +1 -49
- mindspore/ops/operations/manually_defined/__init__.py +24 -0
- mindspore/ops/operations/manually_defined/_inner.py +73 -0
- mindspore/ops/operations/manually_defined/ops_def.py +2271 -0
- mindspore/ops/operations/math_ops.py +666 -4972
- mindspore/ops/operations/nn_ops.py +205 -2213
- mindspore/ops/operations/other_ops.py +60 -49
- mindspore/ops/operations/random_ops.py +50 -54
- mindspore/ops/operations/reshard_ops.py +53 -0
- mindspore/ops/operations/sparse_ops.py +4 -4
- mindspore/ops/primitive.py +216 -103
- mindspore/ops_generate/__init__.py +27 -0
- mindspore/ops_generate/arg_dtype_cast.py +252 -0
- mindspore/ops_generate/arg_handler.py +197 -0
- mindspore/ops_generate/gen_aclnn_implement.py +263 -0
- mindspore/ops_generate/gen_constants.py +36 -0
- mindspore/ops_generate/gen_ops.py +1099 -0
- mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
- mindspore/ops_generate/gen_pyboost_func.py +1052 -0
- mindspore/ops_generate/gen_utils.py +209 -0
- mindspore/ops_generate/op_proto.py +145 -0
- mindspore/ops_generate/pyboost_utils.py +367 -0
- mindspore/ops_generate/template.py +261 -0
- mindspore/parallel/__init__.py +8 -4
- mindspore/parallel/_auto_parallel_context.py +100 -10
- mindspore/parallel/_cell_wrapper.py +99 -9
- mindspore/parallel/_cost_model_context.py +1 -1
- mindspore/parallel/_dp_allreduce_fusion.py +159 -159
- mindspore/parallel/_parallel_serialization.py +67 -23
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +99 -22
- mindspore/parallel/_transformer/__init__.py +1 -1
- mindspore/parallel/_transformer/layers.py +1 -1
- mindspore/parallel/_transformer/loss.py +1 -1
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/op_parallel_config.py +1 -1
- mindspore/parallel/_transformer/transformer.py +2 -2
- mindspore/parallel/_utils.py +173 -6
- mindspore/parallel/algo_parameter_config.py +8 -10
- mindspore/parallel/checkpoint_transform.py +204 -38
- mindspore/parallel/cluster/__init__.py +15 -0
- mindspore/parallel/cluster/process_entity/__init__.py +18 -0
- mindspore/parallel/cluster/process_entity/_api.py +352 -0
- mindspore/parallel/cluster/process_entity/_utils.py +101 -0
- mindspore/parallel/cluster/run.py +136 -0
- mindspore/parallel/mpi/__init__.py +1 -1
- mindspore/parallel/mpi/_mpi_config.py +1 -1
- mindspore/parallel/parameter_broadcast.py +151 -0
- mindspore/parallel/shard.py +279 -37
- mindspore/parallel/transform_safetensors.py +993 -0
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +4 -2
- mindspore/profiler/common/constant.py +29 -0
- mindspore/profiler/common/process_pool.py +41 -0
- mindspore/profiler/common/registry.py +47 -0
- mindspore/profiler/common/singleton.py +28 -0
- mindspore/profiler/common/util.py +153 -0
- mindspore/profiler/dynamic_profiler.py +694 -0
- mindspore/profiler/envprofiling.py +18 -20
- mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +71 -0
- mindspore/profiler/parser/ascend_analysis/file_manager.py +180 -0
- mindspore/profiler/parser/ascend_analysis/function_event.py +185 -0
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +136 -0
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +131 -0
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +104 -0
- mindspore/profiler/parser/ascend_analysis/path_manager.py +313 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +123 -0
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +75 -0
- mindspore/profiler/parser/ascend_cluster_generator.py +14 -9
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -1
- mindspore/profiler/parser/ascend_flops_generator.py +20 -4
- mindspore/profiler/parser/ascend_hccl_generator.py +29 -278
- mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
- mindspore/profiler/parser/ascend_memory_generator.py +185 -0
- mindspore/profiler/parser/ascend_msprof_exporter.py +148 -146
- mindspore/profiler/parser/ascend_msprof_generator.py +73 -283
- mindspore/profiler/parser/ascend_op_generator.py +92 -42
- mindspore/profiler/parser/ascend_timeline_generator.py +298 -133
- mindspore/profiler/parser/base_timeline_generator.py +25 -25
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +25 -12
- mindspore/profiler/parser/framework_parser.py +4 -393
- mindspore/profiler/parser/gpu_analysis/__init__.py +14 -0
- mindspore/profiler/parser/gpu_analysis/function_event.py +44 -0
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +89 -0
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +72 -0
- mindspore/profiler/parser/integrator.py +3 -1
- mindspore/profiler/parser/memory_usage_parser.py +0 -154
- mindspore/profiler/parser/minddata_parser.py +72 -3
- mindspore/profiler/parser/profiler_info.py +94 -7
- mindspore/profiler/profiler.py +153 -0
- mindspore/profiler/profiling.py +631 -508
- mindspore/rewrite/__init__.py +2 -14
- mindspore/rewrite/api/node.py +122 -36
- mindspore/rewrite/api/pattern_engine.py +2 -3
- mindspore/rewrite/api/scoped_value.py +16 -15
- mindspore/rewrite/api/symbol_tree.py +45 -29
- mindspore/rewrite/ast_helpers/__init__.py +3 -6
- mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
- mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
- mindspore/rewrite/common/__init__.py +1 -2
- mindspore/rewrite/common/config.py +24 -0
- mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
- mindspore/rewrite/{namer.py → common/namer.py} +63 -18
- mindspore/rewrite/common/namespace.py +118 -0
- mindspore/rewrite/node/__init__.py +5 -5
- mindspore/rewrite/node/call_function.py +23 -7
- mindspore/rewrite/node/cell_container.py +7 -3
- mindspore/rewrite/node/control_flow.py +53 -28
- mindspore/rewrite/node/node.py +212 -196
- mindspore/rewrite/node/node_manager.py +51 -22
- mindspore/rewrite/node/node_topological_manager.py +3 -23
- mindspore/rewrite/parsers/__init__.py +12 -0
- mindspore/rewrite/parsers/arguments_parser.py +8 -9
- mindspore/rewrite/parsers/assign_parser.py +637 -413
- mindspore/rewrite/parsers/attribute_parser.py +3 -4
- mindspore/rewrite/parsers/class_def_parser.py +115 -148
- mindspore/rewrite/parsers/constant_parser.py +5 -5
- mindspore/rewrite/parsers/container_parser.py +4 -6
- mindspore/rewrite/parsers/expr_parser.py +55 -0
- mindspore/rewrite/parsers/for_parser.py +31 -98
- mindspore/rewrite/parsers/function_def_parser.py +13 -5
- mindspore/rewrite/parsers/if_parser.py +28 -10
- mindspore/rewrite/parsers/module_parser.py +8 -182
- mindspore/rewrite/parsers/parser.py +1 -5
- mindspore/rewrite/parsers/parser_register.py +1 -1
- mindspore/rewrite/parsers/return_parser.py +5 -10
- mindspore/rewrite/parsers/while_parser.py +59 -0
- mindspore/rewrite/sparsify/utils.py +1 -1
- mindspore/rewrite/symbol_tree/__init__.py +20 -0
- mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +705 -186
- mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
- mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
- mindspore/run_check/_check_version.py +40 -115
- mindspore/run_check/run_check.py +1 -1
- mindspore/safeguard/rewrite_obfuscation.py +597 -263
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +7 -5
- mindspore/train/_utils.py +204 -4
- mindspore/train/amp.py +335 -295
- mindspore/train/anf_ir_pb2.py +14 -2
- mindspore/train/callback/__init__.py +5 -2
- mindspore/train/callback/_backup_and_restore.py +5 -5
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +220 -43
- mindspore/train/callback/_cluster_monitor.py +201 -0
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_flops_collector.py +239 -0
- mindspore/train/callback/_landscape.py +15 -9
- mindspore/train/callback/_loss_monitor.py +5 -5
- mindspore/train/callback/_on_request_exit.py +136 -33
- mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
- mindspore/train/callback/_summary_collector.py +12 -12
- mindspore/train/callback/_tft_register.py +352 -0
- mindspore/train/callback/_time_monitor.py +3 -3
- mindspore/train/data_sink.py +6 -5
- mindspore/train/dataset_helper.py +66 -23
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/accuracy.py +7 -7
- mindspore/train/metrics/confusion_matrix.py +8 -6
- mindspore/train/metrics/cosine_similarity.py +6 -4
- mindspore/train/metrics/error.py +2 -2
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/metrics/perplexity.py +2 -1
- mindspore/train/metrics/roc.py +4 -4
- mindspore/train/metrics/topk.py +2 -2
- mindspore/train/mind_ir_pb2.py +116 -37
- mindspore/train/model.py +382 -76
- mindspore/train/serialization.py +787 -288
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/summary_record.py +51 -28
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +21 -0
- mindspore/utils/utils.py +60 -0
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.2.14.dist-info → mindspore-2.4.0.dist-info}/METADATA +8 -4
- mindspore-2.4.0.dist-info/RECORD +1406 -0
- {mindspore-2.2.14.dist-info → mindspore-2.4.0.dist-info}/entry_points.txt +1 -0
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
- mindspore/gen_ops.py +0 -273
- mindspore/include/c_api/ms/abstract.h +0 -67
- mindspore/include/c_api/ms/attribute.h +0 -197
- mindspore/include/c_api/ms/base/handle_types.h +0 -43
- mindspore/include/c_api/ms/base/macros.h +0 -32
- mindspore/include/c_api/ms/base/status.h +0 -33
- mindspore/include/c_api/ms/base/types.h +0 -282
- mindspore/include/c_api/ms/context.h +0 -102
- mindspore/include/c_api/ms/graph.h +0 -160
- mindspore/include/c_api/ms/node.h +0 -606
- mindspore/include/c_api/ms/tensor.h +0 -161
- mindspore/include/c_api/ms/value.h +0 -84
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/nn/layer/flash_attention.py +0 -189
- mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +0 -93
- mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +0 -66
- mindspore/ops/_op_impl/cpu/concat.py +0 -39
- mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
- mindspore/ops/_op_impl/tbe/__init__.py +0 -47
- mindspore/ops/_op_impl/tbe/abs.py +0 -38
- mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/acos.py +0 -37
- mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/acosh.py +0 -37
- mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
- mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
- mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
- mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
- mindspore/ops/_op_impl/tbe/add.py +0 -42
- mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/add_n.py +0 -39
- mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
- mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
- mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
- mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
- mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
- mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
- mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
- mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
- mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
- mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/asin.py +0 -37
- mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/asinh.py +0 -37
- mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/assign.py +0 -79
- mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
- mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
- mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/atan.py +0 -37
- mindspore/ops/_op_impl/tbe/atan2.py +0 -38
- mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/atanh.py +0 -37
- mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
- mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
- mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
- mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
- mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
- mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
- mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
- mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
- mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
- mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
- mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
- mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
- mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
- mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
- mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cast.py +0 -55
- mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/cdist.py +0 -38
- mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/ceil.py +0 -37
- mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/celu.py +0 -39
- mindspore/ops/_op_impl/tbe/centralization.py +0 -39
- mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
- mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/concat.py +0 -40
- mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
- mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
- mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
- mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
- mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
- mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/cos.py +0 -37
- mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/cosh.py +0 -37
- mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
- mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
- mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cummin.py +0 -41
- mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
- mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
- mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
- mindspore/ops/_op_impl/tbe/diag.py +0 -38
- mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
- mindspore/ops/_op_impl/tbe/dilation.py +0 -40
- mindspore/ops/_op_impl/tbe/div.py +0 -41
- mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
- mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
- mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
- mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
- mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
- mindspore/ops/_op_impl/tbe/elu.py +0 -38
- mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/equal.py +0 -42
- mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/erf.py +0 -37
- mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfc.py +0 -37
- mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
- mindspore/ops/_op_impl/tbe/exp.py +0 -40
- mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
- mindspore/ops/_op_impl/tbe/expm1.py +0 -37
- mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
- mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/fill.py +0 -56
- mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/flatten.py +0 -48
- mindspore/ops/_op_impl/tbe/floor.py +0 -37
- mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
- mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
- mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
- mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
- mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
- mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
- mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
- mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/ger.py +0 -43
- mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/greater.py +0 -43
- mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
- mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
- mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
- mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
- mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
- mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
- mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
- mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/im2col.py +0 -42
- mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
- mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
- mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/inv.py +0 -38
- mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/invert.py +0 -37
- mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/iou.py +0 -38
- mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/is_close.py +0 -40
- mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
- mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
- mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
- mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
- mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
- mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
- mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/lerp.py +0 -38
- mindspore/ops/_op_impl/tbe/less.py +0 -41
- mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/log.py +0 -40
- mindspore/ops/_op_impl/tbe/log1p.py +0 -37
- mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
- mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
- mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
- mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
- mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/matmul.py +0 -53
- mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
- mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
- mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
- mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum.py +0 -39
- mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
- mindspore/ops/_op_impl/tbe/minimum.py +0 -40
- mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mish.py +0 -37
- mindspore/ops/_op_impl/tbe/mod.py +0 -41
- mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/mul.py +0 -37
- mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
- mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
- mindspore/ops/_op_impl/tbe/neg.py +0 -39
- mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
- mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
- mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
- mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
- mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
- mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
- mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/pack.py +0 -58
- mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
- mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
- mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/pdist.py +0 -36
- mindspore/ops/_op_impl/tbe/pooling.py +0 -46
- mindspore/ops/_op_impl/tbe/population_count.py +0 -38
- mindspore/ops/_op_impl/tbe/pow.py +0 -41
- mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/prelu.py +0 -37
- mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/range.py +0 -39
- mindspore/ops/_op_impl/tbe/real_div.py +0 -38
- mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
- mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
- mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
- mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
- mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6.py +0 -38
- mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/renorm.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
- mindspore/ops/_op_impl/tbe/rint.py +0 -37
- mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roll.py +0 -42
- mindspore/ops/_op_impl/tbe/round.py +0 -38
- mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
- mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
- mindspore/ops/_op_impl/tbe/select.py +0 -38
- mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/selu.py +0 -39
- mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sgd.py +0 -62
- mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sign.py +0 -38
- mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/sin.py +0 -37
- mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sinh.py +0 -37
- mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/slice.py +0 -58
- mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
- mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax.py +0 -37
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
- mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/softplus.py +0 -37
- mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softsign.py +0 -37
- mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sort.py +0 -38
- mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/split_d.py +0 -38
- mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/split_v.py +0 -39
- mindspore/ops/_op_impl/tbe/splitv.py +0 -39
- mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/square.py +0 -38
- mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
- mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
- mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
- mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
- mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
- mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
- mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
- mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
- mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
- mindspore/ops/_op_impl/tbe/sub.py +0 -39
- mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tan.py +0 -38
- mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh.py +0 -37
- mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
- mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
- mindspore/ops/_op_impl/tbe/tile.py +0 -37
- mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
- mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
- mindspore/ops/_op_impl/tbe/transpose.py +0 -60
- mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
- mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
- mindspore/ops/_op_impl/tbe/trunc.py +0 -39
- mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/unpack.py +0 -38
- mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
- mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
- mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
- mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
- mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
- mindspore/ops/_tracefunc.py +0 -241
- mindspore/ops/arg_dtype_cast.py +0 -54
- mindspore/ops/silent_check.py +0 -162
- mindspore/profiler/parser/msadvisor_analyzer.py +0 -82
- mindspore/profiler/parser/msadvisor_parser.py +0 -240
- mindspore/rewrite/api/tree_node_helper.py +0 -60
- mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
- mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
- mindspore/rewrite/namespace.py +0 -53
- mindspore-2.2.14.dist-info/RECORD +0 -1924
- {mindspore-2.2.14.dist-info → mindspore-2.4.0.dist-info}/WHEEL +0 -0
- {mindspore-2.2.14.dist-info → mindspore-2.4.0.dist-info}/top_level.txt +0 -0
mindspore/numpy/math_ops.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright 2020-
+# Copyright 2020-2024 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -79,7 +79,7 @@ def absolute(x, dtype=None):
 Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
 not supported.
 Currently the backend kernel only supports float calculation, if the input
-is not a `float`, then it will be casted to
+is not a `float`, then it will be casted to ``mstype.float32`` and casted back.

 Args:
 x (Tensor): Tensor to be used for calculation.
@@ -295,7 +295,7 @@ def add(x1, x2, dtype=None):
 # broadcast is not fully supported in tensor_add on CPU,
 # so we use tensor_sub as a substitute solution
 if _get_device() == 'CPU':
-return subtract(x1, F.
+return subtract(x1, F.neg(_to_tensor(x2)), dtype=dtype)
 return _apply_tensor_op(F.tensor_add, x1, x2, dtype=dtype)


@@ -604,14 +604,14 @@ def mean(a, axis=None, keepdims=False, dtype=None):
 Args:
 a (Tensor): input tensor containing numbers whose mean is desired.
 If a is not an array, a conversion is attempted.
-axis (
+axis (Union[int, tuple(int), None], optional): Axis or axes along
 which the means are computed. The default is to compute
 the mean of the flattened array. If this is a tuple of
-ints, a mean is performed over multiple axes.
+ints, a mean is performed over multiple axes. Default: ``None`` .
 keepdims (bool, optional): If this is set to ``True`` , the axes which
 are reduced are left in the result as dimensions with
 size one. With this option, the result will broadcast
-correctly against the input tensor.
+correctly against the input tensor. Default: ``False`` .
 dtype (:class:`mindspore.dtype`, optional): Default: ``None`` . Overrides the dtype of the
 output Tensor.

@@ -902,7 +902,7 @@ def std(x, axis=None, ddof=0, keepdims=False):

 If ``None`` , compute the standard deviation of the flattened array.
 ddof (int): Means Delta Degrees of Freedom. The divisor used in calculations is :math:`N - ddof`,
-where :math:`N` represents the number of elements. Default: 0
+where :math:`N` represents the number of elements. Default: ``0``.
 keepdims: If this is set to True, the axes which are reduced are left in the result as
 dimensions with size one. With this option, the result will broadcast correctly against the input tensor.
 If the default value is passed, then keepdims will not be passed through to the std method of
@@ -1011,14 +1011,14 @@ def average(x, axis=None, weights=None, returned=False):

 Args:
 x (Tensor): A Tensor to be averaged.
-axis (Union[None, int, tuple(int)]): Axis along which to average `x`. Default: ``None`` .
+axis (Union[None, int, tuple(int)], optional): Axis along which to average `x`. Default: ``None`` .
 If the axis is `None`, it will average over all of the elements of the tensor `x`.
 If the axis is negative, it counts from the last to the first axis.
-weights (Union[None, Tensor]): Weights associated with the values in `x`. Default: ``None`` .
+weights (Union[None, Tensor], optional): Weights associated with the values in `x`. Default: ``None`` .
 If `weights` is `None`, all the data in `x` are assumed to have a weight equal to one.
 If `weights` is 1-D tensor, the length must be the same as the given axis.
 Otherwise, `weights` should have the same shape as `x`.
-returned (bool): Default: ``False`` .
+returned (bool, optional): Default: ``False`` .
 If `True`, the tuple (average, sum_of_weights) is returned.
 If `False`, only the average is returned.

@@ -1154,7 +1154,7 @@ def square(x, dtype=None):

 Returns:
 Tensor or scalar, element-wise ``x*x``, of the same shape and dtype as `x`.
-This is a scalar if `x` is a scalar
+This is a scalar if `x` is a scalar.

 Supported Platforms:
 ``Ascend`` ``GPU`` ``CPU``
@@ -1402,7 +1402,7 @@ def amax(a, axis=None, keepdims=False, initial=None, where=True):

 Args:
 a (Tensor): Input data.
-axis (
+axis (Union[int, tuple(int), None], optional): Default: ``None`` . Axis or
 axes along which to operate. By default, flattened input is used. If
 this is a tuple of integers, the maximum is selected over multiple axes,
 instead of a single axis or all the axes as before.
@@ -1458,7 +1458,7 @@ def amin(a, axis=None, keepdims=False, initial=None, where=True):

 Args:
 a (Tensor): Input data.
-axis (
+axis (Union[int, tuple(int), None], optional): Default: ``None`` . Axis or
 axes along which to operate. By default, flattened input is used. If
 this is a tuple of integers, the minimum is selected over multiple axes,
 instead of a single axis or all the axes as before.
@@ -1552,7 +1552,7 @@ def hypot(x1, x2, dtype=None):
 if _get_device() == 'CPU':
 # broadcast is not fully supported in tensor_add on CPU,
 # so we use tensor_sub as a substitute solution
-return F.sqrt(F.tensor_sub(F.square(x1), F.
+return F.sqrt(F.tensor_sub(F.square(x1), F.neg(F.square(x2))))
 return F.sqrt(F.tensor_add(F.square(x1), F.square(x2)))

 return _apply_tensor_op(_hypot, x1, x2, dtype=dtype)
@@ -1716,7 +1716,7 @@ def fix(x):
 x = F.cast(x, mstype.float32)
 floored = F.floor(x)
 # change to F.ceil once supported on CPU.
-ceiled = F.
+ceiled = F.neg(F.floor(F.neg(x)))
 is_neg = F.tensor_lt(x, zeros(F.shape(x), F.dtype(x)))
 return F.select(is_neg, ceiled, floored)

@@ -1992,10 +1992,10 @@ def ediff1d(ary, to_end=None, to_begin=None):

 Args:
 ary (Tensor): If necessary, will be flattened before the differences are taken.
-to_end (Tensor
-returned differences.
-to_begin (Tensor
-of the returned differences.
+to_end (Tensor, scalar, optional): Number(s) to append at the end of the
+returned differences. Default: ``None`` .
+to_begin (Tensor, scalar, optional): Number(s) to prepend at the beginning
+of the returned differences. Default: ``None`` .

 Returns:
 The differences.
@@ -2432,7 +2432,7 @@ def _shape_reduced(shape, axes):
 """Removes dimensions corresponding to argument axes"""
 ndim_orig = F.tuple_len(shape)
 ndim_out = ndim_orig - F.tuple_len(axes)
-shape_out = [0]*ndim_out
+shape_out = [0] * ndim_out
 idx_out = 0
 for i in range(ndim_orig):
 if i not in axes:
@@ -2499,8 +2499,8 @@ def nanmax(a, axis=None, dtype=None, keepdims=False):
 Args:
 a (Union[int, float, list, tuple, Tensor]): Array containing numbers whose maximum
 is desired. If `a` is not an array, a conversion is attempted.
-axis (Union[int, tuple
-computed. The default is to compute the maximum of the flattened array.
+axis (Union[int, tuple(int), None], optional): Axis or axes along which the maximum is
+computed. The default is to compute the maximum of the flattened array. Default: ``None`` .
 dtype (:class:`mindspore.dtype`, optional): Default: ``None`` . Overrides the dtype of the
 output Tensor.
 keepdims (boolean, optional): Default: ``False`` . If this is set to True, the axes which
@@ -2531,7 +2531,7 @@ def nanmax(a, axis=None, dtype=None, keepdims=False):
 if not isinstance(keepdims, int):
 _raise_type_error("integer argument expected, got", keepdims)
 nan_mask = _isnan(a)
-a = F.select(nan_mask,
+a = F.select(nan_mask, P.FillV2()(F.shape(a), Tensor(-sys.maxsize - 1, F.dtype(a))), a)
 reduce_fn = _reduce_max_keepdims if keepdims else _reduce_max_default
 return _reduce(a, reduce_fn, axis=axis, keepdims=keepdims, dtype=dtype)

@@ -2549,8 +2549,8 @@ def nanmin(a, axis=None, dtype=None, keepdims=False):
 Args:
 a (Union[int, float, list, tuple, Tensor]): Array containing numbers whose minimum
 is desired. If `a` is not an array, a conversion is attempted.
-axis (Union[int, tuple
-computed. The default is to compute the minimum of the flattened array.
+axis (Union[int, tuple(int), None], optional): Axis or axes along which the minimum is
+computed. The default is to compute the minimum of the flattened array. Default: ``None`` .
 dtype (:class:`mindspore.dtype`, optional): Default: ``None`` . Overrides the dtype of the
 output Tensor.
 keepdims (boolean, optional): Default: ``False`` . If this is set to True, the axes which
@@ -2581,7 +2581,7 @@ def nanmin(a, axis=None, dtype=None, keepdims=False):
 if not isinstance(keepdims, int):
 _raise_type_error("integer argument expected, got", keepdims)
 nan_mask = _isnan(a)
-a = F.select(nan_mask,
+a = F.select(nan_mask, P.FillV2()(F.shape(a), Tensor(sys.maxsize, F.dtype(a))), a)
 reduce_fn = _reduce_min_keepdims if keepdims else _reduce_min_default
 return _reduce(a, reduce_fn, axis=axis, keepdims=keepdims, dtype=dtype)

@@ -2605,7 +2605,7 @@ def nansum(a, axis=None, dtype=None, keepdims=False):
 a (Union[int, float, list, tuple, Tensor]): Array containing numbers
 whose sum is desired. If `a` is not an array, a conversion is attempted.
 axis (Union[int, tuple of int, None], optional): Axis or axes along which the sum is
-computed. The default is to compute the sum of the flattened array.
+computed. The default is to compute the sum of the flattened array. Default: ``None`` .
 dtype (:class:`mindspore.dtype`, optional): Default: ``None`` . Overrides the dtype of the
 output Tensor.
 keepdims (boolean, optional): Default: ``False`` . If this is set to True, the axes which
@@ -2662,7 +2662,7 @@ def nanmean(a, axis=None, dtype=None, keepdims=False):
 a (Union[int, float, list, tuple, Tensor]): Array containing numbers
 whose mean is desired. If `a` is not an array, a conversion is attempted.
 axis (Union[int, tuple of int, None], optional): Axis or axes along which the mean is
-computed. The default is to compute the mean of the flattened array.
+computed. The default is to compute the mean of the flattened array. Default: ``None`` .
 dtype (:class:`mindspore.dtype`, optional): Default: ``None`` . Overrides the dtype of the
 output Tensor.
 keepdims (boolean, optional): Default: ``False`` . If this is set to True, the axes which
@@ -2724,7 +2724,7 @@ def nanvar(a, axis=None, dtype=None, ddof=0, keepdims=False):
 a (Union[int, float, list, tuple, Tensor]): Array containing numbers
 whose variance is desired. If `a` is not an array, a conversion is attempted.
 axis (Union[int, tuple of int, None], optional): Axis or axes along which the variance is
-computed. The default is to compute the variance of the flattened array.
+computed. The default is to compute the variance of the flattened array. Default: ``None`` .
 dtype (:class:`mindspore.dtype`, optional): Default: ``None`` . Overrides the dtype of the
 output Tensor.
 ddof (int, optional): "Delta Degrees of Freedom": the divisor used in the calculation is
@@ -2779,7 +2779,7 @@ def nanstd(a, axis=None, dtype=None, ddof=0, keepdims=False):
 a (Union[int, float, list, tuple, Tensor]): Calculates the standard deviation of the non-NaN values.
 axis (Union[int, tuple of int, None], optional): Axis or axes along which the standard
 deviation is computed. The default is to compute the standard deviation of the
-flattened array.
+flattened array. Default: ``None`` .
 dtype (:class:`mindspore.dtype`, optional): Default: ``None`` . Overrides the dtype of the
 output Tensor.
 ddof (int, optional): "Delta Degrees of Freedom": the divisor used in the calculation is
@@ -2894,9 +2894,9 @@ def kron(a, b):

 # scales a by the shape of b
 kron_shape = _seq_prod(shape_a, shape_b)
-a = F.reshape(a, _add_unit_axes(shape_a, 2*ndim, True))
-a = F.tile(a, _add_unit_axes(shape_b, 2*ndim, False))
-a = moveaxis(a, F.make_range(ndim, 2*ndim), F.make_range(1, 2*ndim, 2))
+a = F.reshape(a, _add_unit_axes(shape_a, 2 * ndim, True))
+a = F.tile(a, _add_unit_axes(shape_b, 2 * ndim, False))
+a = moveaxis(a, F.make_range(ndim, 2 * ndim), F.make_range(1, 2 * ndim, 2))
 a = F.reshape(a, kron_shape)
 # scales b by the shape of a
 b = F.tile(b, shape_a)
@@ -2997,11 +2997,11 @@ def cross(a, b, axisa=- 1, axisb=- 1, axisc=- 1, axis=None):
 cx = F.tensor_sub(_get_slice_product(1, 2), _get_slice_product(2, 1)) # ay*bz - az*by
 cy = F.tensor_sub(_get_slice_product(2, 0), _get_slice_product(0, 2)) # az*bx - ax*bz
 elif a_has_z:
-cx = F.
+cx = F.neg(_get_slice_product(2, 1)) # -az*by
 cy = _get_slice_product(2, 0) # az*bx
 else: # b_has_z
 cx = _get_slice_product(1, 2) # ay*bz
-cy = F.
+cy = F.neg(_get_slice_product(0, 2)) # -ax*bz
 res = _concat((cx, cy, cz)).reshape(shape_out)
 return moveaxis(res, -1, axisc).astype(dtype)

@@ -3035,7 +3035,7 @@ def ceil(x, dtype=None):
 >>> print(output)
 [-1. -1. -0. 1. 2. 2. 2.]
 """
-return _apply_tensor_op(lambda x: F.
+return _apply_tensor_op(lambda x: F.neg(F.floor(F.neg(x.astype(mstype.float32)))),
 x, dtype=dtype)


@@ -3080,8 +3080,8 @@ def positive(a, dtype=None):
 [1. -1.]
 """
 _check_input_tensor(a)
-neg_tensor = F.
-return _apply_tensor_op(F.
+neg_tensor = F.neg(a)
+return _apply_tensor_op(F.neg, neg_tensor, dtype=dtype)


 def negative(a, dtype=None):
@@ -3110,7 +3110,7 @@ def negative(a, dtype=None):
 >>> print(output)
 [-1. 1.]
 """
-return _apply_tensor_op(F.
+return _apply_tensor_op(F.neg, a, dtype=dtype)


 def cumsum(a, axis=None, dtype=None):
@@ -3118,8 +3118,8 @@ def cumsum(a, axis=None, dtype=None):
 Returns the cumulative sum of the elements along a given axis.

 Note:
-If ``a.dtype`` is
-`dtype` will be elevated to
+If ``a.dtype`` is `int8`, `int16` or `bool`, the result
+`dtype` will be elevated to `int32`.

 Args:
 a (Tensor): Input tensor.
@@ -3161,8 +3161,8 @@ def nancumsum(a, axis=None, dtype=None):
 Zeros are returned for slices that are all-NaN or empty.

 Note:
-If ``a.dtype`` is
-`dtype` will be elevated to
+If ``a.dtype`` is `int8`, `int16` or `bool`, the result
+`dtype` will be elevated to `int32`.

 Args:
 a (Tensor): Input tensor.
@@ -3171,7 +3171,7 @@ def nancumsum(a, axis=None, dtype=None):
 dtype (:class:`mindspore.dtype`, optional): If not specified, stay the same as `a`,
 unless `a` has an integer dtype with a precision less than that of the
 default platform integer. In that case, the default platform integer
-is used.
+is used. Default: ``None`` .

 Returns:
 Tensor.
@@ -3231,7 +3231,7 @@ def cbrt(x, dtype=None):
 def _cbrt(x):
 compute_type = promote_types(x.dtype, "float32")
 x = x.astype(compute_type)
-#
+# use P.Sign() once gpu support is added
 abs_x = F.absolute(x)
 sign_x = abs_x / x
 return sign_x * F.tensor_pow(abs_x, 1. / 3.)
@@ -3271,12 +3271,7 @@ def log1p(x, dtype=None):

 def logaddexp(x1, x2, dtype=None):
 """
-Logarithm of the sum of exponentiations of the inputs.
-
-Calculates ``log(exp(x1) + exp(x2))``. This function is useful in statistics where the
-calculated probabilities of events may be so small as to exceed the range of normal
-floating point numbers. In such cases the logarithm of the calculated probability is
-stored. This function allows adding probabilities stored in such a fashion.
+Logarithm of the sum of exponentiations of the inputs. Calculates ``log(exp(x1) + exp(x2))``.

 Note:
 Numpy arguments `out`, `where`, `casting`, `order`, `subok`, `signature`, and `extobj` are
@@ -3502,7 +3497,7 @@ def tan(x, dtype=None):
 Tensor or scalar. This is a scalar if `x` is a scalar.

 Raises:
-TypeError: If the input is not a tensor or
+TypeError: If the input is not a tensor or the dtype of tensor is mindspore.float64.

 Supported Platforms:
 ``Ascend`` ``CPU``
@@ -3937,11 +3932,11 @@ def _gradient_along_axis(f, h, axis):
 """compute the gradients of `f` along a given axis, a helper function of gradient."""
 end = f.shape[axis]
 upper_edge = _slice_along_axis(f, axis, 1, 2) - _slice_along_axis(f, axis, 0, 1)
-lower_edge = _slice_along_axis(f, axis, end-1, end) - _slice_along_axis(f, axis, end-2, end-1)
+lower_edge = _slice_along_axis(f, axis, end - 1, end) - _slice_along_axis(f, axis, end - 2, end - 1)
 if end <= 2:
 a_grad = concatenate((upper_edge, lower_edge), axis)
 else:
-middle = (_slice_along_axis(f, axis, 2, end) - _slice_along_axis(f, axis, 0, end-2)) * 0.5
+middle = (_slice_along_axis(f, axis, 2, end) - _slice_along_axis(f, axis, 0, end - 2)) * 0.5
 a_grad = concatenate((upper_edge, middle, lower_edge), axis)
 return a_grad / h

@@ -3983,10 +3978,10 @@ def gradient(f, *varargs, axis=None, edge_order=1):
 1. single scalar to specify a sample distance for all dimensions.
 2. N scalars to specify a constant sample distance for each dimension.
 axis (Union[None, int, tuple(int), list(int)], optional): Gradient is calculated
-only along the given axis or axes. The default
+only along the given axis or axes. The default ``(axis = None)`` is to calculate
 the gradient for all the axes of the input tensor. `axis` may be negative,
 in which case it counts from the last to the first `axis`.
-edge_order (int): Gradient is calculated using N-th order accurate differences
+edge_order (int, optional): Gradient is calculated using N-th order accurate differences
 at the boundaries. Default: ``1`` .

 Returns:
@@ -4055,21 +4050,22 @@ def sum_(a, axis=None, dtype=None, keepdims=False, initial=None):
 `extobj` are not supported.

 Args:
-
-axis (Union[None, int, tuple(int)]): Axis or axes along which a sum is performed. Default:
+a (Union[int, float, bool, list, tuple, Tensor]): Elements to sum.
+axis (Union[None, int, tuple(int)], optional): Axis or axes along which a sum is performed. Default: ``None``.
 If `None`, sum all of the elements of the input array.
 If axis is negative it counts from the last to the first axis.
 If axis is a tuple of integers, a sum is performed on all of the axes specified in the tuple
 instead of a single axis or all the axes as before.
 dtype (:class:`mindspore.dtype`, optional): Defaults to `None`. Overrides the dtype of the
 output Tensor.
-keepdims (bool): If this is set to True, the axes which are reduced are left in the result as
+keepdims (bool, optional): If this is set to True, the axes which are reduced are left in the result as
 dimensions with size one. With this option, the result will broadcast correctly against the input array.
 If the default value is passed, then keepdims will not be passed through to the sum method of
 sub-classes of ndarray, however any non-default value will be. If the sub-class method does not
-implement keepdims any exceptions will be raised. Default:
-initial (scalar): Starting value for the sum, if `None`,
-
+implement keepdims any exceptions will be raised. Default: ``False``.
+initial (scalar, optional): Starting value for the sum, if `None`,
+which refers to the first element of the reduction.
+Default: ``None``.

 Returns:
 Tensor. An array with the same shape as a, with the specified axis removed.
@@ -4104,8 +4100,8 @@ def _min_cost_chain_matmul(dims):
 """
 dims = tuple(dims)
 n = len(dims) - 1
-m = [[0]*n for _ in range(n)]
-s = [[0]*n for _ in range(n)]
+m = [[0] * n for _ in range(n)]
+s = [[0] * n for _ in range(n)]
 for pos in range(1, n):
 for i in range(n - pos):
 j = i + pos
@@ -4171,18 +4167,18 @@ def multi_dot(arrays):
 Examples:
 >>> import mindspore.numpy as np
 >>> A = np.ones((10000, 100))
->>> B = np.ones((100,
->>> C = np.ones((
+>>> B = np.ones((100, 100))
+>>> C = np.ones((100, 5))
 >>> D = np.ones((5, 333))
 >>> output = np.multi_dot([A, B, C, D])
 >>> print(output)
-[[
-[
-[
+[[50000. 50000. 50000. ... 50000. 50000. 50000.]
+[50000. 50000. 50000. ... 50000. 50000. 50000.]
+[50000. 50000. 50000. ... 50000. 50000. 50000.]
 ...
-[
-[
-[
+[50000. 50000. 50000. ... 50000. 50000. 50000.]
+[50000. 50000. 50000. ... 50000. 50000. 50000.]
+[50000. 50000. 50000. ... 50000. 50000. 50000.]]
 """
 if len(arrays) < 2:
 _raise_value_error('Expecting at least 2 arrays')
@@ -4285,7 +4281,7 @@ def argmin(a, axis=None):

 Examples:
 >>> import mindspore.numpy as np
->>> a = np.arange(10, 16).reshape(2, 3)
+>>> a = np.arange(10, 16).reshape(2, 3)
 >>> print(np.argmin(a))
 0
 >>> print(np.argmin(a, axis=0))
@@ -4314,12 +4310,12 @@ def searchsorted(a, v, side='left', sorter=None):
 None, then it must be sorted in ascending order, otherwise `sorter` must be
 an array of indices that sort it.
 v (Union[int, float, bool, list, tuple, Tensor]): Values to insert into `a`.
-side ('left', 'right', optional): If ``'left'`` , the index of the first suitable
+side ('left', 'right', optional): If ``'left'`` (default value), the index of the first suitable
 location found is given. If ``'right'`` , return the last such index. If there is
 no suitable index, return either 0 or N (where N is the length of `a`).
 sorter (Union[int, float, bool, list, tuple, Tensor]): 1-D optional array of
 integer indices that sort array `a` into ascending order. They are typically
-the result of argsort.
+the result of argsort. Default: ``None`` .

 Returns:
 Tensor, array of insertion points with the same shape as `v`.
@@ -4383,9 +4379,9 @@ def interp(x, xp, fp, left=None, right=None):
 fp (Union[int, float, bool, list, tuple, Tensor]): 1-D sequence of floats, the
 y-coordinates of the data points, same length as `xp`.
 left (float, optional): Value to return for ``x < xp[0]``, default is ``fp[0]``
-once obtained.
+once obtained. Default: ``None`` .
 right (float, optional): Value to return for ``x > xp[-1]``, default is ``fp[-1]``
-once obtained.
+once obtained. Default: ``None`` .

 Returns:
 Tensor, the interpolated values, same shape as `x`.
@@ -4426,7 +4422,7 @@ def interp(x, xp, fp, left=None, right=None):
 x_1 = F.gather_nd(xp, indices_1)
 y_0 = F.gather_nd(fp, indices_0)
 y_1 = F.gather_nd(fp, indices_1)
-res = (y_0*(x_1 - x) + y_1*(x - x_0))/(x_1 - x_0)
+res = (y_0 * (x_1 - x) + y_1 * (x - x_0)) / (x_1 - x_0)
 res = F.select(F.equal(x_0, x_1), y_0, res)

 idx_0 = _to_tensor([0])
@@ -4554,7 +4550,7 @@ def copysign(x1, x2, dtype=None):
 else:
 pos_tensor = F.absolute(x1)

-neg_tensor = F.
+neg_tensor = F.neg(pos_tensor)
 less_zero = F.less(x2, 0)
 res = F.select(less_zero, neg_tensor, pos_tensor)

@@ -4690,21 +4686,21 @@ def histogram(a, bins=10, range=None, weights=None, density=False): # pylint: di
 bins (Union[int, tuple, list, Tensor], optional): If `bins` is an int, it
 defines the number of equal-width bins in the given range (10, by
 default). If `bins` is a sequence, it defines the bin edges, including
-the rightmost edge, allowing for non-uniform bin widths.
+the rightmost edge, allowing for non-uniform bin widths. Default: ``10`` .
 range((float, float), optional): The lower and upper range of the bins. If
 not provided, `range` is simply ``(a.min(), a.max())``. Values outside
 the range are ignored. The first element of the range must be less than
-or equal to the second.
+or equal to the second. Default: ``None`` .
 weights (Union[int, float, bool, list, tuple, Tensor], optional): An array
 of weights, of the same shape as `a`. If density is True, the weights
 are normalized, so that the integral of the density over the range
-remains 1.
+remains 1. Default: ``None`` .
 density (boolean, optional): If False, the result will contain the number of
 samples in each bin. If True, the result is the value of the probability
 density function at the bin, normalized such that the integral over the
 range is 1. Note that the sum of the histogram values will not be equal
 to 1 unless bins of unity width are chosen; it is not a probability mass
-function.
+function. Default: ``False`` .

 Returns:
 (Tensor, Tensor), the values of the histogram and the bin edges.
@@ -4744,7 +4740,7 @@ def histogram(a, bins=10, range=None, weights=None, density=False): # pylint: di
 return count, bin_edges
 if density:
 count = F.cast(count, mstype.float32)
-count = count/diff(bin_edges)/F.reduce_sum(count)
+count = count / diff(bin_edges) / F.reduce_sum(count)
 return count, bin_edges


@@ -4800,7 +4796,7 @@ def histogramdd(sample, bins=10, range=None, weights=None, density=False): # pyl
 such as ``histogramdd((X, Y, Z))``.

 The first form should be preferred.
-bins (Union[int, tuple, list], optional): The bin specification:
+bins (Union[int, tuple, list], optional): Default: ``10`` . The bin specification:

 A sequence of arrays describing the monotonically increasing bin edges along
 each dimension.
@@ -4812,12 +4808,12 @@ def histogramdd(sample, bins=10, range=None, weights=None, density=False): # pyl
 ``(lower, upper)`` tuple giving the outer bin edges to be used if the edges
 are not given explicitly in bins. An entry of None in the sequence results in
 the minimum and maximum values being used for the corresponding dimension.
-The default, None, is equivalent to passing a tuple of `D` None values.
+The default, None, is equivalent to passing a tuple of `D` None values. Default: ``None`` .
 weights (Union[list, tuple, Tensor], optional): An array with shape `(N,)` of values
-`w_i` weighing each sample ``(x_i, y_i, z_i, …)``.
+`w_i` weighing each sample ``(x_i, y_i, z_i, …)``. Default: ``None`` .
 density (boolean, optional): If False, the default, returns the number of samples
 in each bin. If True, returns the probability density function at the bin,
-``bin_count / sample_count / bin_volume``.
+``bin_count / sample_count / bin_volume``. Default: ``False`` .

 Returns:
 (Tensor, list of Tensor), the values of the histogram and the bin edges.
@@ -4904,7 +4900,7 @@ def histogram2d(x, y, bins=10, range=None, weights=None, density=False): # pylin
 coordinates of the points to be histogrammed.
 y (Union[list, tuple, Tensor]): An array with shape `(N,)` containing the y
 coordinates of the points to be histogrammed.
-bins (Union[int, tuple, list], optional): The bin specification:
+bins (Union[int, tuple, list], optional): Default: ``10`` . The bin specification:

 If int, the number of bins for the two dimensions ``(nx=ny=bins)``.

@@ -4919,12 +4915,12 @@ def histogram2d(x, y, bins=10, range=None, weights=None, density=False): # pylin
 range(Union[list, tuple], optional): has shape (2, 2), the leftmost and rightmost
 edges of the bins along each dimension (if not specified explicitly in the bins
 parameters): ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
-will be considered outliers and not tallied in the histogram.
+will be considered outliers and not tallied in the histogram. Default: ``None`` .
 weights (Union[list, tuple, Tensor], optional): An array with shape `(N,)` of values
-`w_i` weighing each sample `(x_i, y_i)`.
+`w_i` weighing each sample `(x_i, y_i)`. Default: ``None`` .
 density (boolean, optional): If False, the default, returns the number of samples
 in each bin. If True, returns the probability density function at the bin,
-``bin_count / sample_count / bin_volume``.
+``bin_count / sample_count / bin_volume``. Default: ``False`` .

 Returns:
 (Tensor, Tensor, Tensor), the values of the bi-directional histogram and the bin edges
@@ -5065,8 +5061,8 @@ def polyadd(a1, a2):
 Numpy object poly1d is currently not supported.

 Args:
-a1 (Union[int, float, list, tuple, Tensor): Input polynomial.
-a2 (Union[int, float, list, tuple, Tensor): Input polynomial.
+a1 (Union[int, float, list, tuple, Tensor]): Input polynomial.
+a2 (Union[int, float, list, tuple, Tensor]): Input polynomial.

 Returns:
 Tensor, the sum of the inputs.
@@ -5101,8 +5097,8 @@ def polysub(a1, a2):
 Numpy object poly1d is currently not supported.

 Args:
-a1 (Union[int, float, list, tuple, Tensor): Minuend polynomial.
-a2 (Union[int, float, list, tuple, Tensor): Subtrahend polynomial.
+a1 (Union[int, float, list, tuple, Tensor]): Minuend polynomial.
+a2 (Union[int, float, list, tuple, Tensor]): Subtrahend polynomial.

 Returns:
 Tensor, the difference of the inputs.
@@ -5118,7 +5114,7 @@ def polysub(a1, a2):
 >>> print(np.polysub([2, 10, -2], [3, 10, -4]))
 [-1 0 2]
 """
-return polyadd(a1, F.
+return polyadd(a1, F.neg(_to_tensor(a2)))


 def polyval(p, x):
@@ -5133,10 +5129,10 @@ def polyval(p, x):
 Numpy object poly1d is currently not supported.

 Args:
-p (Union[int, float, bool, list, tuple, Tensor): 1D array of polynomial
+p (Union[int, float, bool, list, tuple, Tensor]): 1D array of polynomial
 coefficients (including coefficients equal to zero) from highest
 degree to the constant term.
-x (Union[int, float, bool, list, tuple, Tensor): A number, an array of
+x (Union[int, float, bool, list, tuple, Tensor]): A number, an array of
 numbers, at which to evaluate `p`.

 Returns:
@@ -5157,7 +5153,7 @@ def polyval(p, x):
 shape = F.shape(x)
 exp_p = arange(_type_convert(int, p.size) - 1, -1, -1).astype(mstype.float32)
 var_p = (x.reshape(shape + (1,)))**exp_p
-return F.reduce_sum(p*var_p, -1)
+return F.reduce_sum(p * var_p, -1)


 def polyder(p, m=1):
@@ -5168,7 +5164,7 @@ def polyder(p, m=1):
 Numpy object poly1d is currently not supported.

 Args:
-p (Union[int, float, bool, list, tuple, Tensor): Polynomial to differentiate.
+p (Union[int, float, bool, list, tuple, Tensor]): Polynomial to differentiate.
 A sequence is interpreted as polynomial coefficients.
 m (int, optional): Default: ``1`` , order of differentiation.

@@ -5205,8 +5201,8 @@ def polymul(a1, a2):
 Numpy object poly1d is currently not supported.

 Args:
-a1 (Union[int, float, bool, list, tuple, Tensor): Input polynomial.
-a2 (Union[int, float, bool, list, tuple, Tensor): Input polynomial.
+a1 (Union[int, float, bool, list, tuple, Tensor]): Input polynomial.
+a2 (Union[int, float, bool, list, tuple, Tensor]): Input polynomial.

 Returns:
 Tensor, a new polynomial representing the derivative.
@@ -5235,10 +5231,10 @@ def polyint(p, m=1, k=None):
 Numpy object poly1d is currently not supported.

 Args:
-p (Union[int, float, bool, list, tuple, Tensor): Polynomial to integrate. A
+p (Union[int, float, bool, list, tuple, Tensor]): Polynomial to integrate. A
 sequence is interpreted as polynomial coefficients.
 m (int, optional): Defaults to 1, Order of the antiderivative.
-k (Union[int, list
+k (Union[int, list[int]], optional): Integration constants. They are given
 in the order of integration: those corresponding to highest-order terms
 come first. If None (default), all constants are assumed to be zero. If
 ``m = 1``, a single scalar can be given instead of a list.
@@ -5342,7 +5338,7 @@ def unwrap(p, discont=3.141592653589793, axis=-1):
 differently than numpy due to differences in round-off.

 Args:
-p (Union[int, float, bool, list, tuple, Tensor): Input array.
+p (Union[int, float, bool, list, tuple, Tensor]): Input array.
 discont (float, optional): Maximum discontinuity between values, default: ``pi`` .
 axis (int, optional): Axis along which unwrap will operate, default: ``-1`` .

@@ -5370,9 +5366,9 @@ def unwrap(p, discont=3.141592653589793, axis=-1):
 axis = _check_axis_in_range(axis, ndim)
 dd = diff(p, axis=axis)
 ddmod = remainder(add(dd, pi), 2*pi) - pi
-ddmod = F.masked_fill(ddmod, F.logical_and(ddmod == -pi, dd > 0), pi)
+ddmod = F.masked_fill(ddmod, F.logical_and(ddmod == -pi, dd > 0), F.cast(pi, ddmod.dtype))
 ph_correct = ddmod - dd
-ph_correct = F.masked_fill(ph_correct, absolute(dd) < discont, 0)
+ph_correct = F.masked_fill(ph_correct, absolute(dd) < discont, F.cast(0, ph_correct.dtype))
 slice_all = _list_comprehensions(F.rank(p), F.make_slice(None, None, None), True)
 slice0 = _tuple_setitem(slice_all, axis, F.make_slice(0, 1, None))
 slice1 = _tuple_setitem(slice_all, axis, F.make_slice(1, None, None))
@@ -5477,14 +5473,14 @@ def ravel_multi_index(multi_index, dims, mode='clip', order='C'):
 Args:
 multi_index (tuple of array_like):
 A tuple of integer arrays, one array for each dimension.
-dims (Union[int, tuple
-mode ({`wrap`, `clip`}): Specifies how out-of-bounds indices are handled. Default: ``clip'
+dims (Union[int, tuple(int)]): The shape of array into which the indices from multi_index apply.
+mode ({`wrap`, `clip`}, optional): Specifies how out-of-bounds indices are handled. Default: ``'clip'``.

 - `wrap`: wrap around
 - `clip`: clip to the range

 In `clip` mode, a negative index which would normally wrap will clip to 0 instead.
-order ({`C`, `F`}): Determines whether the multi-index should be viewed as indexing in
+order ({`C`, `F`}, optional): Determines whether the multi-index should be viewed as indexing in
 row-major (C-style) or column-major (Fortran-style) order.

 Returns:
@@ -5539,7 +5535,7 @@ def _vector_norm(x, _ord, axis, keepdims):
 elif _ord == 0:
 res = P.ReduceSum(keepdims)(F.not_equal(x, 0).astype(mstype.float32), axis)
 else:
-res = power(P.ReduceSum(keepdims)(power(absolute(x), _ord), axis), 1
+res = power(P.ReduceSum(keepdims)(power(absolute(x), _ord), axis), 1. / _ord)
 return res


@@ -5586,7 +5582,7 @@ def norm(x, ord=None, axis=None, keepdims=False): # pylint: disable=redefined-bu
 the 2-norm of ``x.ravel`` will be returned.
 ord (Union[None, 'fro', 'nuc', inf, -inf, int, float], optional): Order of the norm.
 inf means numpy’s inf object. Default: ``None`` .
-axis (Union[
+axis (Union[int, 2-tuple(int), None], optional): If `axis` is an integer, it
 specifies the axis of `x` along which to compute the vector norms. If `axis` is
 a 2-tuple, it specifies the axes that hold 2-D matrices, and the matrix norms of
 these matrices are computed. If `axis` is None then either a vector norm (when x
@@ -5800,72 +5796,71 @@ def rint(x, dtype=None):


 def correlate(a, v, mode='valid'):
-"""
+r"""
 Cross-correlation of two 1-dimensional sequences.

 This function computes the correlation as generally defined in signal processing texts:

-:math:`c_{av}[k] =
+:math:`c_{av}[k] = \sum_{n}{a[n+k] * conj(v[n])}`

 with `a` and `v` sequences being zero-padded where necessary and conj being the conjugate.

 Note:
-
+- `correlate` is currently only used in `mindscience` scientific computing scenarios and
+dose not support other usage scenarios.
+- `correlate` is not supported on Windows platform yet.

 Args:
 a (Union[list, tuple, Tensor]): First input sequence.
 v (Union[list, tuple, Tensor]): Second input sequence.
-mode (str, optional):
-
-
-
-
-
-
-
-
-
+mode (str, optional): Specifies padding mode. The optional values are
+``"same"`` , ``"valid"`` and ``"full"`` . Default: ``"valid"`` .
+
+- ``"same"``: it returns output of length :math:`max(M, N)`. Boundary
+effects are still visible.
+
+- ``"valid"``: it returns output of length :math:`max(M, N) - min(M, N) + 1`.
+The convolution product is only given for points where the signals overlap
+completely. Values outside the signal boundary have no effect.
+
+- ``"full"``: it returns the convolution at each point of overlap, with
+an output shape of :math:`(N + M - 1,)`.At the end-points of the convolution,
+the signals do not overlap completely, and boundary effects may be seen.

 Returns:
-Tensor
+Tensor, Discrete cross-correlation of `a` and `v`.

 Raises:
-TypeError: If
+TypeError: If `a` or `v` is not a tensor.
+TypeError: If `a` and `v` is of different dtype.
 ValueError: If `a` and `v` are empty or have wrong dimensions

 Supported Platforms:
-``GPU``
+``Ascend`` ``GPU`` ``CPU``

 Examples:
->>> import mindspore.numpy as
->>>
+>>> import mindspore.numpy as mnp
+>>> from mindspore import Tensor
+>>> output = mnp.correlate(Tensor([1., 2., 3.]), Tensor([0., 1., 0.5]))
 >>> print(output)
 [3.5]
->>> output =
+>>> output = mnp.correlate(Tensor([1., 2., 3.]), Tensor([0., 1., 0.5]), mode="same")
 >>> print(output)
 [2. 3.5 3. ]
->>> output =
+>>> output = mnp.correlate(Tensor([1., 2., 3., 4., 5.]), Tensor([1., 2.]), mode="full")
 >>> print(output)
-[ 2. 5. 8. 11. 14.]
-"""
-
-
-
-
-
+[ 2. 5. 8. 11. 14. 5.]
+"""
+if isinstance(a, list):
+a = ops.auto_generate.list_to_tuple(a)
+if isinstance(a, tuple):
+a = ops.auto_generate.tuple_to_tensor(a)
+if isinstance(v, list):
+v = ops.auto_generate.list_to_tuple(v)
+if isinstance(v, tuple):
+v = ops.auto_generate.tuple_to_tensor(v)
+return ops.auto_generate.correlate(a, v, mode)

-promote_dtype = _promote(a.dtype, v.dtype)
-# P.Conv2D requires that the two tensors have the same data type.
-# If the promote data type is not supported, it will be converted to float32.
-# The supported dtype list may vary in the future.
-if promote_dtype not in [mstype.float32, mstype.float16]:
-promote_dtype = mstype.float32
-a = a.astype(promote_dtype)
-v = v.astype(promote_dtype)
-if a.size < v.size:
-a, v = v, a
-return _compute_1d_conv(a, v, mode)[::-1]
-return _compute_1d_conv(a, v, mode)


 def _compute_1d_conv(a, v, mode):