mindspore-2.4.10-cp39-cp39-win_amd64.whl → mindspore-2.6.0-cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
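
For readers who want to spot-check a comparison like this locally, the sketch below diffs the two wheels directly. This is a minimal sketch, not the registry's own tooling: it assumes both `.whl` files have already been fetched (for example with `pip download mindspore==2.4.10` and `pip download mindspore==2.6.0`), and the filenames below are illustrative. A wheel is an ordinary zip archive, so the standard library suffices:

```python
# Minimal sketch: compare two locally downloaded wheels.
# The wheel paths are assumptions; adjust to wherever pip placed them.
import difflib
import zipfile

OLD = "mindspore-2.4.10-cp39-cp39-win_amd64.whl"
NEW = "mindspore-2.6.0-cp39-cp39-win_amd64.whl"

with zipfile.ZipFile(OLD) as old_whl, zipfile.ZipFile(NEW) as new_whl:
    old_names, new_names = set(old_whl.namelist()), set(new_whl.namelist())
    print("files added:  ", len(new_names - old_names))
    print("files removed:", len(old_names - new_names))

    # Unified diff of one small file present in both wheels.
    path = "mindspore/version.py"
    a = old_whl.read(path).decode("utf-8", "replace").splitlines(keepends=True)
    b = new_whl.read(path).decode("utf-8", "replace").splitlines(keepends=True)
    print("".join(difflib.unified_diff(a, b, fromfile="2.4.10/" + path,
                                       tofile="2.6.0/" + path)))
```
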
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +13 -6
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -0
- mindspore/_checkparam.py +3 -38
- mindspore/_deprecated/__init__.py +17 -0
- mindspore/_deprecated/jit.py +198 -0
- mindspore/_extends/builtin_operations.py +1 -1
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/__init__.py +6 -7
- mindspore/_extends/parse/compile_config.py +83 -0
- mindspore/_extends/parse/deprecated/__init__.py +0 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +394 -0
- mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
- mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
- mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
- mindspore/_extends/parse/parser.py +47 -198
- mindspore/_extends/parse/resources.py +1 -5
- mindspore/_extends/parse/standard_method.py +229 -99
- mindspore/_extends/pijit/__init__.py +2 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +17 -12
- mindspore/_extends/pijit/tensor_func_list.py +27 -0
- mindspore/_extends/utils.py +1 -1
- mindspore/amp.py +11 -5
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/__init__.py +2 -2
- mindspore/boost/base.py +3 -7
- mindspore/boost/boost_cell_wrapper.py +138 -43
- mindspore/common/__init__.py +6 -3
- mindspore/common/_grad_function.py +56 -0
- mindspore/common/_pijit_context.py +14 -5
- mindspore/common/_register_for_tensor.py +1 -2
- mindspore/common/_stub_tensor.py +30 -14
- mindspore/common/_tensor_cpp_method.py +17 -0
- mindspore/common/_tensor_docs.py +4760 -0
- mindspore/common/api.py +480 -372
- mindspore/common/auto_dynamic_shape.py +41 -44
- mindspore/common/dtype.py +39 -36
- mindspore/common/dump.py +9 -6
- mindspore/common/file_system.py +9 -1
- mindspore/common/generator.py +5 -0
- mindspore/common/hook_handle.py +6 -2
- mindspore/common/initializer.py +13 -10
- mindspore/common/jit_begin_end.py +94 -0
- mindspore/common/jit_config.py +6 -1
- mindspore/common/jit_context.py +76 -0
- mindspore/common/jit_trace.py +378 -0
- mindspore/common/lazy_inline.py +9 -3
- mindspore/common/mindir_util.py +10 -2
- mindspore/common/mutable.py +5 -4
- mindspore/common/parameter.py +135 -52
- mindspore/common/seed.py +2 -2
- mindspore/common/sparse_tensor.py +23 -17
- mindspore/common/tensor.py +975 -1981
- mindspore/communication/__init__.py +7 -5
- mindspore/communication/_comm_helper.py +52 -2
- mindspore/communication/comm_func.py +240 -181
- mindspore/communication/management.py +95 -26
- mindspore/context.py +324 -573
- mindspore/dataset/__init__.py +65 -37
- mindspore/dataset/audio/__init__.py +2 -8
- mindspore/dataset/audio/transforms.py +3 -17
- mindspore/dataset/callback/ds_callback.py +2 -1
- mindspore/dataset/core/config.py +87 -6
- mindspore/dataset/engine/cache_admin.py +3 -3
- mindspore/dataset/engine/cache_client.py +6 -5
- mindspore/dataset/engine/datasets.py +292 -267
- mindspore/dataset/engine/datasets_audio.py +22 -8
- mindspore/dataset/engine/datasets_standard_format.py +46 -27
- mindspore/dataset/engine/datasets_text.py +78 -48
- mindspore/dataset/engine/datasets_user_defined.py +183 -117
- mindspore/dataset/engine/datasets_vision.py +120 -44
- mindspore/dataset/engine/iterators.py +283 -63
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
- mindspore/dataset/engine/obs/util.py +8 -0
- mindspore/dataset/engine/queue.py +40 -0
- mindspore/dataset/engine/samplers.py +289 -43
- mindspore/dataset/engine/serializer_deserializer.py +3 -2
- mindspore/dataset/engine/validators.py +53 -11
- mindspore/dataset/text/__init__.py +7 -6
- mindspore/dataset/text/transforms.py +6 -5
- mindspore/dataset/text/utils.py +3 -3
- mindspore/dataset/transforms/__init__.py +0 -9
- mindspore/dataset/transforms/py_transforms_util.py +17 -0
- mindspore/dataset/transforms/transforms.py +31 -14
- mindspore/dataset/utils/browse_dataset.py +1 -1
- mindspore/dataset/vision/__init__.py +2 -9
- mindspore/dataset/vision/transforms.py +202 -158
- mindspore/dataset/vision/utils.py +7 -5
- mindspore/dataset/vision/validators.py +1 -2
- mindspore/device_context/__init__.py +21 -0
- mindspore/device_context/ascend/__init__.py +25 -0
- mindspore/device_context/ascend/device.py +72 -0
- mindspore/device_context/ascend/op_debug.py +153 -0
- mindspore/device_context/ascend/op_precision.py +193 -0
- mindspore/device_context/ascend/op_tuning.py +123 -0
- mindspore/{ops_generate/gen_constants.py → device_context/cpu/__init__.py} +6 -17
- mindspore/device_context/cpu/device.py +62 -0
- mindspore/device_context/cpu/op_tuning.py +43 -0
- mindspore/device_context/gpu/__init__.py +21 -0
- mindspore/device_context/gpu/device.py +70 -0
- mindspore/device_context/gpu/op_precision.py +67 -0
- mindspore/device_context/gpu/op_tuning.py +175 -0
- mindspore/device_manager.py +170 -0
- mindspore/dnnl.dll +0 -0
- mindspore/experimental/es/embedding_service.py +35 -27
- mindspore/experimental/llm_boost/__init__.py +1 -0
- mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +209 -0
- mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
- mindspore/experimental/llm_boost/register.py +1 -0
- mindspore/experimental/map_parameter.py +4 -4
- mindspore/experimental/optim/adadelta.py +6 -6
- mindspore/experimental/optim/adagrad.py +4 -4
- mindspore/experimental/optim/adam.py +7 -0
- mindspore/experimental/optim/adamax.py +4 -4
- mindspore/experimental/optim/adamw.py +4 -0
- mindspore/experimental/optim/asgd.py +1 -1
- mindspore/experimental/optim/lr_scheduler.py +73 -46
- mindspore/experimental/optim/radam.py +34 -31
- mindspore/experimental/optim/rprop.py +1 -1
- mindspore/experimental/optim/sgd.py +1 -1
- mindspore/hal/contiguous_tensors_handle.py +6 -10
- mindspore/hal/device.py +55 -53
- mindspore/hal/event.py +52 -52
- mindspore/hal/memory.py +179 -120
- mindspore/hal/stream.py +150 -109
- mindspore/include/api/context.h +0 -1
- mindspore/include/dataset/constants.h +7 -4
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +50 -0
- mindspore/mindrecord/__init__.py +21 -8
- mindspore/mindrecord/config.py +17 -316
- mindspore/mindrecord/filereader.py +1 -9
- mindspore/mindrecord/filewriter.py +5 -15
- mindspore/mindrecord/mindpage.py +1 -9
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +798 -761
- mindspore/mint/distributed/__init__.py +70 -4
- mindspore/mint/distributed/distributed.py +2679 -44
- mindspore/mint/linalg/__init__.py +8 -0
- mindspore/mint/nn/__init__.py +743 -22
- mindspore/mint/nn/functional.py +716 -23
- mindspore/mint/nn/layer/__init__.py +21 -4
- mindspore/mint/nn/layer/_functions.py +334 -0
- mindspore/mint/nn/layer/activation.py +276 -1
- mindspore/mint/nn/layer/basic.py +123 -0
- mindspore/mint/nn/layer/conv.py +933 -0
- mindspore/mint/nn/layer/normalization.py +223 -28
- mindspore/mint/nn/layer/padding.py +797 -0
- mindspore/mint/nn/layer/pooling.py +235 -0
- mindspore/mint/optim/__init__.py +3 -1
- mindspore/mint/optim/adam.py +223 -0
- mindspore/mint/optim/adamw.py +26 -19
- mindspore/mint/optim/sgd.py +171 -0
- mindspore/mint/special/__init__.py +2 -1
- mindspore/multiprocessing/__init__.py +5 -0
- mindspore/nn/__init__.py +4 -1
- mindspore/nn/cell.py +1373 -192
- mindspore/nn/dynamic_lr.py +2 -1
- mindspore/nn/layer/activation.py +29 -27
- mindspore/nn/layer/basic.py +51 -35
- mindspore/nn/layer/channel_shuffle.py +3 -3
- mindspore/nn/layer/container.py +1 -1
- mindspore/nn/layer/conv.py +53 -42
- mindspore/nn/layer/embedding.py +12 -11
- mindspore/nn/layer/normalization.py +56 -49
- mindspore/nn/layer/padding.py +4 -3
- mindspore/nn/layer/pooling.py +120 -42
- mindspore/nn/layer/rnn_cells.py +1 -1
- mindspore/nn/layer/rnns.py +2 -1
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +59 -36
- mindspore/nn/learning_rate_schedule.py +8 -4
- mindspore/nn/loss/loss.py +58 -55
- mindspore/nn/optim/ada_grad.py +7 -5
- mindspore/nn/optim/adadelta.py +11 -9
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +19 -15
- mindspore/nn/optim/adamax.py +8 -7
- mindspore/nn/optim/adasum.py +5 -5
- mindspore/nn/optim/asgd.py +3 -1
- mindspore/nn/optim/ftrl.py +11 -9
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/lars.py +1 -4
- mindspore/nn/optim/lazyadam.py +12 -10
- mindspore/nn/optim/momentum.py +7 -6
- mindspore/nn/optim/optimizer.py +3 -3
- mindspore/nn/optim/proximal_ada_grad.py +12 -10
- mindspore/nn/optim/rmsprop.py +13 -12
- mindspore/nn/optim/rprop.py +11 -9
- mindspore/nn/optim/sgd.py +9 -6
- mindspore/nn/optim/tft_wrapper.py +5 -2
- mindspore/nn/optim/thor.py +2 -1
- mindspore/nn/probability/bijector/bijector.py +17 -11
- mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
- mindspore/nn/probability/bijector/invert.py +2 -2
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +3 -2
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +1 -1
- mindspore/nn/probability/distribution/cauchy.py +4 -2
- mindspore/nn/probability/distribution/exponential.py +6 -7
- mindspore/nn/probability/distribution/gamma.py +2 -2
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/half_normal.py +5 -3
- mindspore/nn/probability/distribution/logistic.py +5 -3
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/uniform.py +5 -3
- mindspore/nn/reinforcement/_tensors_queue.py +1 -1
- mindspore/nn/reinforcement/tensor_array.py +1 -1
- mindspore/nn/utils/init.py +13 -11
- mindspore/nn/wrap/__init__.py +6 -6
- mindspore/nn/wrap/cell_wrapper.py +181 -122
- mindspore/nn/wrap/grad_reducer.py +45 -36
- mindspore/nn/wrap/loss_scale.py +6 -7
- mindspore/numpy/array_creations.py +63 -65
- mindspore/numpy/array_ops.py +149 -144
- mindspore/numpy/logic_ops.py +41 -42
- mindspore/numpy/math_ops.py +361 -359
- mindspore/numpy/utils.py +17 -18
- mindspore/numpy/utils_const.py +5 -6
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +5 -3
- mindspore/ops/_grad_experimental/grad_comm_ops.py +112 -16
- mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -2
- mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
- mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -0
- mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
- mindspore/ops/_register_for_op.py +0 -11
- mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
- mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -65
- mindspore/ops/_vmap/vmap_array_ops.py +52 -25
- mindspore/ops/_vmap/vmap_base.py +0 -2
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +21 -14
- mindspore/ops/_vmap/vmap_math_ops.py +15 -16
- mindspore/ops/_vmap/vmap_nn_ops.py +29 -42
- mindspore/ops/auto_generate/__init__.py +4 -3
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +258 -46
- mindspore/ops/auto_generate/gen_extend_func.py +757 -185
- mindspore/ops/auto_generate/gen_ops_def.py +4197 -2243
- mindspore/ops/auto_generate/gen_ops_prim.py +16976 -6055
- mindspore/ops/auto_generate/pyboost_inner_prim.py +221 -87
- mindspore/ops/composite/__init__.py +2 -1
- mindspore/ops/composite/base.py +20 -25
- mindspore/ops/composite/math_ops.py +6 -16
- mindspore/ops/composite/multitype_ops/__init__.py +5 -2
- mindspore/ops/composite/multitype_ops/_compile_utils.py +228 -30
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
- mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
- mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +2 -30
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
- mindspore/ops/function/__init__.py +40 -2
- mindspore/ops/function/_add_attr_func.py +58 -0
- mindspore/ops/function/array_func.py +2089 -2403
- mindspore/ops/function/clip_func.py +80 -23
- mindspore/ops/function/debug_func.py +57 -57
- mindspore/ops/function/grad/__init__.py +1 -0
- mindspore/ops/function/grad/grad_func.py +104 -71
- mindspore/ops/function/image_func.py +2 -2
- mindspore/ops/function/linalg_func.py +47 -78
- mindspore/ops/function/math_func.py +4351 -3813
- mindspore/ops/function/nn_func.py +1712 -637
- mindspore/ops/function/other_func.py +159 -1
- mindspore/ops/function/parameter_func.py +18 -84
- mindspore/ops/function/random_func.py +452 -387
- mindspore/ops/function/reshard_func.py +4 -70
- mindspore/ops/function/sparse_func.py +3 -3
- mindspore/ops/function/sparse_unary_func.py +6 -6
- mindspore/ops/function/spectral_func.py +25 -58
- mindspore/ops/function/vmap_func.py +26 -18
- mindspore/ops/functional.py +23 -7
- mindspore/ops/functional_overload.py +1548 -0
- mindspore/ops/op_info_register.py +32 -244
- mindspore/ops/operations/__init__.py +23 -15
- mindspore/ops/operations/_custom_ops_utils.py +235 -0
- mindspore/ops/operations/_embedding_cache_ops.py +4 -4
- mindspore/ops/operations/_grad_ops.py +2 -43
- mindspore/ops/operations/_infer_ops.py +2 -1
- mindspore/ops/operations/_inner_ops.py +43 -84
- mindspore/ops/operations/_ms_kernel.py +4 -10
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +3 -2
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/array_ops.py +81 -324
- mindspore/ops/operations/comm_ops.py +154 -108
- mindspore/ops/operations/custom_ops.py +298 -87
- mindspore/ops/operations/debug_ops.py +157 -59
- mindspore/ops/operations/inner_ops.py +7 -5
- mindspore/ops/operations/linalg_ops.py +1 -57
- mindspore/ops/operations/manually_defined/_inner.py +1 -1
- mindspore/ops/operations/manually_defined/ops_def.py +928 -180
- mindspore/ops/operations/math_ops.py +32 -234
- mindspore/ops/operations/nn_ops.py +212 -531
- mindspore/ops/operations/other_ops.py +62 -9
- mindspore/ops/operations/random_ops.py +13 -7
- mindspore/ops/operations/reshard_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +2 -2
- mindspore/ops/primitive.py +66 -53
- mindspore/ops/tensor_method.py +1895 -0
- mindspore/ops_generate/__init__.py +0 -5
- mindspore/ops_generate/aclnn/__init__.py +0 -0
- mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +135 -0
- mindspore/ops_generate/aclnn/gen_aclnn_implement.py +257 -0
- mindspore/ops_generate/api/__init__.py +0 -0
- mindspore/ops_generate/api/add_tensor_docs_generator.py +56 -0
- mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +105 -0
- mindspore/ops_generate/api/functional_map_cpp_generator.py +504 -0
- mindspore/ops_generate/api/functional_overload_py_generator.py +112 -0
- mindspore/ops_generate/api/functions_cc_generator.py +237 -0
- mindspore/ops_generate/api/gen_api.py +103 -0
- mindspore/ops_generate/api/op_api_proto.py +235 -0
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +461 -0
- mindspore/ops_generate/common/__init__.py +0 -0
- mindspore/ops_generate/common/base_generator.py +11 -0
- mindspore/ops_generate/common/gen_constants.py +91 -0
- mindspore/ops_generate/common/gen_utils.py +348 -0
- mindspore/ops_generate/common/op_proto.py +473 -0
- mindspore/ops_generate/common/template.py +523 -0
- mindspore/ops_generate/gen_ops.py +22 -1069
- mindspore/ops_generate/op_def/__init__.py +0 -0
- mindspore/ops_generate/op_def/gen_op_def.py +90 -0
- mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +191 -0
- mindspore/ops_generate/op_def/ops_def_cc_generator.py +296 -0
- mindspore/ops_generate/op_def/ops_def_h_generator.py +74 -0
- mindspore/ops_generate/op_def/ops_name_h_generator.py +83 -0
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
- mindspore/ops_generate/op_def_py/__init__.py +0 -0
- mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
- mindspore/ops_generate/op_def_py/op_def_py_generator.py +132 -0
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +489 -0
- mindspore/ops_generate/pyboost/__init__.py +0 -0
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +139 -0
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +93 -0
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +175 -0
- mindspore/ops_generate/pyboost/op_template_parser.py +517 -0
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +407 -0
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +100 -0
- mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +148 -0
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +155 -0
- mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +132 -0
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +272 -0
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +938 -0
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +357 -0
- mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +179 -36
- mindspore/ops_generate/resources/__init__.py +0 -0
- mindspore/ops_generate/resources/resource_list.py +30 -0
- mindspore/ops_generate/resources/resource_loader.py +36 -0
- mindspore/ops_generate/resources/resource_manager.py +64 -0
- mindspore/ops_generate/resources/yaml_loader.py +88 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
- mindspore/parallel/__init__.py +7 -3
- mindspore/parallel/_auto_parallel_context.py +159 -40
- mindspore/parallel/_cell_wrapper.py +132 -15
- mindspore/parallel/_parallel_serialization.py +107 -5
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +7 -2
- mindspore/parallel/_tensor.py +142 -18
- mindspore/parallel/_utils.py +199 -23
- mindspore/parallel/algo_parameter_config.py +4 -4
- mindspore/parallel/auto_parallel.py +732 -0
- mindspore/parallel/checkpoint_convert.py +159 -0
- mindspore/parallel/checkpoint_transform.py +700 -35
- mindspore/parallel/cluster/process_entity/_api.py +276 -50
- mindspore/parallel/cluster/process_entity/_utils.py +41 -6
- mindspore/parallel/cluster/run.py +21 -4
- mindspore/parallel/function/__init__.py +24 -0
- mindspore/parallel/function/reshard_func.py +258 -0
- mindspore/parallel/nn/__init__.py +25 -0
- mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
- mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
- mindspore/parallel/parameter_broadcast.py +25 -14
- mindspore/parallel/shard.py +137 -59
- mindspore/parallel/transform_safetensors.py +364 -305
- mindspore/profiler/__init__.py +22 -5
- mindspore/profiler/analysis/__init__.py +0 -0
- mindspore/profiler/analysis/parser/__init__.py +0 -0
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +170 -0
- mindspore/profiler/analysis/parser/base_parser.py +158 -0
- mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
- mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
- mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +264 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +109 -0
- mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
- mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
- mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
- mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +415 -0
- mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
- mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
- mindspore/profiler/analysis/task_manager.py +131 -0
- mindspore/profiler/analysis/time_converter.py +84 -0
- mindspore/profiler/analysis/viewer/__init__.py +0 -0
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +372 -0
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +250 -0
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +320 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +327 -0
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +376 -0
- mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
- mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +96 -0
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
- mindspore/profiler/analysis/work_flow.py +73 -0
- mindspore/profiler/common/ascend_msprof_exporter.py +139 -0
- mindspore/profiler/common/command_executor.py +90 -0
- mindspore/profiler/common/constant.py +186 -3
- mindspore/profiler/common/file_manager.py +208 -0
- mindspore/profiler/common/log.py +130 -0
- mindspore/profiler/common/msprof_cmd_tool.py +221 -0
- mindspore/profiler/common/path_manager.py +395 -0
- mindspore/profiler/common/process_bar.py +168 -0
- mindspore/profiler/common/process_pool.py +9 -3
- mindspore/profiler/common/profiler_context.py +500 -0
- mindspore/profiler/common/profiler_info.py +304 -0
- mindspore/profiler/common/profiler_meta_data.py +74 -0
- mindspore/profiler/common/profiler_output_path.py +284 -0
- mindspore/profiler/common/profiler_parameters.py +251 -0
- mindspore/profiler/common/profiler_path_manager.py +179 -0
- mindspore/profiler/common/record_function.py +76 -0
- mindspore/profiler/common/tlv_decoder.py +76 -0
- mindspore/profiler/common/util.py +75 -2
- mindspore/profiler/dynamic_profiler.py +341 -75
- mindspore/profiler/envprofiler.py +163 -0
- mindspore/profiler/experimental_config.py +197 -0
- mindspore/profiler/mstx.py +242 -0
- mindspore/profiler/platform/__init__.py +21 -0
- mindspore/profiler/platform/base_profiler.py +40 -0
- mindspore/profiler/platform/cpu_profiler.py +124 -0
- mindspore/profiler/platform/gpu_profiler.py +74 -0
- mindspore/profiler/platform/npu_profiler.py +335 -0
- mindspore/profiler/profiler.py +1073 -90
- mindspore/profiler/profiler_action_controller.py +187 -0
- mindspore/profiler/profiler_interface.py +118 -0
- mindspore/profiler/schedule.py +243 -0
- mindspore/rewrite/api/node.py +15 -13
- mindspore/rewrite/api/symbol_tree.py +2 -3
- mindspore/run_check/_check_version.py +27 -20
- mindspore/run_check/run_check.py +1 -1
- mindspore/runtime/__init__.py +37 -0
- mindspore/runtime/device.py +27 -0
- mindspore/runtime/event.py +209 -0
- mindspore/runtime/executor.py +177 -0
- mindspore/runtime/memory.py +416 -0
- mindspore/runtime/stream.py +460 -0
- mindspore/runtime/thread_bind_core.py +401 -0
- mindspore/safeguard/rewrite_obfuscation.py +12 -9
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +8 -8
- mindspore/train/_utils.py +96 -27
- mindspore/train/amp.py +9 -5
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +2 -16
- mindspore/train/callback/_checkpoint.py +53 -55
- mindspore/train/callback/_cluster_monitor.py +14 -18
- mindspore/train/callback/_early_stop.py +1 -1
- mindspore/train/callback/_flops_collector.py +103 -68
- mindspore/train/callback/_history.py +8 -5
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +0 -3
- mindspore/train/callback/_loss_monitor.py +2 -1
- mindspore/train/callback/_on_request_exit.py +6 -5
- mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
- mindspore/train/callback/_summary_collector.py +52 -19
- mindspore/train/callback/_time_monitor.py +2 -1
- mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +228 -108
- mindspore/train/data_sink.py +25 -2
- mindspore/train/dataset_helper.py +15 -16
- mindspore/train/loss_scale_manager.py +8 -7
- mindspore/train/metrics/accuracy.py +3 -3
- mindspore/train/metrics/confusion_matrix.py +9 -9
- mindspore/train/metrics/error.py +3 -3
- mindspore/train/metrics/hausdorff_distance.py +4 -4
- mindspore/train/metrics/mean_surface_distance.py +3 -3
- mindspore/train/metrics/metric.py +0 -12
- mindspore/train/metrics/occlusion_sensitivity.py +4 -2
- mindspore/train/metrics/precision.py +11 -10
- mindspore/train/metrics/recall.py +9 -9
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +174 -46
- mindspore/train/model.py +269 -136
- mindspore/train/serialization.py +622 -978
- mindspore/train/summary/_summary_adapter.py +2 -2
- mindspore/train/summary/summary_record.py +2 -3
- mindspore/train/train_thor/model_thor.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +6 -3
- mindspore/utils/dryrun.py +140 -0
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/runtime_execution_order_check.py +552 -0
- mindspore/utils/utils.py +138 -4
- mindspore/version.py +1 -1
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/METADATA +3 -3
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/RECORD +564 -395
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/entry_points.txt +1 -1
- mindspore/_install_custom.py +0 -43
- mindspore/common/_register_for_adapter.py +0 -74
- mindspore/common/_tensor_overload.py +0 -139
- mindspore/mindspore_np_dtype.dll +0 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -197
- mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
- mindspore/ops_generate/gen_aclnn_implement.py +0 -263
- mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
- mindspore/ops_generate/gen_pyboost_func.py +0 -1052
- mindspore/ops_generate/gen_utils.py +0 -209
- mindspore/ops_generate/op_proto.py +0 -145
- mindspore/ops_generate/template.py +0 -261
- mindspore/profiler/envprofiling.py +0 -254
- mindspore/profiler/profiling.py +0 -1926
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/WHEEL +0 -0
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/top_level.txt +0 -0
```diff
@@ -17,17 +17,20 @@ from mindspore.common import dtype as mstype
 from mindspore.ops.auto_generate.pyboost_inner_prim import *
 
 
-def acos(input):
+def acosh(input):
     r"""
-    Computes …
+    Computes inverse hyperbolic cosine of the inputs element-wise.
 
     .. math::
 
-        out_i = \…
+        out_i = \cosh^{-1}(input_i)
+
+    .. note::
+        Given an input tensor input, the function computes inverse hyperbolic cosine of every element.
+        Input range is [1, inf].
 
     Args:
-        input (Tensor): The …
-            :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
+        input (Tensor): The input tensor of inverse hyperbolic cosine function.
 
     Returns:
         Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
@@ -42,28 +45,25 @@ def acos(input):
         >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([0…
-        >>> output = ops.…
+        >>> input = Tensor(np.array([1.0, 1.5, 3.0, 100.0]), mindspore.float32)
+        >>> output = ops.acosh_ext(input)
         >>> print(output)
-        [0.…
+        [0.        0.9624236 1.7627472 5.298292 ]
     """
-    return …
+    return acosh_impl(input)
 
 
-def acosh(input):
+def acos(input):
     r"""
-    Computes …
+    Computes arccosine of input tensors element-wise.
 
     .. math::
 
-        out_i = \…
-
-    .. note::
-        Given an input tensor input, the function computes inverse hyperbolic cosine of every element.
-        Input range is [1, inf].
+        out_i = \cos^{-1}(input_i)
 
     Args:
-        input (Tensor): The …
+        input (Tensor): The shape of tensor is
+            :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
 
     Returns:
         Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
@@ -78,12 +78,12 @@ def acosh(input):
         >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
-        >>> input = Tensor(np.array([…
-        >>> output = ops.…
+        >>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
+        >>> output = ops.acos_ext(input)
         >>> print(output)
-        [0.…
+        [0.7377037 1.5307857 1.2661037 0.9764114]
     """
-    return …
+    return acos_impl(input)
 
 
 def adaptive_avg_pool2d_grad(grad_output, x):
@@ -93,6 +93,13 @@ def adaptive_avg_pool2d_grad(grad_output, x):
     return adaptive_avg_pool2d_grad_impl(grad_output, x)
 
 
+def adaptive_avg_pool3d(input, output_size):
+    r"""
+    None
+    """
+    return adaptive_avg_pool3d_impl(input, output_size)
+
+
 def add(input, other, alpha=1):
     r"""
     Adds scaled other value to input Tensor.
@@ -151,14 +158,38 @@ def add(input, other, alpha=1):
 
 def argmax(input, dim=None, keepdim=False):
     r"""
+    argmax(input) -> Tensor
+
+    Return the indices of the maximum values of a tensor.
+
+    Args:
+        input (Tensor): Input tensor.
+
+    Returns:
+        Tensor.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor
+        >>> from mindspore import ops
+        >>> x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
+        >>> output = ops.auto_generate.argmax_ext(x)
+        >>> print(output)
+        6
+
+    .. function:: argmax(input, dim, keepdim=False) -> Tensor
+        :noindex:
+
     Return the indices of the maximum values of a tensor across a dimension.
 
     Args:
         input (Tensor): Input tensor.
-        dim (…
-            value within the flattened input will be returned. Default: ``None`` .
+        dim (int): The dimension to reduce.
         keepdim (bool, optional): Whether the output tensor retains the specified
-            dimension.
+            dimension. Default: ``False`` .
 
     Returns:
         Tensor, indices of the maximum values across a dimension.
@@ -206,46 +237,54 @@ def argmin(input, dim=None, keepdim=False):
     Examples:
         >>> import numpy as np
         >>> from mindspore import Tensor
-        >>> from mindspore import …
+        >>> from mindspore import ops
        >>> x = Tensor(np.array([[1, 20, 5], [67, 8, 9], [130, 24, 15]]).astype(np.float32))
-        >>> output = …
+        >>> output = ops.auto_generate.argmin_ext(x, dim=-1)
         >>> print(output)
         [0 1 2]
     """
     return argmin_impl(input, dim, keepdim)
 
 
-def asin(input):
+def argsort(input, dim=-1, descending=False, stable=False):
     r"""
-    …
+    Sorts the input tensor along the given dimension in specified order and return the sorted indices.
 
-    .. math::
-
-        out_i = \sin^{-1}(input_i)
+    .. warning::
+        This is an experimental optimizer API that is subject to change.
 
     Args:
-        input…
-        …
+        input(Tensor): The input tensor to sort.
+        dim (int, optional): The dim to sort along. Default: ``-1`` , means the last dimension.
+            The Ascend backend only supports sorting the last dimension.
+        descending (bool, optional): The sort order. If `descending` is ``True`` then the elements
+            are sorted in descending order by value. Otherwise sort in ascending order. Default: ``False`` .
+        stable (bool, optional): Whether to use stable sorting algorithm. Default: ``False``.
 
     Returns:
-        Tensor, …
+        Tensor, the indices of sorted input tensor. Data type is int64.
 
     Raises:
-        …
+        ValueError: If `dim` is out of range.
+        TypeError: If dtype of `dim` is not int32.
+        TypeError: If dtype of `descending` is not bool.
+        TypeError: If dtype of `stable` is not bool.
 
     Supported Platforms:
-        ``Ascend``…
+        ``Ascend``
 
     Examples:
         >>> import mindspore
        >>> import numpy as np
         >>> from mindspore import Tensor, ops
-        >>> …
-        >>> …
-        >>> print(…
-        [0…
+        >>> x = Tensor(np.array([[8, 2, 1], [5, 9, 3], [4, 6, 7]]), mindspore.float16)
+        >>> sort = ops.auto_generate.argsort_ext(x)
+        >>> print(sort)
+        [[2 1 0]
+         [2 0 1]
+         [0 1 2]]
     """
-    return …
+    return argsort_impl(input, dim, descending, stable)
 
 
 def asinh(input):
@@ -280,6 +319,39 @@ def asinh(input):
     return asinh_impl(input)
 
 
+def asin(input):
+    r"""
+    Computes arcsine of input tensors element-wise.
+
+    .. math::
+
+        out_i = \sin^{-1}(input_i)
+
+    Args:
+        input (Tensor): The shape of tensor is
+            :math:`(N,*)`, where :math:`*` means any number of additional dimensions.
+
+    Returns:
+        Tensor, has the same shape as `input`. The dtype of output is float32 when dtype of `input` is in [bool, int8, uint8, int16, int32, int64]. Otherwise output has the same dtype as `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([0.74, 0.04, 0.30, 0.56]), mindspore.float32)
+        >>> output = ops.asin_ext(input)
+        >>> print(output)
+        [0.8330927  0.04001068 0.30469266 0.59438497 ]
+    """
+    return asin_impl(input)
+
+
 def atan2(input, other):
     r"""
     Returns arctangent of input/other element-wise.
@@ -316,7 +388,7 @@ def atan2(input, other):
         >>> from mindspore import Tensor, ops
         >>> input = Tensor(np.array([0, 1]), mindspore.float32)
         >>> other = Tensor(np.array([1, 1]), mindspore.float32)
-        >>> output = …
+        >>> output = ops.auto_generate.atan2_ext(input, other)
         >>> print(output)
         [0.        0.7853982]
     """
@@ -356,6 +428,102 @@ def atan(input):
     return atan_impl(input)
 
 
+def avg_pool1d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True):
+    r"""
+    Applies a 1D average pooling over an input Tensor which can be regarded as a composition of 1D input planes.
+
+    Typically the input is of shape :math:`(N_{in}, C_{in}, L_{in})`, avg_pool1d outputs regional average in the
+    :math:`(L_{in})`-dimension. Given kernel size as :math:`ks = l_{ker}` and `stride` as :math:`s = s_0`, the
+    operation is as follows.
+
+    .. math::
+        \text{output}(N_i, C_j, l) = \frac{1}{l_{ker}} \sum_{n=0}^{l_{ker}-1}
+        \text{input}(N_i, C_j, s_0 \times l + n)
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): Tensor of shape :math:`(N, C_{in}, L_{in})`.
+        kernel_size (Union(int, tuple[int])): The size of kernel window used to take the average value.
+        stride (Union(int, tuple[int]), optional): The distance of kernel moving. `stride` can either be an int
+            number or a tuple of one int number. Default: ``None``, the same value as `kernel_size`.
+        padding (Union(int, tuple[int]), optional): The pad length to be filled. `padding` can either be an integer
+            or a tuple of one integer. Default: ``0`` .
+        ceil_mode (bool, optional): If True, apply ceil instead of floor to compute the output shape. Default: ``False``.
+        count_include_pad (bool, optional): If True, include the zero-padding in the averaging calculation. Default: ``True`` .
+
+    Returns:
+        Tensor of shape :math:`(N, C_{in}, L_{out})`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If `kernel_size` or `stride` is not an int.
+        TypeError: If `ceil_mode` or `count_include_pad` is not a bool.
+        ValueError: If `kernel_size` or `stride` is less than `1`.
+        ValueError: If `kernel_size` or `stride` or `padding` is not int nor a tuple whose length is greater than `1`.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input_x = Tensor(np.random.randint(0, 10, [1, 3, 6]), mindspore.float32)
+        >>> output = ops.auto_generate.avg_pool1d_ext(input_x, kernel_size=6, stride=1)
+        >>> print(output.shape)
+        (1, 3, 1)
+    """
+    return avg_pool1d_impl(input, kernel_size, stride, padding, ceil_mode, count_include_pad)
+
+
+def bincount(input, weights=None, minlength=0):
+    r"""
+    Count the occurrences of each value in the input.
+
+    If `minlength` is not specified, the length of the output Tensor is the maximum value in the input plus one.
+    If `minlength` is specified, the length of the output Tensor is the maximum value between `minlength` or
+    the maximum value in the input plus one.
+
+    Each value in the output Tensor represents the number of occurrences of that index value in the input.
+    If `weights` is specified, the output results are weighted,
+    i.e., :math:`out[n] += weight[i]` instead of :math:`out[n] += 1`.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): A one-dimensional Tensor.
+        weights (Tensor, optional): Weights with the same shape as the input. Default: ``None``.
+        minlength (int, optional): The minimum length of output Tensor. Should be non-negative. Default: ``0``.
+
+    Returns:
+        Tensor, If input is non-empty, the output shape is :math:`(max(max(input)+1, minlength), )`,
+        otherwise the shape is :math:`(0, )`.
+
+    Raises:
+        TypeError: If `input` or `weights` is not a Tensor.
+        ValueError: If `input` contains negative values.
+        ValueError: If `input` is not one-dimensional or `input` and `weights` do not have the same shape.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> from mindspore import ops, Tensor
+        >>> print(ops.auto_generate.bincount_ext(Tensor(np.arange(5))))
+        [1 1 1 1 1]
+        >>> print(ops.auto_generate.bincount_ext(Tensor(np.array([0, 1, 1, 3, 2, 1, 7]))))
+        [1 3 1 1 0 0 0 1]
+        >>> w = Tensor(np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6])) # weights
+        >>> x = Tensor(np.array([0, 1, 1, 2, 2, 2]))
+        >>> print(ops.auto_generate.bincount_ext(x, weights=w, minlength=5))
+        [0.3 0.7 1.1 0.  0. ]
+    """
+    return bincount_impl(input, weights, minlength)
+
+
 def bmm(input, mat2):
     r"""
     Performs batch matrix-matrix multiplication of two three-dimensional tensors.
@@ -463,13 +631,6 @@ def fold(input, output_size, kernel_size, dilation=1, padding=0, stride=1):
     return fold_impl(input, converted_output_size, converted_kernel_size, converted_dilation, converted_padding, converted_stride)
 
 
-def copy(variable, value):
-    r"""
-    None
-    """
-    return copy_impl(variable, value)
-
-
 def cummin(input, dim):
     r"""
     Returns a tuple (values, indices) where `values` is the cumulative minimum value of input Tensor `input`
@@ -480,6 +641,9 @@ def cummin(input, dim):
             y_{i} = \min(x_{1}, x_{2}, ... , x_{i})
         \end{array}
 
+    .. note::
+        O2 mode is not supported in Ascend.
+
     Args:
         input (Tensor): The input Tensor, The dimension must be greater than 0.
         dim (int): Operation dimension. The value of `dim` must be in the range `[-input.ndim, input.ndim - 1]`.
@@ -494,9 +658,6 @@ def cummin(input, dim):
         TypeError: If `dim` is not an int.
         ValueError: If `dim` is out the range of `[-input.ndim, input.ndim - 1]`.
 
-    .. note::
-        O2 mode is not supported in Ascend.
-
     Supported Platforms:
         ``Ascend``
 
@@ -561,6 +722,54 @@ def cumsum(input, dim, dtype=None):
     return cumsum_impl(input, dim, dtype)
 
 
+def diag(input, diagonal=0):
+    r"""
+    If input is a vector (1-D tensor), then returns a 2-D square tensor with the elements of input as the diagonal.
+
+    If input is a matrix (2-D tensor), then returns a 1-D tensor with the diagonal elements of input.
+
+    The argument diagonal controls which diagonal to consider:
+
+    - If `diagonal` = 0, it is the main diagonal.
+
+    - If `diagonal` > 0, it is above the main diagonal.
+
+    - If `diagonal` < 0, it is below the main diagonal.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The input tensor.
+        diagonal (int, optional): the diagonal to consider. Defaults: ``0``.
+
+    Returns:
+        Tensor, has the same dtype as the `input`, its shape is up to `diagonal`.
+
+        - If `input` shape is :math:`(x_0)` : then output shape is :math:`(x_0 + \left | diagonal \right | , x_0 + \left | diagonal \right | )` 2-D Tensor.
+
+        - If `input` shape is :math:`(x_0, x_1)` : then output shape is main diagonal to move :math:`(\left | diagonal \right |)` elements remains elements' length 1-D Tensor.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        ValueError: If shape of `input` is not 1-D and 2-D.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor([1, 2, 3, 4]).astype('int32')
+        >>> output = ops.auto_generate.diag_ext(input)
+        >>> print(output)
+        [[1 0 0 0]
+         [0 2 0 0]
+         [0 0 3 0]
+         [0 0 0 4]]
+    """
+    return diag_impl(input, diagonal)
+
+
 def elu(input, alpha=1.0):
     r"""
     Exponential Linear Unit activation function.
@@ -652,6 +861,40 @@ def flatten(input, start_dim=0, end_dim=-1):
     return flatten_impl(input, start_dim, end_dim)
 
 
+def frac(input):
+    r"""
+    Calculates the fractional part of each element in the input.
+
+    .. math::
+        out_i = input_i - \lfloor |input_i| \rfloor * sgn(input_i)
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The input Tensor.
+
+    Returns:
+        Tensor, has the same shape and type as input.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> x = Tensor([2, 4.2, -2.5], mindspore.float16)
+        >>> output = ops.frac_ext(x)
+        >>> print(output)
+        [ 0.      0.1992 -0.5   ]
+    """
+    return frac_impl(input)
+
+
 def histc(input, bins=100, min=0, max=0):
     r"""
     Computes the histogram of a tensor.
@@ -767,6 +1010,56 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
     return unfold_impl(input, converted_kernel_size, converted_dilation, converted_padding, converted_stride)
 
 
+def index_add(input, dim, index, source, alpha=1):
+    r"""
+    Accumulate the elements of `alpha` times `source` into the `input` by adding to the index in the order given in `index`. For example, if ``dim == 0`` , ``index[i] == j`` , and ``alpha = -1`` , then the `i` th row of `source` is subtracted from the `j` th row of `input` . The `dim` th dimension of `source` must have the same size as the length of `index` , and all other dimensions must match `input`, or an error will be raised. For a 3-D tensor, the output is defined as follows:
+
+    .. math::
+        \begin{array}{ll}
+        input[index[i],\ :,\ :]\ +=\ alpha * source[i,\ :,\ :] \qquad \#if\ dim == 0 \\
+        input[:,\ \ index[i],\ :]\ +=\ alpha * source[:,\ \ i,\ :] \qquad \#if\ dim == 1 \\
+        input[:,\ :,\ \ index[i]]\ +=\ alpha * source[:,\ :,\ \ i] \qquad\#if\ dim == 2 \\
+        \end{array}
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The input Tensor.
+        dim (int): The dimension along which to index.
+        index (Tensor): Add the value of "input Tensor" and `source` along the dimension of the `dim` according to the specified index value, with data type int32. The `index` must be 1D with the same size as the size of `source` in the `dim` dimension. The values of `index` should be in [0, b), where the b is the size of "input Tensor" in the `dim` dimension.
+        source (Tensor): The input tensor with the value to add. Must have same data type as "input Tensor". The shape must be the same as "input Tensor" except the `dim` th dimension.
+        alpha (number, optional): The scalar multiplier for source. Default: ``1``.
+
+    Returns:
+        Tensor, has the same shape and dtype as `input`.
+
+    Raises:
+        TypeError: If neither `index` nor `source` is a Tensor.
+        ValueError: If the value of `dim` is out of the dimension range of `source` shape.
+        ValueError: If `index` rank is not the same as `source` rank.
+        ValueError: If shape of `index` is not 1D or size of `index` is not equal to dimension of source[dim].
+        ValueError: If the shape of `source` is not the same as that of `input` except the `dim` axis.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore
+        >>> from mindspore import Tensor, ops
+        >>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
+        >>> index = Tensor(np.array([0, 2]), mindspore.int32)
+        >>> y = Tensor(np.array([[0.5, 1.0], [1.0, 1.5], [2.0, 2.5]]), mindspore.float32)
+        >>> output = ops.auto_generate.index_add_ext(x, 1, index, y, alpha=1)
+        >>> print(output)
+        [[ 1.5  2.   4. ]
+         [ 5.   5.   7.5]
+         [ 9.   8.  11.5]]
+    """
+    return index_add_impl(input, dim, index, source, alpha)
+
+
 def index_select(input, dim, index):
     r"""
     Generates a new Tensor that accesses the values of `input` along the specified `dim` dimension
@@ -813,18 +1106,55 @@ def index_select(input, dim, index):
     return index_select_impl(input, dim, index)
 
 
+def inplace_adds(input, other, alpha=1):
+    r"""
+    None
+    """
+    return inplace_adds_impl(input, other, alpha)
+
+
 def inplace_add(input, other, alpha=1):
     r"""
     None
     """
-    return inplace_add_impl(input, other, alpha)
+    return inplace_add_impl(input, other, alpha)
+
+
+def sub_tensor_(input, other, alpha=1):
+    r"""
+    None
+    """
+    return sub_tensor_impl(input, other, alpha)
 
 
-def …
+def isneginf(input):
     r"""
-    …
+    Determines which elements are -inf for each position.
+
+    .. warning::
+        - This API can be used only on the Atlas A2 training series.
+
+    Args:
+        input (Tensor): Input Tensor.
+
+    Returns:
+        Tensor with the same shape as the input, where elements are `True` if the corresponding element in the `input` is negative infinity, and `False` otherwise.
+
+    Raises:
+        TypeError: If the input is not a tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> from mindspore import ops, Tensor
+        >>> from mindspore import dtype as mstype
+        >>> output = ops.isneginf(Tensor([[-float("inf"), float("inf")], [1, -float("inf")]], mstype.float32))
+        >>> print(output)
+        [[ True False]
+         [False  True]]
     """
-    return …
+    return isneginf_impl(input)
 
 
 def l1_loss(input, target, reduction='mean'):
@@ -908,7 +1238,7 @@ def leaky_relu(input, negative_slope=0.01):
 
     Args:
         input (Tensor): The input of leaky_relu is a Tensor of any dimension.
-        negative_slope (Union[int, float]): Slope of the activation function when the element of `input` is less than 0.
+        negative_slope (Union[int, float], optional): Slope of the activation function when the element of `input` is less than 0.
             Default: ``0.01`` .
 
     Returns:
@@ -933,32 +1263,65 @@ def leaky_relu(input, negative_slope=0.01):
     return leaky_relu_impl(input, negative_slope)
 
 
-def log_softmax(input, dim=None, dtype=None):
+def log10(input):
     r"""
-    …
-    Supposes a slice in the given axis, :math:`x` for each element :math:`x_i`,
-    the Log Softmax function is shown as follows:
+    Returns the logarithm to the base 10 of a tensor element-wise.
 
     .. math::
-        …
+        y_i = \log_{10}(x_i)
 
-    …
+    .. warning::
+        - This is an experimental API that is subject to change or deletion.
+        - If the input value of operator Log10 is within the range (0, 0.01] or [0.95, 1.05], the output accuracy
+          may be affacted.
 
     Args:
-        input (Tensor): The …
-        dim (int, optional): The axis to perform the Log softmax operation. Default: ``None`` .
+        input (Tensor): Input Tensor of any dimension. The value must be greater than 0.
 
-        …
-        …
-        …
-        …
+    Returns:
+        Tensor, has the same shape as the `input`, and the dtype changes according to the `input.dtype`.
+
+        - if `input.dtype` is in [float16, float32, float64, bfloat16], the output dtype is the same as the `input.dtype`.
+        - if `input.dtype` is integer or boolean type, the output dtype is float32.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> x = Tensor(np.array([3.0, 5.0, 7.0]), mindspore.float32)
+        >>> output = ops.auto_generate.log10_ext(x)
+        >>> print(output)
+        [0.47712136 0.69897    0.845098  ]
+    """
+    return log10_impl(input)
+
+
+def log2(input):
+    r"""
+    Returns the logarithm to the base 2 of a tensor element-wise.
+
+    .. math::
+        y_i = \log_2(x_i)
+
+    .. warning::
+        - If the input value of operator Log2 is within the range (0, 0.01] or [0.95, 1.05], the output accuracy
+          may be affacted.
+
+    Args:
+        input (Tensor): Input Tensor of any dimension. The value must be greater than 0.
 
     Returns:
-        Tensor, …
+        Tensor, has the same shape as the `input`. If `input.dtype` is of integer or boolean type, the output dtype
+        will be float32. Otherwise, the output dtype will be the same as `input.dtype`.
 
     Raises:
-        TypeError: If `…
-        ValueError: If `dim` is not in range [-len(input.shape), len(input.shape)).
+        TypeError: If `input` is not a Tensor.
 
     Supported Platforms:
         ``Ascend``
@@ -967,12 +1330,12 @@ def log_softmax(input, dim=None, dtype=None):
         >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
-        >>> …
-        >>> output = ops.auto_generate.…
+        >>> x = Tensor(np.array([3.0, 5.0, 7.0]), mindspore.float32)
+        >>> output = ops.auto_generate.log2_ext(x)
         >>> print(output)
-        […
+        [1.5849625 2.321928  2.807355 ]
     """
-    return …
+    return log2_impl(input)
 
 
 def logaddexp(input, other):
@@ -992,7 +1355,7 @@ def logaddexp(input, other):
         input (Tensor): Input Tensor. The dtype of `input` must be float.
         other (Tensor): Input Tensor. The dtype of `other` must be float.
```
def copy(variable, value):
|
|
467
|
-
r"""
|
|
468
|
-
None
|
|
469
|
-
"""
|
|
470
|
-
return copy_impl(variable, value)
|
|
471
|
-
|
|
472
|
-
|
|
473
634
|
def cummin(input, dim):
|
|
474
635
|
r"""
|
|
475
636
|
Returns a tuple (values, indices) where `values` is the cumulative minimum value of input Tensor `input`
|
|
@@ -480,6 +641,9 @@ def cummin(input, dim):
|
|
|
480
641
|
y_{i} = \min(x_{1}, x_{2}, ... , x_{i})
|
|
481
642
|
\end{array}
|
|
482
643
|
|
|
644
|
+
.. note::
|
|
645
|
+
O2 mode is not supported in Ascend.
|
|
646
|
+
|
|
483
647
|
Args:
|
|
484
648
|
input (Tensor): The input Tensor, The dimension must be greater than 0.
|
|
485
649
|
dim (int): Operation dimension. The value of `dim` must be in the range `[-input.ndim, input.ndim - 1]`.
|
|
@@ -494,9 +658,6 @@ def cummin(input, dim):
|
|
|
494
658
|
TypeError: If `dim` is not an int.
|
|
495
659
|
ValueError: If `dim` is out the range of `[-input.ndim, input.ndim - 1]`.
|
|
496
660
|
|
|
497
|
-
.. note::
|
|
498
|
-
O2 mode is not supported in Ascend.
|
|
499
|
-
|
|
500
661
|
Supported Platforms:
|
|
501
662
|
``Ascend``
|
|
502
663
|
|
|
@@ -561,6 +722,54 @@ def cumsum(input, dim, dtype=None):
|
|
|
561
722
|
return cumsum_impl(input, dim, dtype)
|
|
562
723
|
|
|
563
724
|
|
|
725
|
+
def diag(input, diagonal=0):
|
|
726
|
+
r"""
|
|
727
|
+
If input is a vector (1-D tensor), then returns a 2-D square tensor with the elements of input as the diagonal.
|
|
728
|
+
|
|
729
|
+
If input is a matrix (2-D tensor), then returns a 1-D tensor with the diagonal elements of input.
|
|
730
|
+
|
|
731
|
+
The argument diagonal controls which diagonal to consider:
|
|
732
|
+
|
|
733
|
+
- If `diagonal` = 0, it is the main diagonal.
|
|
734
|
+
|
|
735
|
+
- If `diagonal` > 0, it is above the main diagonal.
|
|
736
|
+
|
|
737
|
+
- If `diagonal` < 0, it is below the main diagonal.
|
|
738
|
+
|
|
739
|
+
.. warning::
|
|
740
|
+
This is an experimental API that is subject to change or deletion.
|
|
741
|
+
|
|
742
|
+
Args:
|
|
743
|
+
input (Tensor): The input tensor.
|
|
744
|
+
diagonal (int, optional): the diagonal to consider. Defaults: ``0``.
|
|
745
|
+
|
|
746
|
+
Returns:
|
|
747
|
+
Tensor, has the same dtype as the `input`, its shape is up to `diagonal`.
|
|
748
|
+
|
|
749
|
+
- If `input` shape is :math:`(x_0)` : then output shape is :math:`(x_0 + \left | diagonal \right | , x_0 + \left | diagonal \right | )` 2-D Tensor.
|
|
750
|
+
|
|
751
|
+
- If `input` shape is :math:`(x_0, x_1)` : then output shape is main diagonal to move :math:`(\left | diagonal \right |)` elements remains elements' length 1-D Tensor.
|
|
752
|
+
|
|
753
|
+
Raises:
|
|
754
|
+
TypeError: If `input` is not a Tensor.
|
|
755
|
+
ValueError: If shape of `input` is not 1-D and 2-D.
|
|
756
|
+
|
|
757
|
+
Supported Platforms:
|
|
758
|
+
``Ascend``
|
|
759
|
+
|
|
760
|
+
Examples:
|
|
761
|
+
>>> from mindspore import Tensor, ops
|
|
762
|
+
>>> input = Tensor([1, 2, 3, 4]).astype('int32')
|
|
763
|
+
>>> output = ops.auto_generate.diag_ext(input)
|
|
764
|
+
>>> print(output)
|
|
765
|
+
[[1 0 0 0]
|
|
766
|
+
[0 2 0 0]
|
|
767
|
+
[0 0 3 0]
|
|
768
|
+
[0 0 0 4]]
|
|
769
|
+
"""
|
|
770
|
+
return diag_impl(input, diagonal)
|
|
771
|
+
|
|
772
|
+
|
|
564
773
|
def elu(input, alpha=1.0):
|
|
565
774
|
r"""
|
|
566
775
|
Exponential Linear Unit activation function.
|
|
@@ -652,6 +861,40 @@ def flatten(input, start_dim=0, end_dim=-1):
|
|
|
652
861
|
return flatten_impl(input, start_dim, end_dim)
|
|
653
862
|
|
|
654
863
|
|
|
864
|
+
def frac(input):
|
|
865
|
+
r"""
|
|
866
|
+
Calculates the fractional part of each element in the input.
|
|
867
|
+
|
|
868
|
+
.. math::
|
|
869
|
+
out_i = input_i - \lfloor |input_i| \rfloor * sgn(input_i)
|
|
870
|
+
|
|
871
|
+
.. warning::
|
|
872
|
+
This is an experimental API that is subject to change or deletion.
|
|
873
|
+
|
|
874
|
+
Args:
|
|
875
|
+
input (Tensor): The input Tensor.
|
|
876
|
+
|
|
877
|
+
Returns:
|
|
878
|
+
Tensor, has the same shape and type as input.
|
|
879
|
+
|
|
880
|
+
Raises:
|
|
881
|
+
TypeError: If `input` is not a Tensor.
|
|
882
|
+
|
|
883
|
+
Supported Platforms:
|
|
884
|
+
``Ascend``
|
|
885
|
+
|
|
886
|
+
Examples:
|
|
887
|
+
>>> import mindspore
|
|
888
|
+
>>> import numpy as np
|
|
889
|
+
>>> from mindspore import Tensor, ops
|
|
890
|
+
>>> x = Tensor([2, 4.2, -2.5], mindspore.float16)
|
|
891
|
+
>>> output = ops.frac_ext(x)
|
|
892
|
+
>>> print(output)
|
|
893
|
+
[ 0. 0.1992 -0.5 ]
|
|
894
|
+
"""
|
|
895
|
+
return frac_impl(input)
|
|
896
|
+
|
|
897
|
+
|
|
655
898
|
def histc(input, bins=100, min=0, max=0):
|
|
656
899
|
r"""
|
|
657
900
|
Computes the histogram of a tensor.
|
|
@@ -767,6 +1010,56 @@ def unfold(input, kernel_size, dilation=1, padding=0, stride=1):
|
|
|
767
1010
|
return unfold_impl(input, converted_kernel_size, converted_dilation, converted_padding, converted_stride)
|
|
768
1011
|
|
|
769
1012
|
|
|
1013
|
+
def index_add(input, dim, index, source, alpha=1):
|
|
1014
|
+
r"""
|
|
1015
|
+
Accumulate the elements of `alpha` times `source` into the `input` by adding to the index in the order given in `index`. For example, if ``dim == 0`` , ``index[i] == j`` , and ``alpha = -1`` , then the `i` th row of `source` is subtracted from the `j` th row of `input` . The `dim` th dimension of `source` must have the same size as the length of `index` , and all other dimensions must match `input`, or an error will be raised. For a 3-D tensor, the output is defined as follows:
|
|
1016
|
+
|
|
1017
|
+
.. math::
|
|
1018
|
+
\begin{array}{ll}
|
|
1019
|
+
input[index[i],\ :,\ :]\ +=\ alpha * source[i,\ :,\ :] \qquad \#if\ dim == 0 \\
|
|
1020
|
+
input[:,\ \ index[i],\ :]\ +=\ alpha * source[:,\ \ i,\ :] \qquad \#if\ dim == 1 \\
|
|
1021
|
+
input[:,\ :,\ \ index[i]]\ +=\ alpha * source[:,\ :,\ \ i] \qquad\#if\ dim == 2 \\
|
|
1022
|
+
\end{array}
|
|
1023
|
+
|
|
1024
|
+
.. warning::
|
|
1025
|
+
This is an experimental API that is subject to change or deletion.
|
|
1026
|
+
|
|
1027
|
+
Args:
|
|
1028
|
+
input (Tensor): The input Tensor.
|
|
1029
|
+
dim (int): The dimension along which to index.
|
|
1030
|
+
index (Tensor): Add the value of "input Tensor" and `source` along the dimension of the `dim` according to the specified index value, with data type int32. The `index` must be 1D with the same size as the size of `source` in the `dim` dimension. The values of `index` should be in [0, b), where the b is the size of "input Tensor" in the `dim` dimension.
|
|
1031
|
+
source (Tensor): The input tensor with the value to add. Must have same data type as "input Tensor". The shape must be the same as "input Tensor" except the `dim` th dimension.
|
|
1032
|
+
alpha (number, optional): The scalar multiplier for source. Default: ``1``.
|
|
1033
|
+
|
|
1034
|
+
Returns:
|
|
1035
|
+
Tensor, has the same shape and dtype as `input`.
|
|
1036
|
+
|
|
1037
|
+
Raises:
|
|
1038
|
+
TypeError: If neither `index` nor `source` is a Tensor.
|
|
1039
|
+
ValueError: If the value of `dim` is out of the dimension range of `source` shape.
|
|
1040
|
+
ValueError: If `index` rank is not the same as `source` rank.
|
|
1041
|
+
ValueError: If shape of `index` is not 1D or size of `index` is not equal to dimension of source[dim].
|
|
1042
|
+
ValueError: If the shape of `source` is not the same as that of `input` except the `dim` axis.
|
|
1043
|
+
|
|
1044
|
+
Supported Platforms:
|
|
1045
|
+
``Ascend``
|
|
1046
|
+
|
|
1047
|
+
Examples:
|
|
1048
|
+
>>> import numpy as np
|
|
1049
|
+
>>> import mindspore
|
|
1050
|
+
>>> from mindspore import Tensor, ops
|
|
1051
|
+
>>> x = Tensor(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), mindspore.float32)
|
|
1052
|
+
>>> index = Tensor(np.array([0, 2]), mindspore.int32)
|
|
1053
|
+
>>> y = Tensor(np.array([[0.5, 1.0], [1.0, 1.5], [2.0, 2.5]]), mindspore.float32)
|
|
1054
|
+
>>> output = ops.auto_generate.index_add_ext(x, 1, index, y, alpha=1)
|
|
1055
|
+
>>> print(output)
|
|
1056
|
+
[[ 1.5 2. 4. ]
|
|
1057
|
+
[ 5. 5. 7.5]
|
|
1058
|
+
[ 9. 8. 11.5]]
|
|
1059
|
+
"""
|
|
1060
|
+
return index_add_impl(input, dim, index, source, alpha)
|
|
1061
|
+
|
|
1062
|
+
|
|
770
1063
|
def index_select(input, dim, index):
|
|
771
1064
|
r"""
|
|
772
1065
|
Generates a new Tensor that accesses the values of `input` along the specified `dim` dimension
|
|
@@ -813,18 +1106,55 @@ def index_select(input, dim, index):
|
|
|
813
1106
|
return index_select_impl(input, dim, index)
|
|
814
1107
|
|
|
815
1108
|
|
|
1109
|
+
def inplace_adds(input, other, alpha=1):
|
|
1110
|
+
r"""
|
|
1111
|
+
None
|
|
1112
|
+
"""
|
|
1113
|
+
return inplace_adds_impl(input, other, alpha)
|
|
1114
|
+
|
|
1115
|
+
|
|
816
1116
|
def inplace_add(input, other, alpha=1):
|
|
817
1117
|
r"""
|
|
818
1118
|
None
|
|
819
1119
|
"""
|
|
820
|
-
return inplace_add_impl(input, other, alpha)
|
|
1120
|
+
return inplace_add_impl(input, other, alpha)
|
|
1121
|
+
|
|
1122
|
+
|
|
1123
|
+
def sub_tensor_(input, other, alpha=1):
|
|
1124
|
+
r"""
|
|
1125
|
+
None
|
|
1126
|
+
"""
|
|
1127
|
+
return sub_tensor_impl(input, other, alpha)
|
|
821
1128
|
|
|
822
1129
|
|
|
823
|
-
def
|
|
1130
|
+
def isneginf(input):
|
|
824
1131
|
r"""
|
|
825
|
-
|
|
1132
|
+
Determines which elements are -inf for each position.
|
|
1133
|
+
|
|
1134
|
+
.. warning::
|
|
1135
|
+
- This API can be used only on the Atlas A2 training series.
|
|
1136
|
+
|
|
1137
|
+
Args:
|
|
1138
|
+
input (Tensor): Input Tensor.
|
|
1139
|
+
|
|
1140
|
+
Returns:
|
|
1141
|
+
Tensor with the same shape as the input, where elements are `True` if the corresponding element in the `input` is negative infinity, and `False` otherwise.
|
|
1142
|
+
|
|
1143
|
+
Raises:
|
|
1144
|
+
TypeError: If the input is not a tensor.
|
|
1145
|
+
|
|
1146
|
+
Supported Platforms:
|
|
1147
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
1148
|
+
|
|
1149
|
+
Examples:
|
|
1150
|
+
>>> from mindspore import ops, Tensor
|
|
1151
|
+
>>> from mindspore import dtype as mstype
|
|
1152
|
+
>>> output = ops.isneginf(Tensor([[-float("inf"), float("inf")], [1, -float("inf")]], mstype.float32))
|
|
1153
|
+
>>> print(output)
|
|
1154
|
+
[[ True False]
|
|
1155
|
+
[False True]]
|
|
826
1156
|
"""
|
|
827
|
-
return
|
|
1157
|
+
return isneginf_impl(input)
|
|
828
1158
|
|
|
829
1159
|
|
|
830
1160
|
def l1_loss(input, target, reduction='mean'):
|
|
@@ -908,7 +1238,7 @@ def leaky_relu(input, negative_slope=0.01):
|
|
|
908
1238
|
|
|
909
1239
|
Args:
|
|
910
1240
|
input (Tensor): The input of leaky_relu is a Tensor of any dimension.
|
|
911
|
-
negative_slope (Union[int, float]): Slope of the activation function when the element of `input` is less than 0.
|
|
1241
|
+
negative_slope (Union[int, float], optional): Slope of the activation function when the element of `input` is less than 0.
|
|
912
1242
|
Default: ``0.01`` .
|
|
913
1243
|
|
|
914
1244
|
Returns:
|
|
@@ -933,32 +1263,65 @@ def leaky_relu(input, negative_slope=0.01):
     return leaky_relu_impl(input, negative_slope)
 
 
-def
+def log10(input):
     r"""
-
-    Supposes a slice in the given axis, :math:`x` for each element :math:`x_i`,
-    the Log Softmax function is shown as follows:
+    Returns the logarithm to the base 10 of a tensor element-wise.
 
     .. math::
-
+        y_i = \log_{10}(x_i)
 
-
+    .. warning::
+        - This is an experimental API that is subject to change or deletion.
+        - If the input value of operator Log10 is within the range (0, 0.01] or [0.95, 1.05], the output accuracy
+          may be affected.
 
     Args:
-        input (Tensor): The
-        dim (int, optional): The axis to perform the Log softmax operation. Default: ``None`` .
+        input (Tensor): Input Tensor of any dimension. The value must be greater than 0.
 
-
-
-
-
+    Returns:
+        Tensor, has the same shape as the `input`, and the dtype changes according to the `input.dtype`.
+
+        - if `input.dtype` is in [float16, float32, float64, bfloat16], the output dtype is the same as the `input.dtype`.
+        - if `input.dtype` is integer or boolean type, the output dtype is float32.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> x = Tensor(np.array([3.0, 5.0, 7.0]), mindspore.float32)
+        >>> output = ops.auto_generate.log10_ext(x)
+        >>> print(output)
+        [0.47712136 0.69897 0.845098 ]
+    """
+    return log10_impl(input)
+
+
+def log2(input):
+    r"""
+    Returns the logarithm to the base 2 of a tensor element-wise.
+
+    .. math::
+        y_i = \log_2(x_i)
+
+    .. warning::
+        - If the input value of operator Log2 is within the range (0, 0.01] or [0.95, 1.05], the output accuracy
+          may be affected.
+
+    Args:
+        input (Tensor): Input Tensor of any dimension. The value must be greater than 0.
 
     Returns:
-        Tensor,
+        Tensor, has the same shape as the `input`. If `input.dtype` is of integer or boolean type, the output dtype
+        will be float32. Otherwise, the output dtype will be the same as `input.dtype`.
 
     Raises:
-        TypeError: If `
-        ValueError: If `dim` is not in range [-len(input.shape), len(input.shape)).
+        TypeError: If `input` is not a Tensor.
 
     Supported Platforms:
         ``Ascend``
@@ -967,12 +1330,12 @@ def log_softmax(input, dim=None, dtype=None):
         >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
-        >>>
-        >>> output = ops.auto_generate.
+        >>> x = Tensor(np.array([3.0, 5.0, 7.0]), mindspore.float32)
+        >>> output = ops.auto_generate.log2_ext(x)
         >>> print(output)
-        [
+        [1.5849625 2.321928 2.807355 ]
     """
-    return
+    return log2_impl(input)
 
 
 def logaddexp(input, other):
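The doctest values in the two hunks above can be verified against NumPy (they agree up to float32 rounding); a minimal check:

```python
import numpy as np

x = np.array([3.0, 5.0, 7.0], dtype=np.float32)
print(np.log10(x))  # ~[0.47712126 0.69897    0.845098  ]
print(np.log2(x))   # ~[1.5849625  2.321928   2.807355  ]
```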
@@ -992,7 +1355,7 @@ def logaddexp(input, other):
         input (Tensor): Input Tensor. The dtype of `input` must be float.
         other (Tensor): Input Tensor. The dtype of `other` must be float.
             If the shape of `input` is not equal to the shape of `other`,
-            they must be broadcastable to a common shape
+            they must be broadcastable to a common shape.
 
     Returns:
         Tensor, with the same dtype as `input` and `other`.
@@ -1016,11 +1379,100 @@ def logaddexp(input, other):
     return logaddexp_impl(input, other)
 
 
-def
+def logsumexp(input, dim, keepdim=False):
+    r"""
+    Computes the logarithm of the sum of exponentiations of all elements along the specified `dim` dimension of the `input` (with numerical stabilization), and retains the dimension based on the `keepdim` parameter.
+
+    .. math::
+
+        logsumexp(input) = \log(\sum(e^{input-input_{max}})) + input_{max}
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): Input Tensor.
+        dim (Union[int, tuple(int), list(int)], optional): The dimension to be reduced (the value should be within `[0, len(input.shape) - 1]`); when the `dim` is `()`, all dimensions are reduced.
+        keepdim (bool, optional): Whether the output tensor retains the dimension `dim`. Default: `False`.
+
+    Returns:
+        Tensor, the dtype changes according to the `input.dtype`, and the shape changes according to the values of `dim` and `keepdim`.
+
+        - If `input.dtype` is in [float16, float32, bfloat16], the output dtype is the same as the `input.dtype`.
+        - If `input.dtype` is an integer or boolean type, the output dtype is float32.
+        - If `dim` is (), and `keepdim` is False, the output is a 0-D tensor representing the logarithm of the sum of exponentiations of all elements in the `input` tensor.
+        - If `dim` is `1`, and `keepdim` is False, the shape of output is :math:`(input.shape[0], input.shape[2], ..., input.shape[n])`.
+        - If `dim` is `(1, 2)`, and `keepdim` is False, the shape of output is :math:`(input.shape[0], input.shape[3], ..., input.shape[n])`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If dtype of `input` is not one of: bool, int8, int16, int32, int64, uint8, float16, float32, bfloat16.
+        TypeError: If `dim` is not an int or tuple(int) or list(int).
+        TypeError: If `keepdim` is not a bool.
+        ValueError: If the value of any elements of `dim` is not in the range `[0, len(input.shape) - 1]`.
+        RuntimeError: If any element of `dim` is repeated.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
+        >>> output = ops.auto_generate.logsumexp_ext(x, 1, keepdim=True)
+        >>> print(output.shape)
+        (3, 1, 5, 6)
+    """
+    return logsumexp_impl(input, dim, keepdim)
+
+
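The stabilized formula in the new docstring can be written out directly in NumPy; a minimal sketch (`logsumexp_ref` is a hypothetical reference, not a MindSpore API):

```python
import numpy as np

def logsumexp_ref(x, dim, keepdim=False):
    # Subtract the max before exponentiating, then add it back in log space,
    # exactly as the docstring formula describes.
    m = np.max(x, axis=dim, keepdims=True)
    out = np.log(np.sum(np.exp(x - m), axis=dim, keepdims=True)) + m
    return out if keepdim else np.squeeze(out, axis=dim)

x = np.random.randn(3, 4, 5, 6).astype(np.float32)
print(logsumexp_ref(x, 1, keepdim=True).shape)  # (3, 1, 5, 6)
```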
+def log_softmax(input, dim=None, dtype=None):
+    r"""
+    Applies the Log Softmax function to the input tensor on the specified axis.
+    Supposes a slice in the given axis, :math:`x` for each element :math:`x_i`,
+    the Log Softmax function is shown as follows:
+
+    .. math::
+        \text{output}(x_i) = \log \left(\frac{\exp(x_i)} {\sum_{j = 0}^{N-1}\exp(x_j)}\right),
+
+    where :math:`N` is the length of the Tensor.
+
+    Args:
+        input (Tensor): The input Tensor.
+        dim (int, optional): The axis to perform the Log softmax operation. Default: ``None`` .
+
+    Keyword Args:
+        dtype (:class:`mindspore.dtype`, optional): The desired dtype of returned Tensor. If not set to None, the input
+            Tensor will be cast to `dtype` before the operation is performed. This is useful for preventing overflows.
+            If set to None, the dtype stays the same as the original Tensor. Default: ``None`` . Supported data types are {float16, float32, double, bfloat16}.
+
+    Returns:
+        Tensor, with the same shape as the input.
+
+    Raises:
+        TypeError: If `dim` is not an int.
+        ValueError: If `dim` is not in range [-len(input.shape), len(input.shape)).
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> logits = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
+        >>> output = ops.auto_generate.log_softmax(logits, dim=-1)
+        >>> print(output)
+        [-4.4519143 -3.4519143 -2.4519143 -1.4519144 -0.4519144]
+    """
+    return log_softmax_impl(input, dim, dtype)
+
+
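Note that log_softmax(x) equals x - logsumexp(x) along the same axis, which ties this function to the stabilized formula above; a minimal NumPy check that reproduces the doctest output up to float32 rounding:

```python
import numpy as np

logits = np.array([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float32)
m = logits.max()
lse = np.log(np.exp(logits - m).sum()) + m  # stabilized logsumexp
print(logits - lse)  # ~[-4.4519143 -3.4519143 -2.4519143 -1.4519143 -0.4519143]
```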
+def matmul(input, other):
     r"""
     None
     """
-    return matmul_impl(input,
+    return matmul_impl(input, other)
 
 
 def matrix_inverse(input):
@@ -1053,42 +1505,93 @@ def matrix_inverse(input):
     return matrix_inverse_impl(input)
 
 
-def
+def max_unpool2d(input, indices, kernel_size, stride=None, padding=0, output_size=None):
+    r"""
+    Computes the inverse of `max_pool2d`.
+
+    `max_unpool2d` keeps the maximal value and sets all positions of non-maximal values to zero. Typically the input is of shape :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`, and the output is of shape :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`. The operation is as follows.
+
+    .. math::
+        \begin{array}{ll} \\
+            H_{out} = (H_{in} - 1) \times stride[0] - 2 \times padding[0] + kernel\_size[0] \\
+            W_{out} = (W_{in} - 1) \times stride[1] - 2 \times padding[1] + kernel\_size[1] \\
+        \end{array}
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The input Tensor to invert. Tensor of shape :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
+        indices (Tensor): Indices of the max values. The shape must be the same as `input`. Values of `indices` must belong to :math:`[0, H_{in} \times W_{in} - 1]`. Data type must be int32 or int64.
+        kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value, an int number that represents height and width of the kernel, or a tuple of two int numbers that represent height and width respectively.
+        stride (Union[int, tuple[int]], optional): The distance of kernel moving, an int number that represents the height and width of movement are both stride, or a tuple of two int numbers that represent height and width of movement respectively. Default: ``None`` , which indicates the moving step is `kernel_size` .
+        padding (Union[int, tuple[int]], optional): The pad value to be filled. Default: ``0`` . If `padding` is an integer, the paddings of height and width are the same, equal to padding. If `padding` is a tuple of two integers, the padding of height and width equal to padding[0] and padding[1] correspondingly.
+        output_size (tuple[int], optional): The target output size. Default: ``None`` . If output_size == (), then the shape of output is computed by `kernel_size`, `stride` and `padding`. If output_size != (), then output_size must be :math:`(N, C, H, W)` , :math:`(C, H, W)` or :math:`(H, W)` and output_size must belong to :math:`[(N, C, H_{out} - stride[0], W_{out} - stride[1]), (N, C, H_{out} + stride[0], W_{out} + stride[1])]`.
+
+    Returns:
+        Tensor, with shape :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, with the same data type as `input`.
+
+    Raises:
+        TypeError: If data type of `input` or `indices` is not supported.
+        TypeError: If `kernel_size`, `stride` or `padding` is neither an int nor a tuple.
+        ValueError: If numbers in `stride`, `padding` or `kernel_size` are not positive.
+        ValueError: If the shapes of `input` and `indices` are different.
+        ValueError: If the length of `input` is not 3 or 4.
+        ValueError: If the type of `output_size` is not tuple.
+        ValueError: If `output_size` is not close to the output size computed by attr `kernel_size`, `stride`, `padding`.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([[[[0, 1], [8, 9]]]]).astype(np.float32))
+        >>> indices = Tensor(np.array([[[[0, 1], [2, 3]]]]).astype(np.int64))
+        >>> output = ops.max_unpool2d_ext(input, indices, 1, stride=1, padding=0)
+        >>> print(output.asnumpy())
+        [[[[0. 1.]
+           [8. 9.]]]]
+    """
+    return max_unpool2d_impl(input, indices, kernel_size, stride, padding, output_size)
+
+
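The output-size relation in the docstring can be checked against the doctest directly; a minimal sketch (`unpool_out_size` is a hypothetical helper for illustration):

```python
# H_out = (H_in - 1) * stride - 2 * padding + kernel_size, per the docstring;
# with H_in = W_in = 2, kernel_size = stride = 1, padding = 0 the 2x2 input
# keeps its shape, which is why the doctest prints the input unchanged.
def unpool_out_size(size_in, kernel, stride, padding):
    return (size_in - 1) * stride - 2 * padding + kernel

print(unpool_out_size(2, 1, 1, 0))  # 2
```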
+def mean(input, dim=None, keepdim=False, dtype=None):
     r"""
     Reduces all dimension of a tensor by averaging all elements in the dimension, by default.
-    And reduce a dimension of `input` along the specified `
+    And reduce a dimension of `input` along the specified `dim`. `keepdim`
     determines whether the dimensions of the output and input are the same.
 
     Note:
-        The `
+        The `dim` with tensor type is only used for compatibility with older versions and is not recommended.
 
     Args:
         input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
             :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
-
+        dim (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. Default: ``None`` ,
             reduce all dimensions. Only constant value is allowed. Assume the rank of `input` is r,
             and the value range is [-r,r).
-
+        keepdim (bool): If ``True`` , keep these reduced dimensions and the length is 1.
             If ``False`` , don't keep these dimensions. Default: ``False`` .
         dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
 
     Returns:
         Tensor, has the same data type as input tensor.
 
-        - If `
+        - If `dim` is ``None`` , and `keepdim` is ``False`` ,
           the output is a 0-D tensor representing the mean of all elements in the input tensor.
-        - If `
+        - If `dim` is int, set as 1, and `keepdim` is ``False`` ,
           the shape of output is :math:`(x_0, x_2, ..., x_R)`.
-        - If `
+        - If `dim` is tuple(int), set as (1, 2), and `keepdim` is ``False`` ,
           the shape of output is :math:`(x_0, x_3, ..., x_R)`.
-        - If `
+        - If `dim` is 1-D Tensor, set as [1, 2], and `keepdim` is ``False`` ,
           the shape of output is :math:`(x_0, x_3, ..., x_R)`.
 
     Raises:
         TypeError: If `x` is not a Tensor.
-        TypeError: If `
-        TypeError: If `
-        ValueError: If `
+        TypeError: If `dim` is not one of the following: int, tuple, list or Tensor.
+        TypeError: If `keepdim` is not a bool.
+        ValueError: If `dim` is out of range.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -1098,7 +1601,7 @@ def mean(input, axis=None, keep_dims=False, dtype=None):
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
         >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
-        >>> output = ops.
+        >>> output = ops.mean_ext(x, 1, keepdim=True)
         >>> result = output.shape
         >>> print(result)
         (3, 1, 5, 6)
@@ -1107,25 +1610,25 @@ def mean(input, axis=None, keep_dims=False, dtype=None):
         ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
         ... [[6, 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8], [10, 10, 10, 10, 10, 10]]]),
         ... mindspore.float32)
-        >>> output = ops.
+        >>> output = ops.mean_ext(x)
         >>> print(output)
         5.0
         >>> print(output.shape)
         ()
-        >>> # case 2: Reduces a dimension along the
-        >>> output = ops.
+        >>> # case 2: Reduces a dimension along the dim 0
+        >>> output = ops.mean_ext(x, 0, True)
         >>> print(output)
         [[[4. 4. 4. 4. 4. 4.]
           [5. 5. 5. 5. 5. 5.]
           [6. 6. 6. 6. 6. 6.]]]
-        >>> # case 3: Reduces a dimension along the
-        >>> output = ops.
+        >>> # case 3: Reduces a dimension along the dim 1
+        >>> output = ops.mean_ext(x, 1, True)
         >>> print(output)
         [[[2. 2. 2. 2. 2. 2.]]
          [[5. 5. 5. 5. 5. 5.]]
          [[8. 8. 8. 8. 8. 8.]]]
-        >>> # case 4: Reduces a dimension along the
-        >>> output = ops.
+        >>> # case 4: Reduces a dimension along the dim 2
+        >>> output = ops.mean_ext(x, 2, True)
         >>> print(output)
         [[[ 2.]
           [ 2.]
@@ -1137,7 +1640,7 @@ def mean(input, axis=None, keep_dims=False, dtype=None):
           [ 8.]
           [10.]]]
     """
-    return mean_impl(input,
+    return mean_impl(input, dim, keepdim, dtype)
 
 
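The renamed parameters map one-to-one onto NumPy's reduction interface, which makes the examples easy to verify; a minimal analogue of the first case:

```python
import numpy as np

# Same shape behavior as ops.mean_ext(x, 1, keepdim=True) above
x = np.random.randn(3, 4, 5, 6).astype(np.float32)
print(np.mean(x, axis=1, keepdims=True).shape)  # (3, 1, 5, 6)
```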
 def mish(input):
@@ -1186,6 +1689,50 @@ def mish(input):
     return mish_impl(input)
 
 
+def mm(input, mat2):
+    r"""
+    Returns the matrix product of two arrays.
+    If `input` is a :math:`(n \times m)` Tensor, `mat2` is a
+    :math:`(m \times p)` Tensor, `out` will be a :math:`(n \times p)` Tensor.
+
+    Note:
+        This function cannot support broadcasting.
+        Refer to :func:`mindspore.ops.matmul` instead if you need a broadcastable function.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The first matrix of matrix multiplication.
+            The last dimension of `input` must be the same size as the first dimension of `mat2`.
+        mat2 (Tensor): The second matrix of matrix multiplication.
+            The last dimension of `input` must be the same size as the first dimension of `mat2`.
+
+    Returns:
+        Tensor, the matrix product of the inputs.
+
+    Raises:
+        ValueError: If the last dimension of `input` is not the same size as the
+            second-to-last dimension of `mat2`.
+        TypeError: If `input` or `mat2` is not a Tensor.
+        TypeError: If dtype of `input` or `mat2` is not float16, float32 or bfloat16.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore as ms
+        >>> from mindspore import ops
+        >>> import numpy as np
+        >>> x1 = ms.Tensor(np.random.rand(2, 3), ms.float32)
+        >>> x2 = ms.Tensor(np.random.rand(3, 4), ms.float32)
+        >>> out = ops.mm_ext(x1, x2)
+        >>> print(out.shape)
+        (2, 4)
+    """
+    return mm_impl(input, mat2)
+
+
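Since `mm` is documented as a strict 2-D matrix product with no broadcasting, its NumPy analogue is a plain 2-D matmul; a minimal sketch:

```python
import numpy as np

# (n x m) @ (m x p) -> (n x p), matching the mm_ext example above
x1 = np.random.rand(2, 3).astype(np.float32)
x2 = np.random.rand(3, 4).astype(np.float32)
print((x1 @ x2).shape)  # (2, 4)
```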
 def mse_loss(input, target, reduction='mean'):
     r"""
     Calculates the mean squared error between the predicted value and the label value.
@@ -1272,34 +1819,34 @@ def outer(input, vec2):
     return outer_impl(input, vec2)
 
 
-def prod(input,
+def prod(input, dim=None, keepdim=False, dtype=None):
     r"""
     Reduces a dimension of a tensor by multiplying all elements in the dimension, by default. And also can
-    reduce a dimension of `input` along the `
-    same by controlling `
+    reduce a dimension of `input` along the `dim`. Determine whether the dimensions of the output and input are the
+    same by controlling `keepdim`.
 
     Args:
         input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
             :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
-
+        dim (int): The dimensions to reduce. Default: ``None`` , reduce all dimensions.
             Only constant value is allowed. Assume the rank of `input` is r, and the value range is [-r,r).
-
+        keepdim (bool): If ``True`` , keep these reduced dimensions and the length is 1.
             If ``False`` , don't keep these dimensions. Default: ``False`` .
         dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
 
     Returns:
         Tensor, has the same data type as input tensor.
 
-        - If `
+        - If `dim` is ``None`` , and `keepdim` is ``False`` ,
           the output is a 0-D tensor representing the product of all elements in the input tensor.
-        - If `
+        - If `dim` is int, set as 1, and `keepdim` is ``False`` ,
           the shape of output is :math:`(input_0, input_2, ..., input_R)`.
 
     Raises:
         TypeError: If `input` is not a Tensor.
-        TypeError: If `
-        TypeError: If `
-        ValueError: If `
+        TypeError: If `dim` is not one of the following: int or None.
+        TypeError: If `keepdim` is not a bool.
+        ValueError: If `dim` is out of range.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -1309,7 +1856,7 @@ def prod(input, axis=None, keep_dims=False, dtype=None):
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
        >>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
-        >>> output = ops.
+        >>> output = ops.prod_ext(x, 1, keepdim=True)
         >>> result = output.shape
         >>> print(result)
         (3, 1, 5, 6)
@@ -1317,25 +1864,25 @@ def prod(input, axis=None, keep_dims=False, dtype=None):
         >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
         ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
         ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
-        >>> output = ops.
+        >>> output = ops.prod_ext(x)
         >>> print(output)
         2.2833798e+33
         >>> print(output.shape)
         ()
-        >>> # case 2: Reduces a dimension along
-        >>> output = ops.
+        >>> # case 2: Reduces a dimension along dim 0.
+        >>> output = ops.prod_ext(x, 0, True)
         >>> print(output)
         [[[ 28.  28.  28.  28.  28.  28.]
           [ 80.  80.  80.  80.  80.  80.]
           [162. 162. 162. 162. 162. 162.]]]
-        >>> # case 3: Reduces a dimension along
-        >>> output = ops.
+        >>> # case 3: Reduces a dimension along dim 1.
+        >>> output = ops.prod_ext(x, 1, True)
         >>> print(output)
         [[[  6.   6.   6.   6.   6.   6.]]
          [[120. 120. 120. 120. 120. 120.]]
          [[504. 504. 504. 504. 504. 504.]]]
-        >>> # case 4: Reduces a dimension along
-        >>> output = ops.
+        >>> # case 4: Reduces a dimension along dim 2.
+        >>> output = ops.prod_ext(x, 2, True)
         >>> print(output)
         [[[1.00000e+00]
           [6.40000e+01]
@@ -1347,40 +1894,7 @@ def prod(input, axis=None, keep_dims=False, dtype=None):
           [2.62144e+05]
           [5.31441e+05]]]
     """
-    return prod_impl(input,
-
-
-def select(input, dim, index):
-    r"""
-    Slices the input tensor along the selected dimension at the given index.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Args:
-        input (Tensor): the input tensor.
-        dim (int): the dimension to slice.
-        index (int): the index to select with.
-
-    Returns:
-        Tensor.
-
-    Raises:
-        TypeError: If input is not a Tensor.
-
-    Supported Platforms:
-        ``Ascend``
-
-    Examples:
-        >>> import mindspore
-        >>> from mindspore import Tensor, mint
-        >>> input = Tensor([[2, 3, 4, 5],[3, 2, 4, 5]])
-        >>> y = mint.select(input, 0, 0)
-        >>> y = Tensor([1,2], mindspore.float32)
-        >>> print(y)
-        [2 3 4 5]
-    """
-    return select_impl(input, dim, index)
+    return prod_impl(input, dim, keepdim, dtype)
 
 
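The prod_ext cases above follow the same reduction conventions as NumPy, which makes them easy to sanity-check; a minimal analogue:

```python
import numpy as np

x = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]], dtype=np.float32)
print(np.prod(x))                         # 36.0 -- product of all elements
print(np.prod(x, axis=0, keepdims=True))  # [[6. 6.]] -- dim 0 reduced, kept
```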
 def selu(input):
@@ -1422,13 +1936,13 @@ def selu(input):
 
     Examples:
         >>> import mindspore
-        >>> from mindspore import Tensor,
+        >>> from mindspore import Tensor, ops
         >>> import numpy as np
         >>> input = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
-        >>> output =
+        >>> output = ops.auto_generate.selu_ext(input)
         >>> print(output)
         [[-1.1113307  4.202804  -1.7575096]
-
+         [ 2.101402  -1.7462534  9.456309 ]]
     """
     return selu_impl(input)
 
@@ -1494,7 +2008,7 @@ def stack(tensors, dim=0):
 
     Args:
         tensors (Union[tuple, list]): A Tuple or list of Tensor objects with the same shape and type.
-        dim (int): Dimension to stack. The range is [-(R+1), R+1). Default: ``0`` .
+        dim (int, optional): Dimension to stack. The range is [-(R+1), R+1). Default: ``0`` .
 
     Returns:
         Tensor. A stacked Tensor with the same type as `tensors`.
@@ -1502,7 +2016,7 @@ def stack(tensors, dim=0):
     Raises:
         TypeError: If the data types of elements in `tensors` are not the same.
         ValueError: If `dim` is out of the range [-(R+1), R+1);
-            or if the shapes of elements in tensors are not the same.
+            or if the shapes of elements in `tensors` are not the same.
 
     Supported Platforms:
         ``Ascend``
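The documented stack behavior (same-shape inputs, a new dimension inserted at `dim`, valid range [-(R+1), R+1)) matches NumPy's; a minimal check:

```python
import numpy as np

a = np.zeros((2, 3), dtype=np.float32)
b = np.ones((2, 3), dtype=np.float32)
print(np.stack([a, b], axis=0).shape)  # (2, 2, 3) -- new dim in front
print(np.stack([a, b], axis=2).shape)  # (2, 3, 2) -- new dim at the end
```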
@@ -1577,6 +2091,67 @@ def sub(input, other, alpha=1):
     return sub_impl(input, other, alpha)
 
 
+def sum(input, dim=None, keepdim=False, dtype=None):
+    r"""
+    Calculate sum of Tensor elements over a given dim.
+
+    Note:
+        The `dim` with tensor type is only used for compatibility with older versions and is not recommended.
+
+    Args:
+        input (Tensor): The input tensor.
+        dim (Union[None, int, tuple(int), list(int), Tensor]): Dimensions along which a sum is performed.
+            If ``None`` , sum all the elements of the input tensor.
+            If the `dim` is a tuple or list of ints, a sum is performed on all the dimensions specified in the tuple.
+            Must be in the range :math:`[-input.ndim, input.ndim)` . Default: ``None`` .
+        keepdim (bool): Whether the output tensor has `dim` retained or not.
+            If ``True`` , keep these reduced dimensions and the length is 1.
+            If ``False`` , don't keep these dimensions. Default: ``False`` .
+        dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .
+
+    Returns:
+        A Tensor, sum of elements over a given `dim` in `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If `dim` is not an int, tuple(int), list(int), Tensor or None.
+        ValueError: If `dim` is not in the range :math:`[-input.ndim, input.ndim)` .
+        TypeError: If `keepdim` is not a bool.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> from mindspore import dtype as mstype
+        >>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
+        ... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
+        ... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mstype.float32)
+        >>> out = ops.sum_ext(x)
+        >>> print(out)
+        270.0
+        >>> out = ops.sum_ext(x, dim=2)
+        >>> print(out)
+        [[ 6. 12. 18.]
+         [24. 30. 36.]
+         [42. 48. 54.]]
+        >>> out = ops.sum_ext(x, dim=2, keepdim=True)
+        >>> print(out)
+        [[[ 6.]
+          [12.]
+          [18.]]
+         [[24.]
+          [30.]
+          [36.]]
+         [[42.]
+          [48.]
+          [54.]]]
+    """
+    return sum_impl(input, dim, keepdim, dtype)
+
+
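As with mean and prod above, the sum_ext reduction semantics line up with NumPy's; a minimal analogue:

```python
import numpy as np

x = np.arange(1.0, 10.0).reshape(3, 3)
print(np.sum(x))                         # 45.0 -- all elements
print(np.sum(x, axis=1))                 # [ 6. 15. 24.]
print(np.sum(x, axis=1, keepdims=True))  # same values, dim kept as length 1
```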
 def topk(input, k, dim=-1, largest=True, sorted=True):
     r"""
     Finds values and indices of the `k` largest or smallest entries along a given dimension.
@@ -1633,7 +2208,7 @@ def topk(input, k, dim=-1, largest=True, sorted=True):
         (Tensor(shape=[3, 2], dtype=Float32, value=
         [[ 9.67299998e-01, 5.36800027e-01],
          [ 6.52499974e-01, 4.68499988e-01],
-         [ 9.67499971e-01, 8.23000014e-01]]), Tensor(shape=[3, 2], dtype=
+         [ 9.67499971e-01, 8.23000014e-01]]), Tensor(shape=[3, 2], dtype=Int64, value=
         [[3, 0],
          [1, 2],
          [2, 3]]))
@@ -1642,7 +2217,7 @@ def topk(input, k, dim=-1, largest=True, sorted=True):
         (Tensor(shape=[3, 2], dtype=Float32, value=
         [[ 2.44700000e-01, 4.30200011e-01],
          [ 1.86800003e-01, 4.38800007e-01],
-         [ 3.56299996e-01, 5.15200019e-01]]), Tensor(shape=[3, 2], dtype=
+         [ 3.56299996e-01, 5.15200019e-01]]), Tensor(shape=[3, 2], dtype=Int64, value=
         [[1, 2],
          [3, 0],
          [0, 1]]))
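The two hunks above pin the index output to Int64. For readers who want the semantics rather than the API, here is a NumPy sketch of top-k along the last dimension (`topk_ref` is a hypothetical helper, not a MindSpore API):

```python
import numpy as np

def topk_ref(x, k):
    # Sort descending, keep the first k; stable sort preserves tie order.
    idx = np.argsort(-x, axis=-1, kind="stable")[..., :k]
    # Indices as int64, matching the corrected doctest dtype above.
    return np.take_along_axis(x, idx, axis=-1), idx.astype(np.int64)

values, indices = topk_ref(np.array([[0.2447, 0.9673, 0.4302, 0.5368]]), 2)
print(values)   # [[0.9673 0.5368]]
print(indices)  # [[1 3]]
```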
@@ -1654,9 +2229,6 @@ def trace(input):
     r"""
     Returns a new tensor that is the sum of the `input` main trace.
 
-    Note:
-        Input must be tensor.
-
     Args:
         input (Tensor): 2-D Tensor.
 
@@ -1691,44 +2263,44 @@ def trace(input):
     return trace_impl(input)
 
 
-def
+def tril(input, diagonal=0):
+    r"""
+    None
+    """
+    return tril_impl(input, diagonal)
+
+
+def t(input):
     r"""
-
+    Transpose the input tensor.
 
     .. warning::
         This is an experimental API that is subject to change or deletion.
 
     Args:
-        input(Tensor):
-        dim0 (int): First axis.
-        dim1 (int): Second axis.
+        input (Tensor): The input tensor.
 
     Returns:
-
+        Tensor, the transposed 2-D tensor; a 1-D tensor is returned as it is.
 
     Raises:
-
-
-
+        ValueError: If the dimension of `input` is greater than 2.
+        ValueError: If `input` is empty.
+        TypeError: If `input` is not a tensor.
 
     Supported Platforms:
         ``Ascend``
 
     Examples:
+        >>> import mindspore
         >>> import numpy as np
-        >>> from mindspore import
-        >>>
-        >>>
-        >>> output
-
-
-
-        return transpose_impl(input, dim0, dim1)
-
-
-def tril(input, diagonal=0):
-    r"""
-    None
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([[1, 2, 3], [4, 5, 6]]), mindspore.float32)
+        >>> output = ops.t_ext(input)
+        >>> print(output)
+        [[ 1.  4.]
+         [ 2.  5.]
+         [ 3.  6.]]
     """
-    return
+    return t_impl(input)
 
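The documented t() behavior maps onto a plain 2-D transpose; a minimal NumPy check of the doctest:

```python
import numpy as np

# Transpose a 2-D array; per the docstring, a 1-D input would come back as is.
a = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32)
print(a.T)
# [[1. 4.]
#  [2. 5.]
#  [3. 6.]]
```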