mindspore-2.4.10-cp39-cp39-win_amd64.whl → mindspore-2.6.0-cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +13 -6
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -0
- mindspore/_checkparam.py +3 -38
- mindspore/_deprecated/__init__.py +17 -0
- mindspore/_deprecated/jit.py +198 -0
- mindspore/_extends/builtin_operations.py +1 -1
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/__init__.py +6 -7
- mindspore/_extends/parse/compile_config.py +83 -0
- mindspore/_extends/parse/deprecated/__init__.py +0 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +394 -0
- mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
- mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
- mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
- mindspore/_extends/parse/parser.py +47 -198
- mindspore/_extends/parse/resources.py +1 -5
- mindspore/_extends/parse/standard_method.py +229 -99
- mindspore/_extends/pijit/__init__.py +2 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +17 -12
- mindspore/_extends/pijit/tensor_func_list.py +27 -0
- mindspore/_extends/utils.py +1 -1
- mindspore/amp.py +11 -5
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/__init__.py +2 -2
- mindspore/boost/base.py +3 -7
- mindspore/boost/boost_cell_wrapper.py +138 -43
- mindspore/common/__init__.py +6 -3
- mindspore/common/_grad_function.py +56 -0
- mindspore/common/_pijit_context.py +14 -5
- mindspore/common/_register_for_tensor.py +1 -2
- mindspore/common/_stub_tensor.py +30 -14
- mindspore/common/_tensor_cpp_method.py +17 -0
- mindspore/common/_tensor_docs.py +4760 -0
- mindspore/common/api.py +480 -372
- mindspore/common/auto_dynamic_shape.py +41 -44
- mindspore/common/dtype.py +39 -36
- mindspore/common/dump.py +9 -6
- mindspore/common/file_system.py +9 -1
- mindspore/common/generator.py +5 -0
- mindspore/common/hook_handle.py +6 -2
- mindspore/common/initializer.py +13 -10
- mindspore/common/jit_begin_end.py +94 -0
- mindspore/common/jit_config.py +6 -1
- mindspore/common/jit_context.py +76 -0
- mindspore/common/jit_trace.py +378 -0
- mindspore/common/lazy_inline.py +9 -3
- mindspore/common/mindir_util.py +10 -2
- mindspore/common/mutable.py +5 -4
- mindspore/common/parameter.py +135 -52
- mindspore/common/seed.py +2 -2
- mindspore/common/sparse_tensor.py +23 -17
- mindspore/common/tensor.py +975 -1981
- mindspore/communication/__init__.py +7 -5
- mindspore/communication/_comm_helper.py +52 -2
- mindspore/communication/comm_func.py +240 -181
- mindspore/communication/management.py +95 -26
- mindspore/context.py +324 -573
- mindspore/dataset/__init__.py +65 -37
- mindspore/dataset/audio/__init__.py +2 -8
- mindspore/dataset/audio/transforms.py +3 -17
- mindspore/dataset/callback/ds_callback.py +2 -1
- mindspore/dataset/core/config.py +87 -6
- mindspore/dataset/engine/cache_admin.py +3 -3
- mindspore/dataset/engine/cache_client.py +6 -5
- mindspore/dataset/engine/datasets.py +292 -267
- mindspore/dataset/engine/datasets_audio.py +22 -8
- mindspore/dataset/engine/datasets_standard_format.py +46 -27
- mindspore/dataset/engine/datasets_text.py +78 -48
- mindspore/dataset/engine/datasets_user_defined.py +183 -117
- mindspore/dataset/engine/datasets_vision.py +120 -44
- mindspore/dataset/engine/iterators.py +283 -63
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
- mindspore/dataset/engine/obs/util.py +8 -0
- mindspore/dataset/engine/queue.py +40 -0
- mindspore/dataset/engine/samplers.py +289 -43
- mindspore/dataset/engine/serializer_deserializer.py +3 -2
- mindspore/dataset/engine/validators.py +53 -11
- mindspore/dataset/text/__init__.py +7 -6
- mindspore/dataset/text/transforms.py +6 -5
- mindspore/dataset/text/utils.py +3 -3
- mindspore/dataset/transforms/__init__.py +0 -9
- mindspore/dataset/transforms/py_transforms_util.py +17 -0
- mindspore/dataset/transforms/transforms.py +31 -14
- mindspore/dataset/utils/browse_dataset.py +1 -1
- mindspore/dataset/vision/__init__.py +2 -9
- mindspore/dataset/vision/transforms.py +202 -158
- mindspore/dataset/vision/utils.py +7 -5
- mindspore/dataset/vision/validators.py +1 -2
- mindspore/device_context/__init__.py +21 -0
- mindspore/device_context/ascend/__init__.py +25 -0
- mindspore/device_context/ascend/device.py +72 -0
- mindspore/device_context/ascend/op_debug.py +153 -0
- mindspore/device_context/ascend/op_precision.py +193 -0
- mindspore/device_context/ascend/op_tuning.py +123 -0
- mindspore/{ops_generate/gen_constants.py → device_context/cpu/__init__.py} +6 -17
- mindspore/device_context/cpu/device.py +62 -0
- mindspore/device_context/cpu/op_tuning.py +43 -0
- mindspore/device_context/gpu/__init__.py +21 -0
- mindspore/device_context/gpu/device.py +70 -0
- mindspore/device_context/gpu/op_precision.py +67 -0
- mindspore/device_context/gpu/op_tuning.py +175 -0
- mindspore/device_manager.py +170 -0
- mindspore/dnnl.dll +0 -0
- mindspore/experimental/es/embedding_service.py +35 -27
- mindspore/experimental/llm_boost/__init__.py +1 -0
- mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +209 -0
- mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
- mindspore/experimental/llm_boost/register.py +1 -0
- mindspore/experimental/map_parameter.py +4 -4
- mindspore/experimental/optim/adadelta.py +6 -6
- mindspore/experimental/optim/adagrad.py +4 -4
- mindspore/experimental/optim/adam.py +7 -0
- mindspore/experimental/optim/adamax.py +4 -4
- mindspore/experimental/optim/adamw.py +4 -0
- mindspore/experimental/optim/asgd.py +1 -1
- mindspore/experimental/optim/lr_scheduler.py +73 -46
- mindspore/experimental/optim/radam.py +34 -31
- mindspore/experimental/optim/rprop.py +1 -1
- mindspore/experimental/optim/sgd.py +1 -1
- mindspore/hal/contiguous_tensors_handle.py +6 -10
- mindspore/hal/device.py +55 -53
- mindspore/hal/event.py +52 -52
- mindspore/hal/memory.py +179 -120
- mindspore/hal/stream.py +150 -109
- mindspore/include/api/context.h +0 -1
- mindspore/include/dataset/constants.h +7 -4
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +50 -0
- mindspore/mindrecord/__init__.py +21 -8
- mindspore/mindrecord/config.py +17 -316
- mindspore/mindrecord/filereader.py +1 -9
- mindspore/mindrecord/filewriter.py +5 -15
- mindspore/mindrecord/mindpage.py +1 -9
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +798 -761
- mindspore/mint/distributed/__init__.py +70 -4
- mindspore/mint/distributed/distributed.py +2679 -44
- mindspore/mint/linalg/__init__.py +8 -0
- mindspore/mint/nn/__init__.py +743 -22
- mindspore/mint/nn/functional.py +716 -23
- mindspore/mint/nn/layer/__init__.py +21 -4
- mindspore/mint/nn/layer/_functions.py +334 -0
- mindspore/mint/nn/layer/activation.py +276 -1
- mindspore/mint/nn/layer/basic.py +123 -0
- mindspore/mint/nn/layer/conv.py +933 -0
- mindspore/mint/nn/layer/normalization.py +223 -28
- mindspore/mint/nn/layer/padding.py +797 -0
- mindspore/mint/nn/layer/pooling.py +235 -0
- mindspore/mint/optim/__init__.py +3 -1
- mindspore/mint/optim/adam.py +223 -0
- mindspore/mint/optim/adamw.py +26 -19
- mindspore/mint/optim/sgd.py +171 -0
- mindspore/mint/special/__init__.py +2 -1
- mindspore/multiprocessing/__init__.py +5 -0
- mindspore/nn/__init__.py +4 -1
- mindspore/nn/cell.py +1373 -192
- mindspore/nn/dynamic_lr.py +2 -1
- mindspore/nn/layer/activation.py +29 -27
- mindspore/nn/layer/basic.py +51 -35
- mindspore/nn/layer/channel_shuffle.py +3 -3
- mindspore/nn/layer/container.py +1 -1
- mindspore/nn/layer/conv.py +53 -42
- mindspore/nn/layer/embedding.py +12 -11
- mindspore/nn/layer/normalization.py +56 -49
- mindspore/nn/layer/padding.py +4 -3
- mindspore/nn/layer/pooling.py +120 -42
- mindspore/nn/layer/rnn_cells.py +1 -1
- mindspore/nn/layer/rnns.py +2 -1
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +59 -36
- mindspore/nn/learning_rate_schedule.py +8 -4
- mindspore/nn/loss/loss.py +58 -55
- mindspore/nn/optim/ada_grad.py +7 -5
- mindspore/nn/optim/adadelta.py +11 -9
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +19 -15
- mindspore/nn/optim/adamax.py +8 -7
- mindspore/nn/optim/adasum.py +5 -5
- mindspore/nn/optim/asgd.py +3 -1
- mindspore/nn/optim/ftrl.py +11 -9
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/lars.py +1 -4
- mindspore/nn/optim/lazyadam.py +12 -10
- mindspore/nn/optim/momentum.py +7 -6
- mindspore/nn/optim/optimizer.py +3 -3
- mindspore/nn/optim/proximal_ada_grad.py +12 -10
- mindspore/nn/optim/rmsprop.py +13 -12
- mindspore/nn/optim/rprop.py +11 -9
- mindspore/nn/optim/sgd.py +9 -6
- mindspore/nn/optim/tft_wrapper.py +5 -2
- mindspore/nn/optim/thor.py +2 -1
- mindspore/nn/probability/bijector/bijector.py +17 -11
- mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
- mindspore/nn/probability/bijector/invert.py +2 -2
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +3 -2
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +1 -1
- mindspore/nn/probability/distribution/cauchy.py +4 -2
- mindspore/nn/probability/distribution/exponential.py +6 -7
- mindspore/nn/probability/distribution/gamma.py +2 -2
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/half_normal.py +5 -3
- mindspore/nn/probability/distribution/logistic.py +5 -3
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/uniform.py +5 -3
- mindspore/nn/reinforcement/_tensors_queue.py +1 -1
- mindspore/nn/reinforcement/tensor_array.py +1 -1
- mindspore/nn/utils/init.py +13 -11
- mindspore/nn/wrap/__init__.py +6 -6
- mindspore/nn/wrap/cell_wrapper.py +181 -122
- mindspore/nn/wrap/grad_reducer.py +45 -36
- mindspore/nn/wrap/loss_scale.py +6 -7
- mindspore/numpy/array_creations.py +63 -65
- mindspore/numpy/array_ops.py +149 -144
- mindspore/numpy/logic_ops.py +41 -42
- mindspore/numpy/math_ops.py +361 -359
- mindspore/numpy/utils.py +17 -18
- mindspore/numpy/utils_const.py +5 -6
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +5 -3
- mindspore/ops/_grad_experimental/grad_comm_ops.py +112 -16
- mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -2
- mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
- mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -0
- mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
- mindspore/ops/_register_for_op.py +0 -11
- mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
- mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -65
- mindspore/ops/_vmap/vmap_array_ops.py +52 -25
- mindspore/ops/_vmap/vmap_base.py +0 -2
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +21 -14
- mindspore/ops/_vmap/vmap_math_ops.py +15 -16
- mindspore/ops/_vmap/vmap_nn_ops.py +29 -42
- mindspore/ops/auto_generate/__init__.py +4 -3
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +258 -46
- mindspore/ops/auto_generate/gen_extend_func.py +757 -185
- mindspore/ops/auto_generate/gen_ops_def.py +4197 -2243
- mindspore/ops/auto_generate/gen_ops_prim.py +16976 -6055
- mindspore/ops/auto_generate/pyboost_inner_prim.py +221 -87
- mindspore/ops/composite/__init__.py +2 -1
- mindspore/ops/composite/base.py +20 -25
- mindspore/ops/composite/math_ops.py +6 -16
- mindspore/ops/composite/multitype_ops/__init__.py +5 -2
- mindspore/ops/composite/multitype_ops/_compile_utils.py +228 -30
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
- mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
- mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +2 -30
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
- mindspore/ops/function/__init__.py +40 -2
- mindspore/ops/function/_add_attr_func.py +58 -0
- mindspore/ops/function/array_func.py +2089 -2403
- mindspore/ops/function/clip_func.py +80 -23
- mindspore/ops/function/debug_func.py +57 -57
- mindspore/ops/function/grad/__init__.py +1 -0
- mindspore/ops/function/grad/grad_func.py +104 -71
- mindspore/ops/function/image_func.py +2 -2
- mindspore/ops/function/linalg_func.py +47 -78
- mindspore/ops/function/math_func.py +4351 -3813
- mindspore/ops/function/nn_func.py +1712 -637
- mindspore/ops/function/other_func.py +159 -1
- mindspore/ops/function/parameter_func.py +18 -84
- mindspore/ops/function/random_func.py +452 -387
- mindspore/ops/function/reshard_func.py +4 -70
- mindspore/ops/function/sparse_func.py +3 -3
- mindspore/ops/function/sparse_unary_func.py +6 -6
- mindspore/ops/function/spectral_func.py +25 -58
- mindspore/ops/function/vmap_func.py +26 -18
- mindspore/ops/functional.py +23 -7
- mindspore/ops/functional_overload.py +1548 -0
- mindspore/ops/op_info_register.py +32 -244
- mindspore/ops/operations/__init__.py +23 -15
- mindspore/ops/operations/_custom_ops_utils.py +235 -0
- mindspore/ops/operations/_embedding_cache_ops.py +4 -4
- mindspore/ops/operations/_grad_ops.py +2 -43
- mindspore/ops/operations/_infer_ops.py +2 -1
- mindspore/ops/operations/_inner_ops.py +43 -84
- mindspore/ops/operations/_ms_kernel.py +4 -10
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +3 -2
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/array_ops.py +81 -324
- mindspore/ops/operations/comm_ops.py +154 -108
- mindspore/ops/operations/custom_ops.py +298 -87
- mindspore/ops/operations/debug_ops.py +157 -59
- mindspore/ops/operations/inner_ops.py +7 -5
- mindspore/ops/operations/linalg_ops.py +1 -57
- mindspore/ops/operations/manually_defined/_inner.py +1 -1
- mindspore/ops/operations/manually_defined/ops_def.py +928 -180
- mindspore/ops/operations/math_ops.py +32 -234
- mindspore/ops/operations/nn_ops.py +212 -531
- mindspore/ops/operations/other_ops.py +62 -9
- mindspore/ops/operations/random_ops.py +13 -7
- mindspore/ops/operations/reshard_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +2 -2
- mindspore/ops/primitive.py +66 -53
- mindspore/ops/tensor_method.py +1895 -0
- mindspore/ops_generate/__init__.py +0 -5
- mindspore/ops_generate/aclnn/__init__.py +0 -0
- mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +135 -0
- mindspore/ops_generate/aclnn/gen_aclnn_implement.py +257 -0
- mindspore/ops_generate/api/__init__.py +0 -0
- mindspore/ops_generate/api/add_tensor_docs_generator.py +56 -0
- mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +105 -0
- mindspore/ops_generate/api/functional_map_cpp_generator.py +504 -0
- mindspore/ops_generate/api/functional_overload_py_generator.py +112 -0
- mindspore/ops_generate/api/functions_cc_generator.py +237 -0
- mindspore/ops_generate/api/gen_api.py +103 -0
- mindspore/ops_generate/api/op_api_proto.py +235 -0
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +461 -0
- mindspore/ops_generate/common/__init__.py +0 -0
- mindspore/ops_generate/common/base_generator.py +11 -0
- mindspore/ops_generate/common/gen_constants.py +91 -0
- mindspore/ops_generate/common/gen_utils.py +348 -0
- mindspore/ops_generate/common/op_proto.py +473 -0
- mindspore/ops_generate/common/template.py +523 -0
- mindspore/ops_generate/gen_ops.py +22 -1069
- mindspore/ops_generate/op_def/__init__.py +0 -0
- mindspore/ops_generate/op_def/gen_op_def.py +90 -0
- mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +191 -0
- mindspore/ops_generate/op_def/ops_def_cc_generator.py +296 -0
- mindspore/ops_generate/op_def/ops_def_h_generator.py +74 -0
- mindspore/ops_generate/op_def/ops_name_h_generator.py +83 -0
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
- mindspore/ops_generate/op_def_py/__init__.py +0 -0
- mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
- mindspore/ops_generate/op_def_py/op_def_py_generator.py +132 -0
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +489 -0
- mindspore/ops_generate/pyboost/__init__.py +0 -0
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +139 -0
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +93 -0
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +175 -0
- mindspore/ops_generate/pyboost/op_template_parser.py +517 -0
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +407 -0
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +100 -0
- mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +148 -0
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +155 -0
- mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +132 -0
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +272 -0
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +938 -0
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +357 -0
- mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +179 -36
- mindspore/ops_generate/resources/__init__.py +0 -0
- mindspore/ops_generate/resources/resource_list.py +30 -0
- mindspore/ops_generate/resources/resource_loader.py +36 -0
- mindspore/ops_generate/resources/resource_manager.py +64 -0
- mindspore/ops_generate/resources/yaml_loader.py +88 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
- mindspore/parallel/__init__.py +7 -3
- mindspore/parallel/_auto_parallel_context.py +159 -40
- mindspore/parallel/_cell_wrapper.py +132 -15
- mindspore/parallel/_parallel_serialization.py +107 -5
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +7 -2
- mindspore/parallel/_tensor.py +142 -18
- mindspore/parallel/_utils.py +199 -23
- mindspore/parallel/algo_parameter_config.py +4 -4
- mindspore/parallel/auto_parallel.py +732 -0
- mindspore/parallel/checkpoint_convert.py +159 -0
- mindspore/parallel/checkpoint_transform.py +700 -35
- mindspore/parallel/cluster/process_entity/_api.py +276 -50
- mindspore/parallel/cluster/process_entity/_utils.py +41 -6
- mindspore/parallel/cluster/run.py +21 -4
- mindspore/parallel/function/__init__.py +24 -0
- mindspore/parallel/function/reshard_func.py +258 -0
- mindspore/parallel/nn/__init__.py +25 -0
- mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
- mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
- mindspore/parallel/parameter_broadcast.py +25 -14
- mindspore/parallel/shard.py +137 -59
- mindspore/parallel/transform_safetensors.py +364 -305
- mindspore/profiler/__init__.py +22 -5
- mindspore/profiler/analysis/__init__.py +0 -0
- mindspore/profiler/analysis/parser/__init__.py +0 -0
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +170 -0
- mindspore/profiler/analysis/parser/base_parser.py +158 -0
- mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
- mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
- mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +264 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +109 -0
- mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
- mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
- mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
- mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +415 -0
- mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
- mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
- mindspore/profiler/analysis/task_manager.py +131 -0
- mindspore/profiler/analysis/time_converter.py +84 -0
- mindspore/profiler/analysis/viewer/__init__.py +0 -0
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +372 -0
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +250 -0
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +320 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +327 -0
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +376 -0
- mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
- mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +96 -0
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
- mindspore/profiler/analysis/work_flow.py +73 -0
- mindspore/profiler/common/ascend_msprof_exporter.py +139 -0
- mindspore/profiler/common/command_executor.py +90 -0
- mindspore/profiler/common/constant.py +186 -3
- mindspore/profiler/common/file_manager.py +208 -0
- mindspore/profiler/common/log.py +130 -0
- mindspore/profiler/common/msprof_cmd_tool.py +221 -0
- mindspore/profiler/common/path_manager.py +395 -0
- mindspore/profiler/common/process_bar.py +168 -0
- mindspore/profiler/common/process_pool.py +9 -3
- mindspore/profiler/common/profiler_context.py +500 -0
- mindspore/profiler/common/profiler_info.py +304 -0
- mindspore/profiler/common/profiler_meta_data.py +74 -0
- mindspore/profiler/common/profiler_output_path.py +284 -0
- mindspore/profiler/common/profiler_parameters.py +251 -0
- mindspore/profiler/common/profiler_path_manager.py +179 -0
- mindspore/profiler/common/record_function.py +76 -0
- mindspore/profiler/common/tlv_decoder.py +76 -0
- mindspore/profiler/common/util.py +75 -2
- mindspore/profiler/dynamic_profiler.py +341 -75
- mindspore/profiler/envprofiler.py +163 -0
- mindspore/profiler/experimental_config.py +197 -0
- mindspore/profiler/mstx.py +242 -0
- mindspore/profiler/platform/__init__.py +21 -0
- mindspore/profiler/platform/base_profiler.py +40 -0
- mindspore/profiler/platform/cpu_profiler.py +124 -0
- mindspore/profiler/platform/gpu_profiler.py +74 -0
- mindspore/profiler/platform/npu_profiler.py +335 -0
- mindspore/profiler/profiler.py +1073 -90
- mindspore/profiler/profiler_action_controller.py +187 -0
- mindspore/profiler/profiler_interface.py +118 -0
- mindspore/profiler/schedule.py +243 -0
- mindspore/rewrite/api/node.py +15 -13
- mindspore/rewrite/api/symbol_tree.py +2 -3
- mindspore/run_check/_check_version.py +27 -20
- mindspore/run_check/run_check.py +1 -1
- mindspore/runtime/__init__.py +37 -0
- mindspore/runtime/device.py +27 -0
- mindspore/runtime/event.py +209 -0
- mindspore/runtime/executor.py +177 -0
- mindspore/runtime/memory.py +416 -0
- mindspore/runtime/stream.py +460 -0
- mindspore/runtime/thread_bind_core.py +401 -0
- mindspore/safeguard/rewrite_obfuscation.py +12 -9
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +8 -8
- mindspore/train/_utils.py +96 -27
- mindspore/train/amp.py +9 -5
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +2 -16
- mindspore/train/callback/_checkpoint.py +53 -55
- mindspore/train/callback/_cluster_monitor.py +14 -18
- mindspore/train/callback/_early_stop.py +1 -1
- mindspore/train/callback/_flops_collector.py +103 -68
- mindspore/train/callback/_history.py +8 -5
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +0 -3
- mindspore/train/callback/_loss_monitor.py +2 -1
- mindspore/train/callback/_on_request_exit.py +6 -5
- mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
- mindspore/train/callback/_summary_collector.py +52 -19
- mindspore/train/callback/_time_monitor.py +2 -1
- mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +228 -108
- mindspore/train/data_sink.py +25 -2
- mindspore/train/dataset_helper.py +15 -16
- mindspore/train/loss_scale_manager.py +8 -7
- mindspore/train/metrics/accuracy.py +3 -3
- mindspore/train/metrics/confusion_matrix.py +9 -9
- mindspore/train/metrics/error.py +3 -3
- mindspore/train/metrics/hausdorff_distance.py +4 -4
- mindspore/train/metrics/mean_surface_distance.py +3 -3
- mindspore/train/metrics/metric.py +0 -12
- mindspore/train/metrics/occlusion_sensitivity.py +4 -2
- mindspore/train/metrics/precision.py +11 -10
- mindspore/train/metrics/recall.py +9 -9
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +174 -46
- mindspore/train/model.py +269 -136
- mindspore/train/serialization.py +622 -978
- mindspore/train/summary/_summary_adapter.py +2 -2
- mindspore/train/summary/summary_record.py +2 -3
- mindspore/train/train_thor/model_thor.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +6 -3
- mindspore/utils/dryrun.py +140 -0
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/runtime_execution_order_check.py +552 -0
- mindspore/utils/utils.py +138 -4
- mindspore/version.py +1 -1
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/METADATA +3 -3
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/RECORD +564 -395
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/entry_points.txt +1 -1
- mindspore/_install_custom.py +0 -43
- mindspore/common/_register_for_adapter.py +0 -74
- mindspore/common/_tensor_overload.py +0 -139
- mindspore/mindspore_np_dtype.dll +0 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -197
- mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
- mindspore/ops_generate/gen_aclnn_implement.py +0 -263
- mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
- mindspore/ops_generate/gen_pyboost_func.py +0 -1052
- mindspore/ops_generate/gen_utils.py +0 -209
- mindspore/ops_generate/op_proto.py +0 -145
- mindspore/ops_generate/template.py +0 -261
- mindspore/profiler/envprofiling.py +0 -254
- mindspore/profiler/profiling.py +0 -1926
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/WHEEL +0 -0
- {mindspore-2.4.10.dist-info → mindspore-2.6.0.dist-info}/top_level.txt +0 -0
mindspore/mint/__init__.py
CHANGED
@@ -16,16 +16,15 @@
 from __future__ import absolute_import
 import mindspore.ops as ops
 from mindspore.ops.primitive import constexpr
-from mindspore.common._register_for_tensor import tensor_operator_registry_for_mint
 from mindspore.common.tensor import Tensor
-from mindspore.ops.function.array_func import gather_ext as gather
+from mindspore.ops.function.array_func import gather_ext as gather
 from mindspore.ops.function.nn_func import conv2d_ext as conv2d
 from mindspore.mint.nn.functional import sigmoid
 from mindspore.mint.nn import functional
 from mindspore.mint import linalg
 from mindspore.mint import special
 from mindspore.mint import distributed
-from mindspore.ops import erf
+from mindspore.ops import erf
 from mindspore.ops.function.math_func import linspace_ext as linspace
 from mindspore.ops.function.math_func import median_ext as median
 from mindspore.ops.function.array_func import ones_like_ext as ones_like
@@ -33,21 +32,29 @@ from mindspore.ops.function.array_func import full_ext as full
 from mindspore.ops.function.array_func import zeros_like_ext as zeros_like
 from mindspore.ops.function.array_func import unique_ext as unique
 from mindspore.ops.function.array_func import chunk_ext as chunk
+from mindspore.ops.functional_overload import empty
+from mindspore.ops.function.array_func import empty_like
 from mindspore.ops.function.math_func import isclose
 from mindspore.ops.auto_generate import abs
+from mindspore.ops.auto_generate import clone
+from mindspore.ops.function.array_func import full_like_ext as full_like
 # 1
-from mindspore.ops.function.math_func import divide
+from mindspore.ops.function.math_func import divide
 from mindspore.ops.auto_generate import topk_ext as topk
 from mindspore.ops.function.math_func import roll
 # 2
 from mindspore.ops.function.math_func import sin
 # 3
-from mindspore.ops.
+from mindspore.ops.functional_overload import clamp, where
+from mindspore.ops.functional_overload import clip
+from mindspore.ops.functional_overload import fmod
+from mindspore.ops.functional_overload import max
+from mindspore.ops.functional_overload import min
 # 4
 from mindspore.ops.auto_generate import sinc
 from mindspore.ops.auto_generate import sinh
 from mindspore.ops.auto_generate import cosh
-from mindspore.ops.
+from mindspore.ops.functional_overload import xlogy
 # 5
 from mindspore.ops.auto_generate import cumsum_ext as cumsum
 # 6
@@ -56,20 +63,23 @@ from mindspore.ops.auto_generate import stack_ext as stack
 # 7
 from mindspore.ops.function.array_func import unsqueeze
 # 8
-from mindspore.ops.auto_generate import
+from mindspore.ops.auto_generate import transpose_ext_view as transpose
+from mindspore.ops.auto_generate import batch_norm_elemt
+from mindspore.ops.auto_generate import batch_norm_gather_stats_with_counts
+from mindspore.ops.auto_generate import batch_norm_stats
 # 9
 from mindspore.ops.auto_generate import masked_select
 from mindspore.ops.function.math_func import cross
 # 10
 from mindspore.ops.function.math_func import ne
 # 11
-
+from mindspore.ops.function.math_func import cdist as cdist_
 # 12
-from mindspore.ops.
+from mindspore.ops.functional_overload import repeat_interleave
 # 13
 from mindspore.ops.functional import flip
 # 14
-
+from mindspore.ops.auto_generate import mv
 # 15
 from mindspore.ops.auto_generate import flatten_ext as flatten
 # 16
@@ -78,17 +88,17 @@ from mindspore.ops.auto_generate import bmm_ext as bmm
 # 17

 # 18
-
+
 # 19
 from mindspore.ops.functional import log
 # 20

 # 21
-from mindspore.ops.
+from mindspore.ops.function.math_func import mul_ext as mul
 # 22
-
+from mindspore.ops.functional import cumprod
 # 23
-
+from mindspore.ops.auto_generate import exp2
 # 24

 # 25
@@ -106,7 +116,7 @@ from mindspore.ops.functional import searchsorted
 # 31

 # 32
-
+from mindspore.ops.function.math_func import einsum_ext as einsum
 # 33

 # 34
@@ -146,7 +156,7 @@ from mindspore.ops.functional import tile
 # 51

 # 52
-
+from mindspore.ops.functional_overload import addcdiv
 # 53

 # 54
@@ -158,7 +168,8 @@ from mindspore.ops.function.math_func import norm_ext as norm
 # 57
 from mindspore.ops.functional import broadcast_to
 # 58
-from mindspore.ops.
+from mindspore.ops.functional_overload import greater_equal, ge
+
 # 59
 from mindspore.ops.functional import square
 # 60
@@ -170,7 +181,7 @@ from mindspore.ops.functional import maximum
 # 63
 from mindspore.ops.functional import minimum
 # 64
-
+from mindspore.ops.functional import ravel
 # 65
 from mindspore.ops.functional import logical_and
 # 66
@@ -184,7 +195,7 @@ from mindspore.ops.functional import less_equal, le
 # 70
 from mindspore.ops.functional import negative, neg
 # 71
-
+
 # 72

 # 73
@@ -194,7 +205,7 @@ from mindspore.ops.function.array_func import sort_ext as sort
 # 75
 from mindspore.ops.functional import less, lt
 # 76
-from mindspore.ops.
+from mindspore.ops.function.math_func import pow_ext as pow
 # 77

 # 78
@@ -202,13 +213,13 @@ from mindspore.ops.function import arange_ext as arange
 # 79

 # 80
-
+from mindspore.ops.functional_overload import div
 # 81
 from mindspore.ops.auto_generate import index_select_ext as index_select
 # 82
 from mindspore.ops.auto_generate import cummin_ext as cummin
 # 83
-from mindspore.ops.
+from mindspore.ops.auto_generate import narrow
 # 84

 # 85
@@ -220,19 +231,19 @@ from mindspore.ops.auto_generate import trunc
 # 88

 # 89
-
+from mindspore.ops.auto_generate import argsort_ext as argsort
 # 90
-
+from mindspore.ops.auto_generate import isinf
 # 91

 # 92
-
+from mindspore.ops.function.math_func import polar
 # 93

 # 94
 from mindspore.ops.function.math_func import tanh
 # 95
-
+from mindspore.ops.function.math_func import diff_ext as diff
 # 96

 # 97
@@ -262,7 +273,7 @@ from mindspore.ops.function.math_func import tanh
 # 109
 from mindspore.ops.auto_generate import argmin_ext as argmin
 # 110
-
+from mindspore.ops.function.nn_func import softmax_ext
 # 111

 # 112
@@ -282,11 +293,14 @@ from mindspore.ops.auto_generate import argmin_ext as argmin
 # 119

 # 120
-
+from mindspore.ops.auto_generate import isneginf_ext as isneginf
 # 121

 # 122

+# 123
+from mindspore.ops.function.math_func import var_ext as var
+
 # 151
 from mindspore.ops.function.math_func import acos_ext as acos
 from mindspore.ops.function.math_func import arccos_ext as arccos
@@ -294,6 +308,8 @@ from mindspore.ops.function.math_func import arccos_ext as arccos
 from mindspore.ops.function.math_func import acosh_ext as acosh
 from mindspore.ops.function.math_func import arccosh_ext as arccosh
 # 172
+from mindspore.ops.function.math_func import addcmul_ext as addcmul
+
 from mindspore.ops.function.math_func import asin_ext as asin
 from mindspore.ops.function.math_func import arcsin_ext as arcsin
 # 173
@@ -315,6 +331,9 @@ from mindspore.ops.function.math_func import round
 # 182
 from mindspore.ops.function.math_func import bernoulli_ext as bernoulli

+# 201
+from mindspore.ops.auto_generate import diag_ext as diag
+
 # 204
 from mindspore.ops.auto_generate import erfc
 # 207
@@ -332,24 +351,27 @@ from mindspore.ops.function.random_func import randint_like_ext as randint_like
 from mindspore.ops.auto_generate import floor
 # 231
 from mindspore.ops.function.math_func import inverse_ext as inverse
+# 239
+from mindspore.ops.functional_overload import lerp
 # 244
 from mindspore.ops.auto_generate import log1p
 # 261
 from mindspore.ops.function.random_func import multinomial_ext as multinomial
 # 275
-from mindspore.ops.
+from mindspore.ops.functional_overload import remainder
 # 285
 from mindspore.ops.function.array_func import scatter_add_ext as scatter_add
 # 289
 from mindspore.ops.auto_generate import sign

-from mindspore.ops.auto_generate import
+from mindspore.ops.auto_generate import select_ext_view as select

 # 301
 from mindspore.ops.function.math_func import tan

 # 303
 from mindspore.ops.auto_generate import trace_ext as trace
+from mindspore.ops.auto_generate import gcd

 from mindspore.ops.function.array_func import reshape

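Most of the import changes above are mechanical: numbered placeholder slots that were bare comments in 2.4.10 now import real implementations, and many entry points (clamp, clip, fmod, max, min, xlogy, div, repeat_interleave, ...) are now served by the new mindspore.ops.functional_overload module instead of per-module _ext aliases. The short probe below is illustrative only, not part of the diff; it checks which of these entry points a local install actually exposes, which helps code that must run on both 2.4.x and 2.6.x:

# Probe which of the mint entry points touched by this diff exist in the
# local install; the name list is copied from the hunks above.
import mindspore
from mindspore import mint

TOUCHED = [
    "empty", "empty_like", "clone", "full_like", "clamp", "clip", "fmod",
    "max", "min", "xlogy", "einsum", "addcdiv", "ravel", "div", "narrow",
    "argsort", "isinf", "polar", "diff", "var", "addcmul", "diag",
]

print("mindspore", mindspore.__version__)
for name in TOUCHED:
    print(f"mint.{name:12s} available: {hasattr(mint, name)}")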
@@ -357,199 +379,261 @@ from mindspore.ops.auto_generate import outer_ext as outer

 # 304
 from mindspore.ops.function.array_func import tril_ext as tril
+# 520
+from mindspore.ops.function.math_func import bincount_ext as bincount

 # 305
 from mindspore.ops import triu

+# 308
+from mindspore.ops.auto_generate import mm_ext as mm
+
+# 382
+from mindspore.ops.function.math_func import dstack
+
+# 501
+from mindspore.ops.function.math_func import addbmm_ext as addbmm
+
+# 502
+from mindspore.ops.function.math_func import addmm_ext as addmm
+
+# 505
+from mindspore.ops.function.math_func import addmv_ext as addmv
+
+# 510
+from mindspore.ops.function.math_func import amax_ext as amax
+
+# 511
+from mindspore.ops.function.math_func import amin_ext as amin
+
+# 521
+from mindspore.ops.functional_overload import bitwise_not
+
+# 526
+from mindspore.ops.auto_generate import dot
+
+# 533
+from mindspore.ops.function.math_func import frac_ext as frac
+
 # 538
 from mindspore.ops.function.math_func import histc_ext as histc
+# 549
+from mindspore.ops.functional_overload import kthvalue
+# 552
+from mindspore.ops.auto_generate import log10_ext as log10

 # 553
 from mindspore.ops.auto_generate import logaddexp_ext as logaddexp
+from mindspore.ops.auto_generate import logaddexp2
+
+# 557
+from mindspore.ops.auto_generate import logsumexp_ext as logsumexp
+
+# 582
+from mindspore.ops.function.math_func import std_mean_ext as std_mean
+
+# 584
+from mindspore.ops.function.array_func import take
+
+# 588
+from mindspore.ops.function.math_func import var_mean_ext as var_mean

 # 610
 from mindspore.ops.function.math_func import nan_to_num

+# 613
+from mindspore.ops.functional_overload import nansum
+
+# 615
+from mindspore.ops.auto_generate import triangular_solve
+
+# 664
+from mindspore.ops.function.array_func import meshgrid_ext as meshgrid
+
 # 695
 from mindspore.ops.auto_generate import count_nonzero

+# 697
+from mindspore.ops.function.math_func import float_power_ext as float_power

-
-
-    Adds scaled other value to input Tensor.
+# 708
+from mindspore.ops.function.math_func import std_ext as std

-
+# 719
+from mindspore.ops.functional_overload import add

-
+# 720
+from mindspore.ops.functional_overload import sub

-
-
-      they must be able to broadcast to a common shape.
-    - The two inputs and alpha comply with the implicit type conversion rules to make the data types
-      consistent.
+# 739
+from mindspore.ops.function.array_func import hstack

-
-
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
-        other (Union[Tensor, number.Number, bool]): The second input, is a number.Number or
-            a bool or a tensor whose data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+# 826
+from mindspore.ops.functional_overload import floor_divide

-
-
+# 887
+from mindspore.ops.auto_generate import log2_ext as log2

-
-
-    and the data type is the one with higher precision or higher digits among the two inputs and alpha.
+# 889
+from mindspore.ops.function.math_func import isnan_ext as isnan

-
-
-
-
+# 916
+from mindspore.ops.auto_generate import index_add_ext as index_add
+
+# 1007
+from mindspore.ops.auto_generate import t_ext as t
+from mindspore.ops.auto_generate.pyboost_inner_prim import squeeze_impl
+from mindspore.ops.auto_generate.gen_ops_prim import equal_ext_op
+
+
+# 1023
+from mindspore.ops.function.array_func import unbind_ext as unbind
+
+
+def any(input, dim=None, keepdim=False):
+    r"""
+    Tests if any element in `input` evaluates to `True` along the given axes.
+
+    Args:
+        input (Tensor): The input tensor.
+        dim (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce. If ``None`` ,
+            all dimensions are reduced. Default ``None`` .
+        keepdim (bool, optional): Whether the output tensor has dim retained or not. Default ``False`` .
+
+    Returns:
+        Tensor

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> import numpy as np
         >>> import mindspore
-        >>>
-        >>>
-        >>>
-        >>>
-
-        >>>
-        >>>
-
-
-        >>>
-        >>>
-
+        >>> input = mindspore.tensor([[True, False], [True, True]])
+        >>>
+        >>> # case 1: By default, mindspore.mint.any tests along all the axes.
+        >>> mindspore.mint.any(input)
+        Tensor(shape=[], dtype=Bool, value= True)
+        >>>
+        >>> # case 2: Reduces a dimension along dim 1, with keepdim False.
+        >>> mindspore.mint.any(input, dim=1)
+        Tensor(shape=[2], dtype=Bool, value= [ True, True])
+        >>>
+        >>> # case 3: Reduces a dimension along dim (0, 1), with keepdim False.
+        >>> mindspore.mint.any(input, dim=(0,1))
+        Tensor(shape=[], dtype=Bool, value= True)
+        >>>
+        >>> # case 4: Reduces a dimension along dim [0, 1], with keepdim True.
+        >>> mindspore.mint.any(input, dim=[0,1], keepdim=True)
+        Tensor(shape=[1, 1], dtype=Bool, value=
+        [[ True]])
     """
-    return ops.
+    return ops.functional.any(input, dim, keepdim)


-def 
+def all(input, dim=None, keepdim=False):
     r"""
-
-    reduce a dimension of `input` along the `dim`. Determine whether the dimensions of the output and input are the
-    same by controlling `keepdim`.
+    all(input) -> Tensor

-
-    The `dim` with tensor type is only used for compatibility with older versions and is not recommended.
+    Tests if all element in `input` evaluates to `True`.

     Args:
-        input (Tensor):
-            any number of additional dimensions.
-        dim (Union[int, tuple(int), list(int), Tensor], optional): The dimensions to reduce.
-            Suppose the rank of `input` is r, `dim` must be in the range [-rank(input), rank(input)).
-            Default: ``None`` , all dimensions are reduced.
-        keepdim (bool, optional): If ``True`` , keep these reduced dimensions and the length is 1.
-            If ``False`` , don't keep these dimensions. Default : ``False`` .
+        input (Tensor): The input Tensor.

     Returns:
-        Tensor
+        Tensor

-
-
-        - If `dim` is int, such as 2, and `keepdim` is ``False`` ,
-          the shape of output is :math:`(input_1, input_3, ..., input_R)`.
-        - If `dim` is tuple(int), such as (2, 3), and `keepdim` is ``False`` ,
-          the shape of output is :math:`(input_1, input_4, ..., input_R)`.
-        - If `dim` is 1-D Tensor, such as [2, 3], and `keepdim` is ``False`` ,
-          the shape of output is :math:`(input_1, input_4, ..., input_R)`.
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``

-
-
-
-
+    Examples:
+        >>> import mindspore
+        >>> input = mindspore.tensor([[True, False], [True, True]])
+        >>> output = mindspore.mint.all(input)
+        >>> print(output)
+        False
+
+    .. function:: all(input, dim, keepdim=False) -> Tensor
+        :noindex:
+
+    Tests if all element in `input` evaluates to `True` along the given axes.
+
+    Args:
+        input (Tensor): The input tensor.
+        dim (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce. If ``None`` ,
+            all dimensions are reduced. Default ``None`` .
+        keepdim (bool, optional): Whether the output tensor has dim retained or not. Default ``False`` .
+
+    Returns:
+        Tensor

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> import
-        >>>
-        >>>
-        >>> # case 1: Reduces a dimension
-        >>>
-
-
-        >>>
-        (
-
-        >>>
-        >>>
-        [ True
-
-
-        >>> print(output)
-        [ True True]
+        >>> import mindspore
+        >>> input = mindspore.tensor([[True, False], [True, True]])
+        >>>
+        >>> # case 1: Reduces a dimension along dim 1, with keepdim False.
+        >>> mindspore.mint.all(input, dim=1)
+        Tensor(shape=[2], dtype=Bool, value= [False, True])
+        >>>
+        >>> # case 2: Reduces a dimension along dim (0, 1), with keepdim False.
+        >>> mindspore.mint.all(input, dim=(0,1))
+        Tensor(shape=[], dtype=Bool, value= False)
+        >>>
+        >>> # case 3: Reduces a dimension along dim [0, 1], with keepdim True.
+        >>> mindspore.mint.all(input, dim=[0,1], keepdim=True)
+        Tensor(shape=[1, 1], dtype=Bool, value=
+        [[False]])
     """
-    return ops.
+    return ops.function.math_func.all(input, dim, keepdim)


-def 
-
-
-
-    same by controlling `keepdim`.
+def allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False):
+    """
+    Returns a new Tensor with boolean elements representing if each element of `input`
+    is “close” to the corresponding element of `other`. Closeness is defined as:

-
-
+    .. math::
+        |input-other| ≤ atol + rtol × |other|
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.

     Args:
-        input (Tensor):
-
-
-
-
-
-
+        input (Tensor): First tensor to compare.
+            Support dtype: float16, float32, float64, int8, int16, int32, int64 and uint8.
+            On Ascend, more dtypes are support: bool and bfloat16.
+        other (Tensor): Second tensor to compare. Dtype must be same as `input`.
+        rtol (Union[float, int, bool], optional): Relative tolerance. Default: ``1e-05`` .
+        atol (Union[float, int, bool], optional): Absolute tolerance. Default: ``1e-08`` .
+        equal_nan (bool, optional): If ``True`` , then two NaNs will be considered equal. Default: ``False``.

     Returns:
-
-
-        - If `dim` is ``None`` , and `keepdim` is ``False`` ,
-          the output is a 0-D Tensor representing the "logical AND" of all elements in the input Tensor.
-        - If `dim` is int, such as 2, and `keepdim` is ``False`` ,
-          the shape of output is :math:`(input_1, input_3, ..., input_R)`.
-        - If `dim` is tuple(int), such as (2, 3), and `keepdim` is ``False`` ,
-          the shape of output is :math:`(input_1, input_4, ..., input_R)`.
-        - If `dim` is 1-D Tensor, such as [2, 3], and `keepdim` is ``False`` ,
-          the shape of output is :math:`(input_1, input_4, ..., input_R)`.
+        A bool Scalar.

     Raises:
-        TypeError:
-        TypeError:
-        TypeError:
+        TypeError: `input` or `other` is not Tensor.
+        TypeError: `input` or `other` dtype is not support.
+        TypeError: `atol` or `rtol` is not float, int or bool.
+        TypeError: `equal_nan` is not bool.
+        TypeError: `input` and `other` have different dtypes.
+        ValueError: `input` and `other` cannot broadcast.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
+        >>> import mindspore
         >>> import numpy as np
-        >>> from mindspore import Tensor,
-        >>>
-        >>>
-        >>> output = mint.
-        >>> print(output)
-        [[False]]
-        >>> print(output.shape)
-        (1, 1)
-        >>> # case 2: Reduces a dimension along axis 0.
-        >>> output = mint.all(x, dim=0)
-        >>> print(output)
-        [ True False]
-        >>> # case 3: Reduces a dimension along axis 1.
-        >>> output = mint.all(x, dim=1)
+        >>> from mindspore import Tensor, ops
+        >>> input = Tensor(np.array([1.3, 2.1, 3.2, 4.1, 5.1]), mindspore.float16)
+        >>> other = Tensor(np.array([1.3, 3.3, 2.3, 3.1, 5.1]), mindspore.float16)
+        >>> output = mint.allclose(input, other)
         >>> print(output)
-
+        False
     """
-    return 
+    return isclose(input, other, rtol, atol, equal_nan).all().item()


 def cat(tensors, dim=0):
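The new allclose is a thin wrapper: it reduces the element-wise isclose mask with .all() and returns a Python bool via .item(). Its closeness rule, |input - other| <= atol + rtol * |other|, can be replayed with plain NumPy to see why the docstring example above prints False. This is a sketch of the semantics, not MindSpore's implementation:

# Element-wise closeness test |a - b| <= atol + rtol * |b|, mirroring the
# mint.allclose docstring example using NumPy only.
import numpy as np

rtol, atol = 1e-05, 1e-08
a = np.array([1.3, 2.1, 3.2, 4.1, 5.1], dtype=np.float16)
b = np.array([1.3, 3.3, 2.3, 3.1, 5.1], dtype=np.float16)

close = np.abs(a - b) <= atol + rtol * np.abs(b)
print(close)              # [ True False False False  True]
print(bool(close.all()))  # False, matching the docstring output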
@@ -572,7 +656,7 @@ def cat(tensors, dim=0):
         all other dimensions should be equal, that is,
         :math:`t1.shape[1] = t2.shape[1], t1.shape[2] = t2.shape[2], ..., t1.shape[R-1] = t2.shape[R-1]`,
         where :math:`R` represents the rank of tensor.
-        dim (int): The specified dimension, whose value is in range :math:`[-R, R)`. Default: ``0`` .
+        dim (int, optional): The specified dimension, whose value is in range :math:`[-R, R)`. Default: ``0`` .

     Returns:
         Tensor, the shape is :math:`(x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)`.
@@ -611,10 +695,13 @@ def cat(tensors, dim=0):
|
|
|
611
695
|
|
|
612
696
|
def concat(tensors, dim=0):
|
|
613
697
|
r"""
|
|
698
|
+
Alias for :func:`mindspore.mint.cat`.
|
|
699
|
+
|
|
614
700
|
.. warning::
|
|
615
701
|
This is an experimental API that is subject to change or deletion.
|
|
616
702
|
|
|
617
|
-
|
|
703
|
+
Supported Platforms:
|
|
704
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
618
705
|
"""
|
|
619
706
|
return cat(tensors, dim)
|
|
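Since `concat` simply forwards to `cat`, a minimal usage sketch (assuming the `mint.cat` signature documented above):

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, mint
>>> t1 = Tensor(np.array([[0, 1], [2, 3]]), mindspore.float32)
>>> t2 = Tensor(np.array([[4, 5], [6, 7]]), mindspore.float32)
>>> out = mint.concat((t1, t2), dim=0)  # concatenate along dim 0
>>> print(out.shape)
(4, 2)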
620
707
|
|
|
@@ -629,6 +716,9 @@ def cummax(input, dim):
|
|
|
629
716
|
y_{i} = \max(x_{1}, x_{2}, ... , x_{i})
|
|
630
717
|
\end{array}
|
|
631
718
|
|
|
719
|
+
.. note::
|
|
720
|
+
O2 mode is not supported on Ascend.
|
|
721
|
+
|
|
632
722
|
Args:
|
|
633
723
|
input (Tensor): The input Tensor. Rank of `input` must be greater than 0.
|
|
634
724
|
dim (int): The dimension to do the operation over. The value of `dim` must be in the range
|
|
@@ -643,9 +733,6 @@ def cummax(input, dim):
|
|
|
643
733
|
TypeError: If `dim` is not an int.
|
|
644
734
|
ValueError: If `dim` is out the range of `[-input.ndim, input.ndim - 1]`.
|
|
645
735
|
|
|
646
|
-
.. note::
|
|
647
|
-
O2 mode is not supported in Ascend.
|
|
648
|
-
|
|
649
736
|
Supported Platforms:
|
|
650
737
|
``Ascend``
|
|
651
738
|
|
|
@@ -670,472 +757,99 @@ def cummax(input, dim):
|
|
|
670
757
|
return ops.auto_generate.cummax(input, dim)
|
|
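A minimal illustration of the running-maximum formula above (a sketch; it assumes `mint.cummax` returns a (values, indices) pair like the `ops.auto_generate.cummax` it wraps, and per the platforms note it requires Ascend):

>>> import mindspore
>>> from mindspore import Tensor, mint
>>> x = Tensor([1, 3, 2, 5, 4], mindspore.float32)
>>> values, indices = mint.cummax(x, dim=0)  # y_i = max(x_1, ..., x_i)
>>> print(values)
[1. 3. 3. 5. 5.]
>>> print(indices)
[0 1 1 3 3]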
671
758
|
|
|
672
759
|
|
|
673
|
-
def
|
|
674
|
-
"""Convert sublist to label."""
|
|
675
|
-
if num == Ellipsis or ell_num and num == 52:
|
|
676
|
-
return '...'
|
|
677
|
-
if 0 <= num < 26:
|
|
678
|
-
return chr(num + ord('A'))
|
|
679
|
-
if 26 <= num < 52:
|
|
680
|
-
return chr(num + ord('a') - 26)
|
|
681
|
-
raise ValueError(f'For einsum, the number in sublist must be in range [0, 52), but got {num}')
|
|
682
|
-
|
|
683
|
-
|
|
684
|
-
def _einsum_convert_label_to_index(label):
|
|
685
|
-
"""Convert label to index."""
|
|
686
|
-
label_num = ord(label)
|
|
687
|
-
if ord('A') <= label_num <= ord('Z'):
|
|
688
|
-
return label_num - ord('A')
|
|
689
|
-
if ord('a') <= label_num <= ord('z'):
|
|
690
|
-
return label_num - ord('a') + 26
|
|
691
|
-
if label_num == ord('.'):
|
|
692
|
-
return 52
|
|
693
|
-
raise ValueError(f'For einsum, the label in equation must be in [a-zA-Z] or ., but got {label}')
|
|
694
|
-
|
|
695
|
-
|
|
696
|
-
def _einsum_convert_sublist(equation, *operands):
|
|
697
|
-
"""Convert the sublist to an equation operand if the received input is a sublist format."""
|
|
698
|
-
if isinstance(equation, Tensor):
|
|
699
|
-
equation_tmp = ''
|
|
700
|
-
for i, lst in enumerate(operands):
|
|
701
|
-
if i % 2 == 0:
|
|
702
|
-
for _, num in enumerate(lst):
|
|
703
|
-
equation_tmp += _einsum_convert_sublist_to_label(num)
|
|
704
|
-
if i in (len(operands) - 1, len(operands) - 2):
|
|
705
|
-
continue
|
|
706
|
-
equation_tmp += ','
|
|
707
|
-
if len(operands) % 2 == 0:
|
|
708
|
-
equation_tmp += '->'
|
|
709
|
-
for _, num in enumerate(operands[-1]):
|
|
710
|
-
equation_tmp += _einsum_convert_sublist_to_label(num)
|
|
711
|
-
operands_tmp = list([equation]) + list(operands[1:-1:2])
|
|
712
|
-
else:
|
|
713
|
-
operands_tmp = list([equation]) + list(operands[1::2])
|
|
714
|
-
equation = equation_tmp
|
|
715
|
-
operands = tuple(operands_tmp)
|
|
716
|
-
if len(operands) == 0: # pylint: disable=len-as-condition
|
|
717
|
-
raise ValueError("For einsum, the 'operands' must have at least one operand.")
|
|
718
|
-
return equation, operands
|
|
719
|
-
|
|
720
|
-
|
|
721
|
-
def _einsum_check_inputargs(equation, operands):
|
|
722
|
-
"""Check equation and operands."""
|
|
723
|
-
if not isinstance(equation, str):
|
|
724
|
-
raise TypeError(f"For einsum, 'equation' must be a str, but got {type(equation)}.")
|
|
725
|
-
for operand in operands:
|
|
726
|
-
if not isinstance(operand, Tensor):
|
|
727
|
-
raise TypeError(f"For einsum, members of 'operands' must be Tensor, but got {type(operand)}.")
|
|
728
|
-
|
|
729
|
-
|
|
730
|
-
@constexpr
|
|
731
|
-
def _einsum_parse_equation(equation):
|
|
732
|
-
"""Parse equation."""
|
|
733
|
-
l_equation = ''
|
|
734
|
-
r_equation = ''
|
|
735
|
-
equation = equation.replace(' ', '')
|
|
736
|
-
|
|
737
|
-
if '->' in equation:
|
|
738
|
-
l_equation, r_equation = equation.split('->', 1)
|
|
739
|
-
if l_equation == '':
|
|
740
|
-
raise ValueError('For einsum, equation must contain characters to the left fo the arrow.')
|
|
741
|
-
else:
|
|
742
|
-
l_equation = equation
|
|
743
|
-
|
|
744
|
-
if ',' in l_equation:
|
|
745
|
-
l_equationlst = l_equation.split(",")
|
|
746
|
-
else:
|
|
747
|
-
l_equationlst = [l_equation]
|
|
748
|
-
|
|
749
|
-
l_equationlst = []
|
|
750
|
-
|
|
751
|
-
for subequation in l_equation.split(','):
|
|
752
|
-
if '.' in subequation and ('...' not in subequation or subequation.count('.') != 3):
|
|
753
|
-
raise ValueError(f"For einsum, an ellipsis in the equation must include three continuous \'.\', "
|
|
754
|
-
f"and can only be found once.")
|
|
755
|
-
subequation_lst = [_einsum_convert_label_to_index(label) for label in subequation.replace('...', '.')]
|
|
756
|
-
l_equationlst.append(subequation_lst)
|
|
757
|
-
|
|
758
|
-
if "." in r_equation and ('...' not in r_equation or r_equation.count('.') != 3):
|
|
759
|
-
raise ValueError(f"For einsum, an ellipsis in the equation must include three continuous \'.\', "
|
|
760
|
-
f"and can only be found once.")
|
|
761
|
-
r_equationlst = [_einsum_convert_label_to_index(label) for label in r_equation.replace('...', '.')]
|
|
762
|
-
|
|
763
|
-
return l_equationlst, r_equationlst, ('->' in equation)
|
|
764
|
-
|
|
765
|
-
|
|
766
|
-
def _einsum_parse_labels(l_equationlst, operands):
|
|
767
|
-
"""Parse left script of equation."""
|
|
768
|
-
align_rank = 0
|
|
769
|
-
max_labels = 53
|
|
770
|
-
labels_count = [0] * max_labels
|
|
771
|
-
labels2dimlst = [None] * max_labels
|
|
772
|
-
|
|
773
|
-
if len(operands) != len(l_equationlst):
|
|
774
|
-
raise ValueError(f"For einsum, 'operands' is not equal to specified in the 'equation', "
|
|
775
|
-
f"but got {len(operands)} and {len(l_equationlst)}.")
|
|
776
|
-
|
|
777
|
-
for idx, sub_equ in enumerate(l_equationlst):
|
|
778
|
-
start_dim = 0
|
|
779
|
-
label_num = 0
|
|
780
|
-
operand_shape = list(operands[idx].shape)
|
|
781
|
-
for label in sub_equ:
|
|
782
|
-
label_num += 1
|
|
783
|
-
end_dim = start_dim + 1
|
|
784
|
-
|
|
785
|
-
# Label is ellipsis
|
|
786
|
-
if label == 52:
|
|
787
|
-
end_dim = len(operand_shape) - len(sub_equ) + label_num
|
|
788
|
-
if labels2dimlst[label] is None:
|
|
789
|
-
labels2dimlst[label] = operand_shape[start_dim:end_dim]
|
|
790
|
-
align_rank += (end_dim - start_dim)
|
|
791
|
-
else:
|
|
792
|
-
if labels2dimlst[label] != operand_shape[start_dim:end_dim]:
|
|
793
|
-
raise ValueError(f"For einsum, one label in 'equation' can only represent the same dimension "
|
|
794
|
-
f"in 'operands', but '{_einsum_convert_sublist_to_label(label, True)}' "
|
|
795
|
-
f"represented different dimensions.")
|
|
796
|
-
labels_count[label] += 1
|
|
797
|
-
start_dim = end_dim
|
|
798
|
-
if label_num != len(sub_equ) or start_dim != len(operand_shape):
|
|
799
|
-
raise ValueError(f"For einsum, the numbers of labels specified in the 'equation' does not match "
|
|
800
|
-
f"'operands[{idx}]'.")
|
|
801
|
-
return labels2dimlst, labels_count, align_rank
|
|
802
|
-
|
|
803
|
-
|
|
804
|
-
def _einsum_infer_output(r_equationlst, arrow_exist, labels2dimlst, labels_count):
|
|
805
|
-
"""Parse right script of equation and infer output shape."""
|
|
806
|
-
idx = 0
|
|
807
|
-
idle_idx = -1
|
|
808
|
-
output_shape = []
|
|
809
|
-
labels_perm_idx = [idle_idx] * 53
|
|
810
|
-
|
|
811
|
-
if arrow_exist:
|
|
812
|
-
for label in r_equationlst:
|
|
813
|
-
if labels_count[label] != 0:
|
|
814
|
-
output_shape += labels2dimlst[label]
|
|
815
|
-
if labels_perm_idx[label] != idle_idx:
|
|
816
|
-
raise ValueError(f"For einsum, '{_einsum_convert_sublist_to_label(label, True)}' or {label} in "
|
|
817
|
-
f"sublist format has appears more than once in output subscript.")
|
|
818
|
-
labels_perm_idx[label] = idx
|
|
819
|
-
idx += len(labels2dimlst[label])
|
|
820
|
-
else:
|
|
821
|
-
raise ValueError(f"For einsum, the label to the right of arrow in the 'equation' must appear on "
|
|
822
|
-
f"left, but '{_einsum_convert_sublist_to_label(label, True)}' does not.")
|
|
823
|
-
else:
|
|
824
|
-
if labels_count[52] != 0:
|
|
825
|
-
output_shape += labels2dimlst[52]
|
|
826
|
-
labels_perm_idx[52] = idx
|
|
827
|
-
idx += len(labels2dimlst[52])
|
|
828
|
-
for label, count in enumerate(labels_count):
|
|
829
|
-
if count == 1:
|
|
830
|
-
output_shape += labels2dimlst[label]
|
|
831
|
-
labels_perm_idx[label] = idx
|
|
832
|
-
idx += len(labels2dimlst[label])
|
|
833
|
-
|
|
834
|
-
for label, count in enumerate(labels_count):
|
|
835
|
-
if count != 0 and labels_perm_idx[label] == idle_idx:
|
|
836
|
-
labels_perm_idx[label] = idx
|
|
837
|
-
idx += 1
|
|
838
|
-
|
|
839
|
-
return output_shape, labels_perm_idx
|
|
840
|
-
|
|
841
|
-
|
|
842
|
-
def _einsum_adjust_operands(operands, l_equationlst, labels2dimlst, labels_perm_idx, align_rank):
|
|
843
|
-
"""Align operands to output as possible."""
|
|
844
|
-
# Unsqueeze miss dimensions to make all operands has same rank, compute diagonal if operand has same label.
|
|
845
|
-
# Then use _labels_perm_idx to transpose all operands to align dimensions with output.
|
|
846
|
-
adjust_operands = []
|
|
847
|
-
for idx, operand in enumerate(operands):
|
|
848
|
-
idle_dim = -1
|
|
849
|
-
align_axis = [idle_dim] * align_rank
|
|
850
|
-
label_dims = [idle_dim] * 53
|
|
851
|
-
dim = 0
|
|
852
|
-
|
|
853
|
-
for label in l_equationlst[idx]:
|
|
854
|
-
if label_dims[label] != idle_dim:
|
|
855
|
-
operand = ops.diagonal(operand, 0, label_dims[label], dim)
|
|
856
|
-
diag_perm = []
|
|
857
|
-
diag_dim = 0
|
|
858
|
-
for i in range(len(operand.shape)):
|
|
859
|
-
if i == label_dims[label]:
|
|
860
|
-
diag_perm.append(len(operand.shape) - 1)
|
|
861
|
-
else:
|
|
862
|
-
diag_perm.append(diag_dim)
|
|
863
|
-
diag_dim += 1
|
|
864
|
-
operand = permute(operand, tuple(diag_perm))
|
|
865
|
-
else:
|
|
866
|
-
label_dims[label] = dim
|
|
867
|
-
if label == 52:
|
|
868
|
-
for ell_idx in range(len(labels2dimlst[label])):
|
|
869
|
-
align_axis[labels_perm_idx[label] + ell_idx] = dim
|
|
870
|
-
dim += 1
|
|
871
|
-
else:
|
|
872
|
-
align_axis[labels_perm_idx[label]] = dim
|
|
873
|
-
dim += 1
|
|
874
|
-
if len(operand.shape) < align_rank:
|
|
875
|
-
for i, axis in enumerate(align_axis):
|
|
876
|
-
if axis == idle_dim:
|
|
877
|
-
align_axis[i] = dim
|
|
878
|
-
dim += 1
|
|
879
|
-
missing_dims = [1] * (align_rank - len(operand.shape))
|
|
880
|
-
operand_shape = list(operand.shape) + missing_dims
|
|
881
|
-
operand = reshape(operand, operand_shape)
|
|
882
|
-
operand = permute(operand, tuple(align_axis))
|
|
883
|
-
adjust_operands.append(operand)
|
|
884
|
-
return adjust_operands
|
|
885
|
-
|
|
886
|
-
|
|
887
|
-
def _einsum_find_dimlastop(align_rank, operands, adjust_operands):
|
|
888
|
-
"""Find dim last operand."""
|
|
889
|
-
dim_last_op = [0 for _ in range(align_rank)]
|
|
890
|
-
has_zero_dim = False
|
|
891
|
-
for dim in range(align_rank):
|
|
892
|
-
broadcast_dim = adjust_operands[0].shape[dim]
|
|
893
|
-
for idx in range(1, len(adjust_operands)):
|
|
894
|
-
other_dim = adjust_operands[idx].shape[dim]
|
|
895
|
-
if broadcast_dim != other_dim and broadcast_dim != 1 and other_dim != 1:
|
|
896
|
-
err_msg = "For einsum, operands do not broadcast after align to output [shapes :origin -> adjust]:"
|
|
897
|
-
for i in range(len(operands)):
|
|
898
|
-
err_msg += f" {operands[i].shape} -> {adjust_operands[i].shape}"
|
|
899
|
-
raise ValueError(err_msg)
|
|
900
|
-
if other_dim != 1:
|
|
901
|
-
dim_last_op[dim] = idx
|
|
902
|
-
broadcast_dim = other_dim
|
|
903
|
-
has_zero_dim = has_zero_dim or broadcast_dim == 0
|
|
904
|
-
return dim_last_op, has_zero_dim
|
|
905
|
-
|
|
906
|
-
|
|
907
|
-
def _einsum_multiplication(sum_dims, l_tensor, r_tensor):
|
|
908
|
-
"""Compute bmm for einsum."""
|
|
909
|
-
batch_dims = []
|
|
910
|
-
lonly_dims = []
|
|
911
|
-
ronly_dims = []
|
|
912
|
-
batch_size = 1
|
|
913
|
-
lonly_size = 1
|
|
914
|
-
ronly_size = 1
|
|
915
|
-
sum_size = 1
|
|
916
|
-
|
|
917
|
-
l_shape = l_tensor.shape
|
|
918
|
-
r_shape = r_tensor.shape
|
|
919
|
-
|
|
920
|
-
# Compute sum if dim is in sum_dims and get shapes for bmm
|
|
921
|
-
for i in range(len(l_shape)):
|
|
922
|
-
sum_l = l_shape[i] > 1
|
|
923
|
-
sum_r = r_shape[i] > 1
|
|
924
|
-
if i in sum_dims:
|
|
925
|
-
if sum_l and sum_r:
|
|
926
|
-
sum_size *= l_shape[i]
|
|
927
|
-
elif sum_l:
|
|
928
|
-
l_tensor = sum(l_tensor, i, True)
|
|
929
|
-
elif sum_r:
|
|
930
|
-
r_tensor = sum(r_tensor, i, True)
|
|
931
|
-
elif sum_l and sum_r:
|
|
932
|
-
batch_dims.append(i)
|
|
933
|
-
batch_size *= l_shape[i]
|
|
934
|
-
elif sum_l:
|
|
935
|
-
lonly_dims.append(i)
|
|
936
|
-
lonly_size *= l_shape[i]
|
|
937
|
-
else:
|
|
938
|
-
ronly_dims.append(i)
|
|
939
|
-
ronly_size *= r_shape[i]
|
|
940
|
-
|
|
941
|
-
# Compute the einsum bmm operators pipeline.
|
|
942
|
-
# The whole operators pipline is transpose(in) -> reshape(in) -> bmm(in) -> reshape(out) -> transpose(out).
|
|
943
|
-
l_reshape_shape = (batch_size, lonly_size, sum_size)
|
|
944
|
-
r_reshape_shape = (batch_size, sum_size, ronly_size)
|
|
945
|
-
|
|
946
|
-
out_reshape_shape = [l_shape[dim] for dim in batch_dims]
|
|
947
|
-
out_reshape_shape += [l_shape[dim] for dim in lonly_dims]
|
|
948
|
-
out_reshape_shape += [1 for _ in sum_dims]
|
|
949
|
-
out_reshape_shape += [r_shape[dim] for dim in ronly_dims]
|
|
950
|
-
|
|
951
|
-
l_perm_axis = batch_dims + lonly_dims + sum_dims + ronly_dims
|
|
952
|
-
r_perm_axis = batch_dims + sum_dims + ronly_dims + lonly_dims
|
|
953
|
-
out_perm_axis = [-1] * len(out_reshape_shape)
|
|
954
|
-
|
|
955
|
-
out_dim = 0
|
|
956
|
-
for idx in range(len(l_perm_axis)):
|
|
957
|
-
out_perm_axis[l_perm_axis[idx]] = out_dim
|
|
958
|
-
out_dim += 1
|
|
959
|
-
|
|
960
|
-
l_tensor = permute(l_tensor, tuple(l_perm_axis))
|
|
961
|
-
l_tensor = reshape(l_tensor, l_reshape_shape)
|
|
962
|
-
|
|
963
|
-
r_tensor = permute(r_tensor, tuple(r_perm_axis))
|
|
964
|
-
r_tensor = reshape(r_tensor, r_reshape_shape)
|
|
965
|
-
|
|
966
|
-
output = bmm(l_tensor, r_tensor)
|
|
967
|
-
output = reshape(output, out_reshape_shape)
|
|
968
|
-
output = permute(output, tuple(out_perm_axis))
|
|
969
|
-
|
|
970
|
-
output_origin_shape = output.shape
|
|
971
|
-
output_squeeze_shape = []
|
|
972
|
-
for dim in range(len(output_origin_shape)):
|
|
973
|
-
if dim not in sum_dims:
|
|
974
|
-
output_squeeze_shape.append(output_origin_shape[dim])
|
|
975
|
-
|
|
976
|
-
return reshape(output, output_squeeze_shape)
|
|
977
|
-
|
|
978
|
-
|
|
979
|
-
def _einsum_squeeze(operand, dim):
|
|
980
|
-
'''Will be replaced by mint.squeeze in the future'''
|
|
981
|
-
operand_shape = operand.shape
|
|
982
|
-
squeeze_shape = []
|
|
983
|
-
for idx in range(len(operand_shape)):
|
|
984
|
-
if idx != dim:
|
|
985
|
-
squeeze_shape.append(operand_shape[idx])
|
|
986
|
-
return reshape(operand, squeeze_shape)
|
|
987
|
-
|
|
988
|
-
|
|
989
|
-
def _einsum(equation, operands):
|
|
990
|
-
'''Einsum main process'''
|
|
991
|
-
_l_equationlst, _r_equationlst, _arrow_exist = _einsum_parse_equation(equation)
|
|
992
|
-
_labels2dimlst, _labels_count, _align_rank = _einsum_parse_labels(_l_equationlst, operands)
|
|
993
|
-
_output_shape, _labels_perm_idx = _einsum_infer_output(_r_equationlst, _arrow_exist, _labels2dimlst, _labels_count)
|
|
994
|
-
_output_rank = len(_output_shape)
|
|
995
|
-
|
|
996
|
-
_adjust_operands = _einsum_adjust_operands(operands, _l_equationlst, _labels2dimlst, _labels_perm_idx, _align_rank)
|
|
997
|
-
_dim_last_op, _has_zero_dim = _einsum_find_dimlastop(_align_rank, operands, _adjust_operands)
|
|
998
|
-
_result = _adjust_operands[0]
|
|
999
|
-
|
|
1000
|
-
# Fast path if operands has zero dim.
|
|
1001
|
-
if _has_zero_dim:
|
|
1002
|
-
return zeros(_output_shape, dtype=_result.dtype)
|
|
1003
|
-
|
|
1004
|
-
# Sum or squeeze dimensions that is 1 for all rest operands.
|
|
1005
|
-
_reduce_dim = _output_rank
|
|
1006
|
-
for dim in range(_output_rank, _align_rank):
|
|
1007
|
-
if _dim_last_op[dim] == 0:
|
|
1008
|
-
if _result.shape[_reduce_dim] == 1:
|
|
1009
|
-
_result = _einsum_squeeze(_result, _reduce_dim)
|
|
1010
|
-
else:
|
|
1011
|
-
_result = sum(_result, _reduce_dim)
|
|
1012
|
-
else:
|
|
1013
|
-
_reduce_dim += 1
|
|
1014
|
-
|
|
1015
|
-
# Compute multiplication if operands are more than two.
|
|
1016
|
-
for i in range(1, len(_adjust_operands)):
|
|
1017
|
-
operand = _adjust_operands[i]
|
|
1018
|
-
dim = _output_rank
|
|
1019
|
-
sum_dims = []
|
|
1020
|
-
for j in range(_output_rank, _align_rank):
|
|
1021
|
-
if _dim_last_op[j] < i:
|
|
1022
|
-
operand = _einsum_squeeze(operand, dim)
|
|
1023
|
-
elif _dim_last_op[j] == i:
|
|
1024
|
-
if _result.shape[dim] == 1:
|
|
1025
|
-
operand = sum(operand, dim)
|
|
1026
|
-
_result = _einsum_squeeze(_result, dim)
|
|
1027
|
-
else:
|
|
1028
|
-
sum_dims.append(dim)
|
|
1029
|
-
dim += 1
|
|
1030
|
-
else:
|
|
1031
|
-
dim += 1
|
|
1032
|
-
|
|
1033
|
-
if sum_dims == []:
|
|
1034
|
-
_result = mul(_result, operand)
|
|
1035
|
-
elif len(sum_dims) == len(_result.shape):
|
|
1036
|
-
_result = ops.auto_generate.dot(flatten(_result), flatten(operand))
|
|
1037
|
-
else:
|
|
1038
|
-
_result = _einsum_multiplication(sum_dims, _result, operand)
|
|
1039
|
-
|
|
1040
|
-
return _result
|
|
1041
|
-
|
|
1042
|
-
|
|
1043
|
-
def einsum(equation, *operands):
|
|
760
|
+
def not_equal(input, other):
|
|
1044
761
|
r"""
|
|
1045
|
-
|
|
1046
|
-
|
|
1047
|
-
|
|
762
|
+
Alias for :func:`mindspore.mint.ne` .
|
|
763
|
+
|
|
764
|
+
Supported Platforms:
|
|
765
|
+
``Ascend``
|
|
766
|
+
"""
|
|
767
|
+
return ne(input, other)
|
|
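Because `not_equal` is a thin alias for `ne`, the two calls below are interchangeable (illustrative output, assuming the usual elementwise bool result):

>>> import mindspore
>>> from mindspore import Tensor, mint
>>> x = Tensor([1, 2, 3], mindspore.int32)
>>> y = Tensor([1, 0, 3], mindspore.int32)
>>> print(mint.not_equal(x, y))
[False  True False]
>>> print(mint.ne(x, y))
[False  True False]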
768
|
+
|
|
769
|
+
|
|
770
|
+
def softmax(input, dim, *, dtype=None):
|
|
771
|
+
r"""
|
|
772
|
+
Alias for :func:`mindspore.mint.nn.functional.softmax`.
|
|
773
|
+
|
|
774
|
+
Supported Platforms:
|
|
775
|
+
``Ascend``
|
|
776
|
+
"""
|
|
777
|
+
return softmax_ext(input, dim, dtype)
|
|
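A minimal NumPy sketch of what the alias computes: exponentials along `dim`, normalized to sum to 1 (the max is subtracted first for numerical stability; this is a reference illustration, not the library kernel):

>>> import numpy as np
>>> x = np.array([1.0, 2.0, 3.0])
>>> e = np.exp(x - x.max())  # stabilize before exponentiating
>>> probs = e / e.sum()
>>> [round(float(p), 4) for p in probs]
[0.09, 0.2447, 0.6652]
>>> bool(np.isclose(probs.sum(), 1.0))
True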
778
|
+
|
|
779
|
+
|
|
780
|
+
def equal(input, other):
|
|
781
|
+
r"""
|
|
782
|
+
Computes the equivalence between two tensors.
|
|
1048
783
|
|
|
1049
784
|
Note:
|
|
1050
|
-
|
|
1051
|
-
In this format, equation can be derived by the sublists which are made up of Python's Ellipsis and list of
|
|
1052
|
-
integers in [0, 52). Each operand is followed by a sublist and an output sublist is at the end.
|
|
785
|
+
`input` and `other` comply with the implicit type conversion rules to make the data types consistent.
|
|
1053
786
|
|
|
1054
787
|
.. warning::
|
|
1055
788
|
This is an experimental API that is subject to change or deletion.
|
|
1056
789
|
|
|
1057
790
|
Args:
|
|
1058
|
-
|
|
1059
|
-
|
|
1060
|
-
The letters represent input tensor dimension, commas represent separate tensors, ellipsis indicates
|
|
1061
|
-
the tensor dimension that you do not care about, the left of the arrow indicates the input tensors,
|
|
1062
|
-
and the right of it indicates the desired output dimension.
|
|
1063
|
-
operands (Tensor): Input tensor used for calculation. The dtype of the tensor must be the same.
|
|
791
|
+
input (Tensor): The first input.
|
|
792
|
+
other (Tensor): The second input.
|
|
1064
793
|
|
|
1065
794
|
Returns:
|
|
1066
|
-
|
|
795
|
+
bool.
|
|
1067
796
|
|
|
1068
797
|
Raises:
|
|
1069
|
-
TypeError: If `
|
|
1070
|
-
ValueError: If the number in sublist is not in [0, 52) in sublist format.
|
|
798
|
+
TypeError: If `input` or `other` is not a Tensor.
|
|
1071
799
|
|
|
1072
800
|
Supported Platforms:
|
|
1073
801
|
``Ascend``
|
|
1074
802
|
|
|
1075
803
|
Examples:
|
|
1076
804
|
>>> import mindspore
|
|
1077
|
-
>>> import numpy as np
|
|
1078
805
|
>>> from mindspore import Tensor, mint
|
|
1079
|
-
>>> x = Tensor(
|
|
1080
|
-
>>>
|
|
1081
|
-
>>> output = mint.
|
|
1082
|
-
>>> print(output)
|
|
1083
|
-
[7.]
|
|
1084
|
-
>>> x = Tensor(np.array([1.0, 2.0, 4.0]), mindspore.float32)
|
|
1085
|
-
>>> y = Tensor(np.array([2.0, 4.0, 3.0]), mindspore.float32)
|
|
1086
|
-
>>> equation = "i,i->i"
|
|
1087
|
-
>>> output = mint.einsum(equation, x, y)
|
|
1088
|
-
>>> print(output)
|
|
1089
|
-
[ 2. 8. 12.]
|
|
1090
|
-
>>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
|
|
1091
|
-
>>> y = Tensor(np.array([[2.0, 3.0], [1.0, 2.0], [4.0, 5.0]]), mindspore.float32)
|
|
1092
|
-
>>> equation = "ij,jk->ik"
|
|
1093
|
-
>>> output = mint.einsum(equation, x, y)
|
|
1094
|
-
>>> print(output)
|
|
1095
|
-
[[16. 22.]
|
|
1096
|
-
[37. 52.]]
|
|
1097
|
-
>>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
|
|
1098
|
-
>>> equation = "ij->ji"
|
|
1099
|
-
>>> output = mint.einsum(equation, x)
|
|
1100
|
-
>>> print(output)
|
|
1101
|
-
[[1. 4.]
|
|
1102
|
-
[2. 5.]
|
|
1103
|
-
[3. 6.]]
|
|
1104
|
-
>>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
|
|
1105
|
-
>>> equation = "ij->j"
|
|
1106
|
-
>>> output = mint.einsum(equation, x)
|
|
1107
|
-
>>> print(output)
|
|
1108
|
-
[5. 7. 9.]
|
|
1109
|
-
>>> x = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), mindspore.float32)
|
|
1110
|
-
>>> equation = "...->"
|
|
1111
|
-
>>> output = mint.einsum(equation, x)
|
|
806
|
+
>>> x = Tensor([1, 2, 3], mindspore.int32)
|
|
807
|
+
>>> y = Tensor([1, 2, 4], mindspore.int32)
|
|
808
|
+
>>> output = mint.equal(x, y)
|
|
1112
809
|
>>> print(output)
|
|
1113
|
-
|
|
1114
|
-
>>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
|
|
1115
|
-
>>> y = Tensor(np.array([2.0, 4.0, 1.0]), mindspore.float32)
|
|
1116
|
-
>>> equation = "j,i->ji"
|
|
1117
|
-
>>> output = mint.einsum(equation, x, y)
|
|
1118
|
-
>>> print(output)
|
|
1119
|
-
[[ 2. 4. 1.]
|
|
1120
|
-
[ 4. 8. 2.]
|
|
1121
|
-
[ 6. 12. 3.]]
|
|
1122
|
-
>>> x = mindspore.Tensor([1, 2, 3, 4], mindspore.float32)
|
|
1123
|
-
>>> y = mindspore.Tensor([1, 2], mindspore.float32)
|
|
1124
|
-
>>> output = mint.einsum(x, [..., 1], y, [..., 2], [..., 1, 2])
|
|
1125
|
-
>>> print(output)
|
|
1126
|
-
[[1. 2.]
|
|
1127
|
-
[2. 4.]
|
|
1128
|
-
[3. 6.]
|
|
1129
|
-
[4. 8.]]
|
|
810
|
+
False
|
|
1130
811
|
"""
|
|
1131
|
-
|
|
1132
|
-
|
|
812
|
+
result = equal_ext_op(input, other)
|
|
813
|
+
return result.item()
|
|
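Note that, unlike an elementwise comparison, `equal` reduces to a single Python bool via `.item()` above, so it can drive control flow directly; a minimal sketch:

>>> import mindspore
>>> from mindspore import Tensor, mint
>>> x = Tensor([1, 2, 3], mindspore.int32)
>>> if mint.equal(x, x):  # single bool, usable in `if`
...     print("all elements match")
all elements match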
814
|
+
|
|
815
|
+
|
|
816
|
+
def isfinite(input):
|
|
817
|
+
r"""
|
|
818
|
+
Determine which elements are finite for each position. If elements are not ``NaN`` , ``-INF`` , ``INF``,
|
|
819
|
+
they are finite.
|
|
820
|
+
|
|
821
|
+
.. math::
|
|
822
|
+
out_i = \begin{cases}
|
|
823
|
+
& \text{ if } input_{i} = \text{Finite},\ \ True \\
|
|
824
|
+
& \text{ if } input_{i} \ne \text{Finite},\ \ False
|
|
825
|
+
\end{cases}
|
|
826
|
+
|
|
827
|
+
Args:
|
|
828
|
+
input (Tensor): The input tensor.
|
|
1133
829
|
|
|
1134
|
-
|
|
1135
|
-
|
|
1136
|
-
raise ValueError(f"For einsum, the element of 'operands' can't be dynamic shape or dynamic rank.")
|
|
830
|
+
Returns:
|
|
831
|
+
Tensor, has the same shape as `input`, and the dtype is bool.
|
|
1137
832
|
|
|
1138
|
-
|
|
833
|
+
Raises:
|
|
834
|
+
TypeError: If `input` is not a Tensor.
|
|
835
|
+
|
|
836
|
+
Supported Platforms:
|
|
837
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
838
|
+
|
|
839
|
+
Examples:
|
|
840
|
+
>>> import mindspore
|
|
841
|
+
>>> import numpy as np
|
|
842
|
+
>>> from mindspore import Tensor, mint
|
|
843
|
+
>>> x = Tensor(np.array([np.log(-1), 1, np.log(0)]), mindspore.float32)
|
|
844
|
+
>>> output = mint.isfinite(x)
|
|
845
|
+
>>> print(output)
|
|
846
|
+
[False True False]
|
|
847
|
+
>>> x = Tensor(2.1, mindspore.float64)
|
|
848
|
+
>>> output = mint.isfinite(x)
|
|
849
|
+
>>> print(output)
|
|
850
|
+
True
|
|
851
|
+
"""
|
|
852
|
+
return ops.auto_generate.isfinite(input)
|
|
1139
853
|
|
|
1140
854
|
|
|
1141
855
|
def item(input):
|
|
@@ -1178,6 +892,43 @@ def item(input):
|
|
|
1178
892
|
|
|
1179
893
|
def mean(input, dim=None, keepdim=False, *, dtype=None):
|
|
1180
894
|
r"""
|
|
895
|
+
mean(input, *, dtype=None) -> Tensor
|
|
896
|
+
|
|
897
|
+
Reduces all dimensions of a tensor by averaging all elements.
|
|
898
|
+
|
|
899
|
+
Args:
|
|
900
|
+
input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
|
|
901
|
+
:math:`(N, *)` where :math:`*` means any number of additional dimensions.
|
|
902
|
+
|
|
903
|
+
Keyword Args:
|
|
904
|
+
dtype (:class:`mindspore.dtype`, optional): The desired data type of returned Tensor. Default: ``None`` .
|
|
905
|
+
|
|
906
|
+
Returns:
|
|
907
|
+
Tensor.
|
|
908
|
+
|
|
909
|
+
Raises:
|
|
910
|
+
TypeError: If `input` is not a Tensor.
|
|
911
|
+
|
|
912
|
+
Supported Platforms:
|
|
913
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
914
|
+
|
|
915
|
+
Examples:
|
|
916
|
+
>>> import mindspore
|
|
917
|
+
>>> import numpy as np
|
|
918
|
+
>>> from mindspore import Tensor, mint
|
|
919
|
+
>>> x = Tensor(np.array([[[2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2]],
|
|
920
|
+
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
|
|
921
|
+
... [[6, 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8], [10, 10, 10, 10, 10, 10]]]),
|
|
922
|
+
... mindspore.float32)
|
|
923
|
+
>>> output = mint.mean(x)
|
|
924
|
+
>>> print(output)
|
|
925
|
+
5.0
|
|
926
|
+
>>> print(output.shape)
|
|
927
|
+
()
|
|
928
|
+
|
|
929
|
+
.. function:: mean(input, dim, keepdim=False, *, dtype=None) -> Tensor
|
|
930
|
+
:noindex:
|
|
931
|
+
|
|
1181
932
|
Reduces all dimensions of a tensor by averaging all elements in the dimension, by default.
|
|
1182
933
|
And reduce a dimension of `input` along the specified `dim`. `keepdim`
|
|
1183
934
|
determines whether the dimensions of the output and input are the same.
|
|
@@ -1188,9 +939,8 @@ def mean(input, dim=None, keepdim=False, *, dtype=None):
|
|
|
1188
939
|
Args:
|
|
1189
940
|
input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
|
|
1190
941
|
:math:`(N, *)` where :math:`*` means any number of additional dimensions.
|
|
1191
|
-
dim (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce.
|
|
1192
|
-
|
|
1193
|
-
and the value range is [-r,r).
|
|
942
|
+
dim (Union[int, tuple(int), list(int), Tensor]): The dimensions to reduce.
|
|
943
|
+
Only constant value is allowed. Assume the rank of `input` is r, and the value range is [-r,r).
|
|
1194
944
|
keepdim (bool): If ``True`` , keep these reduced dimensions and the length is 1.
|
|
1195
945
|
If ``False`` , don't keep these dimensions. Default: ``False`` .
|
|
1196
946
|
|
|
@@ -1200,8 +950,6 @@ def mean(input, dim=None, keepdim=False, *, dtype=None):
|
|
|
1200
950
|
Returns:
|
|
1201
951
|
Tensor.
|
|
1202
952
|
|
|
1203
|
-
- If `dim` is ``None`` , and `keepdim` is ``False`` ,
|
|
1204
|
-
the output is a 0-D tensor representing the product of all elements in the input tensor.
|
|
1205
953
|
- If `dim` is int, set as 1, and `keepdim` is ``False`` ,
|
|
1206
954
|
the shape of output is :math:`(input_0, input_2, ..., input_R)`.
|
|
1207
955
|
- If `dim` is tuple(int) or list(int), set as (1, 2), and `keepdim` is ``False`` ,
|
|
@@ -1222,51 +970,57 @@ def mean(input, dim=None, keepdim=False, *, dtype=None):
|
|
|
1222
970
|
>>> import mindspore
|
|
1223
971
|
>>> import numpy as np
|
|
1224
972
|
>>> from mindspore import Tensor, mint
|
|
1225
|
-
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
|
|
1226
|
-
>>> output = mint.mean(x, 1, keepdim=True)
|
|
1227
|
-
>>> result = output.shape
|
|
1228
|
-
>>> print(result)
|
|
1229
|
-
(3, 1, 5, 6)
|
|
1230
|
-
>>> # case 1: Reduces a dimension by averaging all elements in the dimension.
|
|
1231
973
|
>>> x = Tensor(np.array([[[2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2], [2, 2, 2, 2, 2, 2]],
|
|
1232
974
|
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
|
|
1233
975
|
... [[6, 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8], [10, 10, 10, 10, 10, 10]]]),
|
|
1234
976
|
... mindspore.float32)
|
|
1235
|
-
>>> output = mint.mean(x)
|
|
1236
|
-
>>> print(output)
|
|
1237
|
-
5.0
|
|
1238
|
-
>>> print(output.shape)
|
|
1239
|
-
()
|
|
1240
|
-
>>> # case 2: Reduces a dimension along the axis 0
|
|
1241
977
|
>>> output = mint.mean(x, 0, True)
|
|
1242
978
|
>>> print(output)
|
|
1243
979
|
[[[4. 4. 4. 4. 4. 4.]
|
|
1244
980
|
[5. 5. 5. 5. 5. 5.]
|
|
1245
981
|
[6. 6. 6. 6. 6. 6.]]]
|
|
1246
|
-
>>> # case 3: Reduces a dimension along the axis 1
|
|
1247
|
-
>>> output = mint.mean(x, 1, True)
|
|
1248
|
-
>>> print(output)
|
|
1249
|
-
[[[2. 2. 2. 2. 2. 2.]]
|
|
1250
|
-
[[5. 5. 5. 5. 5. 5.]]
|
|
1251
|
-
[[8. 8. 8. 8. 8. 8.]]]
|
|
1252
|
-
>>> # case 4: Reduces a dimension along the axis 2
|
|
1253
|
-
>>> output = mint.mean(x, 2, True)
|
|
1254
|
-
>>> print(output)
|
|
1255
|
-
[[[ 2.]
|
|
1256
|
-
[ 2.]
|
|
1257
|
-
[ 2.]]
|
|
1258
|
-
[[ 4.]
|
|
1259
|
-
[ 5.]
|
|
1260
|
-
[ 6.]]
|
|
1261
|
-
[[ 6.]
|
|
1262
|
-
[ 8.]
|
|
1263
|
-
[10.]]]
|
|
1264
982
|
"""
|
|
1265
|
-
return ops.
|
|
983
|
+
return ops.auto_generate.mean_ext(input, dim, keepdim, dtype)
|
|
1266
984
|
|
|
1267
985
|
|
|
1268
986
|
def prod(input, dim=None, keepdim=False, *, dtype=None):
|
|
1269
987
|
r"""
|
|
988
|
+
prod(input, *, dtype=None) -> Tensor
|
|
989
|
+
|
|
990
|
+
Multiplies all elements of `input`.
|
|
991
|
+
|
|
992
|
+
Args:
|
|
993
|
+
input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
|
|
994
|
+
:math:`(N, *)` where :math:`*` means any number of additional dimensions.
|
|
995
|
+
|
|
996
|
+
Keyword Args:
|
|
997
|
+
dtype (:class:`mindspore.dtype`, optional): The desired data type of returned Tensor. Default: ``None`` .
|
|
998
|
+
|
|
999
|
+
Returns:
|
|
1000
|
+
Tensor.
|
|
1001
|
+
|
|
1002
|
+
Raises:
|
|
1003
|
+
TypeError: If `input` is not a Tensor.
|
|
1004
|
+
|
|
1005
|
+
Supported Platforms:
|
|
1006
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
1007
|
+
|
|
1008
|
+
Examples:
|
|
1009
|
+
>>> import mindspore
|
|
1010
|
+
>>> import numpy as np
|
|
1011
|
+
>>> from mindspore import Tensor, mint
|
|
1012
|
+
>>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
|
|
1013
|
+
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
|
|
1014
|
+
... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
|
|
1015
|
+
>>> output = mint.prod(x)
|
|
1016
|
+
>>> print(output)
|
|
1017
|
+
2.2833798e+33
|
|
1018
|
+
>>> print(output.shape)
|
|
1019
|
+
()
|
|
1020
|
+
|
|
1021
|
+
.. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor
|
|
1022
|
+
:noindex:
|
|
1023
|
+
|
|
1270
1024
|
Reduces a dimension of a tensor by multiplying all elements in the dimension, by default. It can also
|
|
1271
1025
|
reduce a dimension of `input` along the `dim`. Determine whether the dimensions of the output and input are the
|
|
1272
1026
|
same by controlling `keepdim`.
|
|
@@ -1274,7 +1028,7 @@ def prod(input, dim=None, keepdim=False, *, dtype=None):
|
|
|
1274
1028
|
Args:
|
|
1275
1029
|
input (Tensor[Number]): The input tensor. The dtype of the tensor to be reduced is number.
|
|
1276
1030
|
:math:`(N, *)` where :math:`*` means any number of additional dimensions.
|
|
1277
|
-
dim (int): The dimensions to reduce.
|
|
1031
|
+
dim (int): The dimension to reduce. Only constant value is allowed.
|
|
1278
1032
|
Assume the rank of `x` is r, and the value range is [-r,r).
|
|
1279
1033
|
keepdim (bool): If ``True`` , keep these reduced dimensions and the length is 1.
|
|
1280
1034
|
If ``False`` , don't keep these dimensions. Default: ``False`` .
|
|
@@ -1285,8 +1039,6 @@ def prod(input, dim=None, keepdim=False, *, dtype=None):
|
|
|
1285
1039
|
Returns:
|
|
1286
1040
|
Tensor.
|
|
1287
1041
|
|
|
1288
|
-
- If `dim` is ``None`` , and `keepdim` is ``False`` ,
|
|
1289
|
-
the output is a 0-D tensor representing the product of all elements in the input tensor.
|
|
1290
1042
|
- If `dim` is int, set as 1, and `keepdim` is ``False`` ,
|
|
1291
1043
|
the shape of output is :math:`(input_0, input_2, ..., input_R)`.
|
|
1292
1044
|
|
|
@@ -1303,46 +1055,112 @@ def prod(input, dim=None, keepdim=False, *, dtype=None):
|
|
|
1303
1055
|
>>> import mindspore
|
|
1304
1056
|
>>> import numpy as np
|
|
1305
1057
|
>>> from mindspore import Tensor, mint
|
|
1306
|
-
>>> x = Tensor(np.random.randn(3, 4, 5, 6).astype(np.float32))
|
|
1307
|
-
>>> output = mint.prod(x, 1, keepdim=True)
|
|
1308
|
-
>>> result = output.shape
|
|
1309
|
-
>>> print(result)
|
|
1310
|
-
(3, 1, 5, 6)
|
|
1311
|
-
>>> # case 1: Reduces a dimension by multiplying all elements in the dimension.
|
|
1312
1058
|
>>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
|
|
1313
1059
|
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
|
|
1314
1060
|
... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mindspore.float32)
|
|
1315
|
-
>>> output = mint.prod(x)
|
|
1316
|
-
>>> print(output)
|
|
1317
|
-
2.2833798e+33
|
|
1318
|
-
>>> print(output.shape)
|
|
1319
|
-
()
|
|
1320
|
-
>>> # case 2: Reduces a dimension along axis 0.
|
|
1321
1061
|
>>> output = mint.prod(x, 0, True)
|
|
1322
1062
|
>>> print(output)
|
|
1323
1063
|
[[[ 28. 28. 28. 28. 28. 28.]
|
|
1324
1064
|
[ 80. 80. 80. 80. 80. 80.]
|
|
1325
1065
|
[162. 162. 162. 162. 162. 162.]]]
|
|
1326
|
-
>>> # case 3: Reduces a dimension along axis 1.
|
|
1327
|
-
>>> output = mint.prod(x, 1, True)
|
|
1328
|
-
>>> print(output)
|
|
1329
|
-
[[[ 6. 6. 6. 6. 6. 6.]]
|
|
1330
|
-
[[120. 120. 120. 120. 120. 120.]]
|
|
1331
|
-
[[504. 504. 504. 504. 504. 504.]]]
|
|
1332
|
-
>>> # case 4: Reduces a dimension along axis 2.
|
|
1333
|
-
>>> output = mint.prod(x, 2, True)
|
|
1334
|
-
>>> print(output)
|
|
1335
|
-
[[[1.00000e+00]
|
|
1336
|
-
[6.40000e+01]
|
|
1337
|
-
[7.29000e+02]]
|
|
1338
|
-
[[4.09600e+03]
|
|
1339
|
-
[1.56250e+04]
|
|
1340
|
-
[4.66560e+04]]
|
|
1341
|
-
[[1.17649e+05]
|
|
1342
|
-
[2.62144e+05]
|
|
1343
|
-
[5.31441e+05]]]
|
|
1344
1066
|
"""
|
|
1345
|
-
return ops.auto_generate.prod_ext(input,
|
|
1067
|
+
return ops.auto_generate.prod_ext(input, dim, keepdim, dtype)
|
|
1068
|
+
|
|
1069
|
+
|
|
1070
|
+
def sum(input, dim=None, keepdim=False, *, dtype=None):
|
|
1071
|
+
r'''
|
|
1072
|
+
sum(input, *, dtype=None) -> Tensor
|
|
1073
|
+
|
|
1074
|
+
Calculate sum of all elements in Tensor.
|
|
1075
|
+
|
|
1076
|
+
Args:
|
|
1077
|
+
input (Tensor): The input tensor.
|
|
1078
|
+
|
|
1079
|
+
Keyword Args:
|
|
1080
|
+
dtype (:class:`mindspore.dtype`, optional): The desired data type of returned Tensor. Default: ``None`` .
|
|
1081
|
+
|
|
1082
|
+
Returns:
|
|
1083
|
+
A Tensor, sum of all elements in `input`.
|
|
1084
|
+
|
|
1085
|
+
Raises:
|
|
1086
|
+
TypeError: If `input` is not a Tensor.
|
|
1087
|
+
|
|
1088
|
+
Supported Platforms:
|
|
1089
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
1090
|
+
|
|
1091
|
+
Examples:
|
|
1092
|
+
>>> import mindspore
|
|
1093
|
+
>>> import numpy as np
|
|
1094
|
+
>>> from mindspore import Tensor, mint
|
|
1095
|
+
>>> from mindspore import dtype as mstype
|
|
1096
|
+
>>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
|
|
1097
|
+
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
|
|
1098
|
+
... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mstype.float32)
|
|
1099
|
+
>>> out = mint.sum(x)
|
|
1100
|
+
>>> print(out)
|
|
1101
|
+
270.0
|
|
1102
|
+
|
|
1103
|
+
.. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor
|
|
1104
|
+
:noindex:
|
|
1105
|
+
|
|
1106
|
+
Calculate sum of Tensor elements over a given dim.
|
|
1107
|
+
|
|
1108
|
+
Note:
|
|
1109
|
+
The `dim` with tensor type is only used for compatibility with older versions and is not recommended.
|
|
1110
|
+
|
|
1111
|
+
Args:
|
|
1112
|
+
input (Tensor): The input tensor.
|
|
1113
|
+
dim (Union[int, tuple(int), list(int), Tensor]): Dimensions along which a sum is performed.
|
|
1114
|
+
If the `dim` is a tuple or list of ints, a sum is performed on all the dimensions specified in the tuple.
|
|
1115
|
+
Must be in the range :math:`[-input.ndim, input.ndim)` .
|
|
1116
|
+
keepdim (bool): Whether the output tensor has `dim` retained or not.
|
|
1117
|
+
If ``True`` , keep these reduced dimensions and the length is 1.
|
|
1118
|
+
If ``False`` , don't keep these dimensions. Default: ``False`` .
|
|
1119
|
+
|
|
1120
|
+
Keyword Args:
|
|
1121
|
+
dtype (:class:`mindspore.dtype`, optional): The desired data type of returned Tensor. Default: ``None`` .
|
|
1122
|
+
|
|
1123
|
+
Returns:
|
|
1124
|
+
A Tensor, sum of elements over a given `dim` in `input`.
|
|
1125
|
+
|
|
1126
|
+
Raises:
|
|
1127
|
+
TypeError: If `input` is not a Tensor.
|
|
1128
|
+
TypeError: If `dim` is not an int, tuple(int), list(int) or Tensor.
|
|
1129
|
+
ValueError: If `dim` is not in the range :math:`[-input.ndim, input.ndim)` .
|
|
1130
|
+
TypeError: If `keepdim` is not a bool.
|
|
1131
|
+
|
|
1132
|
+
Supported Platforms:
|
|
1133
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
1134
|
+
|
|
1135
|
+
Examples:
|
|
1136
|
+
>>> import mindspore
|
|
1137
|
+
>>> import numpy as np
|
|
1138
|
+
>>> from mindspore import Tensor, mint
|
|
1139
|
+
>>> from mindspore import dtype as mstype
|
|
1140
|
+
>>> x = Tensor(np.array([[[1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2], [3, 3, 3, 3, 3, 3]],
|
|
1141
|
+
... [[4, 4, 4, 4, 4, 4], [5, 5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]],
|
|
1142
|
+
... [[7, 7, 7, 7, 7, 7], [8, 8, 8, 8, 8, 8], [9, 9, 9, 9, 9, 9]]]), mstype.float32)
|
|
1143
|
+
>>> out = mint.sum(x)
|
|
1144
|
+
>>> print(out)
|
|
1145
|
+
270.0
|
|
1146
|
+
>>> out = mint.sum(x, dim=2)
|
|
1147
|
+
>>> print(out)
|
|
1148
|
+
[[ 6. 12. 18.]
|
|
1149
|
+
[24. 30. 36.]
|
|
1150
|
+
[42. 48. 54.]]
|
|
1151
|
+
>>> out = mint.sum(x, dim=2, keepdim=True)
|
|
1152
|
+
>>> print(out)
|
|
1153
|
+
[[[ 6.]
|
|
1154
|
+
[12.]
|
|
1155
|
+
[18.]]
|
|
1156
|
+
[[24.]
|
|
1157
|
+
[30.]
|
|
1158
|
+
[36.]]
|
|
1159
|
+
[[42.]
|
|
1160
|
+
[48.]
|
|
1161
|
+
[54.]]]
|
|
1162
|
+
'''
|
|
1163
|
+
return ops.auto_generate.sum_ext(input, dim, keepdim, dtype)
|
|
1346
1164
|
|
|
1347
1165
|
|
|
1348
1166
|
def ones(size, *, dtype=None):
|
|
@@ -1365,7 +1183,7 @@ def ones(size, *, dtype=None):
|
|
|
1365
1183
|
Tensor, whose dtype and size are defined by input.
|
|
1366
1184
|
|
|
1367
1185
|
Raises:
|
|
1368
|
-
TypeError: If `size` is neither an int nor
|
|
1186
|
+
TypeError: If `size` is neither an int nor a tuple/list/Tensor of int.
|
|
1369
1187
|
|
|
1370
1188
|
Supported Platforms:
|
|
1371
1189
|
``Ascend`` ``GPU`` ``CPU``
|
|
@@ -1429,7 +1247,7 @@ def split(tensor, split_size_or_sections, dim=0):
|
|
|
1429
1247
|
if `tensor.shape[dim]` is not divisible by `split_size_or_sections`.
|
|
1430
1248
|
If `split_size_or_sections` is a list type, then `tensor` will be split into len(split_size_or_sections)
|
|
1431
1249
|
chunks with sizes `split_size_or_sections` along the given `dim`.
|
|
1432
|
-
dim (int): The dim along which to split. Default: ``0`` .
|
|
1250
|
+
dim (int, optional): The dim along which to split. Default: ``0`` .
|
|
1433
1251
|
|
|
1434
1252
|
Returns:
|
|
1435
1253
|
A tuple of sub-tensors.
|
|
@@ -1437,10 +1255,10 @@ def split(tensor, split_size_or_sections, dim=0):
|
|
|
1437
1255
|
Raises:
|
|
1438
1256
|
TypeError: If argument `tensor` is not Tensor.
|
|
1439
1257
|
TypeError: If argument `dim` is not int.
|
|
1440
|
-
ValueError: If argument `dim` is out of range of
|
|
1258
|
+
ValueError: If argument `dim` is out of range of [-tensor.ndim, tensor.ndim).
|
|
1441
1259
|
TypeError: If each element in `split_size_or_sections` is not integer.
|
|
1442
1260
|
TypeError: If argument `split_size_or_sections` is not int, tuple(int) or list(int).
|
|
1443
|
-
ValueError: The sum of `split_size_or_sections` is not equal to
|
|
1261
|
+
ValueError: The sum of `split_size_or_sections` is not equal to tensor.shape[dim].
|
|
1444
1262
|
|
|
1445
1263
|
Supported Platforms:
|
|
1446
1264
|
``Ascend``
|
|
@@ -1490,67 +1308,65 @@ def sqrt(input):
|
|
|
1490
1308
|
return ops.auto_generate.sqrt(input)
|
|
1491
1309
|
|
|
1492
1310
|
|
|
1493
|
-
def
|
|
1311
|
+
def squeeze(input, dim):
|
|
1494
1312
|
r"""
|
|
1495
|
-
|
|
1313
|
+
Return the Tensor after deleting the dimension of size 1 in the specified `dim`.
|
|
1496
1314
|
|
|
1497
|
-
|
|
1498
|
-
|
|
1499
|
-
|
|
1315
|
+
If :math:`dim=()`, it will remove all the dimensions of size 1.
|
|
1316
|
+
If `dim` is specified, it will remove the dimensions of size 1 in the given `dim`.
|
|
1317
|
+
For example, if the dimension is not specified, i.e. :math:`dim=()`, and the input shape is (A, 1, B, C, 1, D),
|
|
1318
|
+
then the shape of the output Tensor is (A, B, C, D). If the dimension is specified, the squeeze operation
|
|
1319
|
+
is only performed in the specified dimension. If input shape is (A, 1, B), when :math:`dim=0` or :math:`dim=2`,
|
|
1320
|
+
the input tensor is not changed, while when :math:`dim=1`, the input tensor shape is changed to (A, B).
|
|
1500
1321
|
|
|
1501
1322
|
Note:
|
|
1502
|
-
-
|
|
1503
|
-
|
|
1504
|
-
- The
|
|
1505
|
-
|
|
1323
|
+
- Please note that in dynamic graph mode, the output Tensor will share data with the input Tensor,
|
|
1324
|
+
and there is no Tensor data copy process.
|
|
1325
|
+
- The dimension index starts at 0 and must be in the range `[-input.ndim, input.ndim]`.
|
|
1326
|
+
- In GE mode, only removing dimensions of size 1 from the shape of the input tensor is supported.
|
|
1506
1327
|
|
|
1507
|
-
|
|
1508
|
-
|
|
1509
|
-
a bool or a tensor whose data type is
|
|
1510
|
-
`number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
|
|
1511
|
-
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
|
|
1512
|
-
other (Union[Tensor, number.Number, bool]): The second input, is a number.Number or
|
|
1513
|
-
a bool or a tensor whose data type is
|
|
1514
|
-
`number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
|
|
1515
|
-
`bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
|
|
1328
|
+
.. warning::
|
|
1329
|
+
This is an experimental API that is subject to change or deletion.
|
|
1516
1330
|
|
|
1517
|
-
|
|
1518
|
-
|
|
1331
|
+
Args:
|
|
1332
|
+
input (Tensor): Used to calculate Squeeze. The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
|
|
1333
|
+
dim (Union[int, tuple(int)]): Specifies the dimension indexes of shape to be removed, which will
|
|
1334
|
+
remove all the dimensions of size 1 in the given `dim`. If specified, it must be int32 or int64.
|
|
1519
1335
|
|
|
1520
1336
|
Returns:
|
|
1521
|
-
Tensor
|
|
1522
|
-
and the data type is the one with higher precision or higher digits among the two inputs and alpha.
|
|
1337
|
+
Tensor, the shape of tensor is :math:`(x_1, x_2, ..., x_S)`.
|
|
1523
1338
|
|
|
1524
1339
|
Raises:
|
|
1525
|
-
TypeError: If
|
|
1526
|
-
TypeError: If `
|
|
1527
|
-
TypeError: If `
|
|
1340
|
+
TypeError: If `input` is not a tensor.
|
|
1341
|
+
TypeError: If `dim` is not an int or a tuple.
|
|
1342
|
+
TypeError: If `dim` is a tuple whose elements are not all int.
|
|
1528
1343
|
|
|
1529
1344
|
Supported Platforms:
|
|
1530
1345
|
``Ascend`` ``GPU`` ``CPU``
|
|
1531
1346
|
|
|
1532
1347
|
Examples:
|
|
1533
|
-
>>> import numpy as np
|
|
1534
1348
|
>>> import mindspore
|
|
1535
|
-
>>>
|
|
1536
|
-
>>> from mindspore import mint
|
|
1537
|
-
>>>
|
|
1538
|
-
>>>
|
|
1539
|
-
>>> alpha = 0.5
|
|
1540
|
-
>>> output = mint.sub(x, y, alpha=alpha)
|
|
1349
|
+
>>> import numpy as np
|
|
1350
|
+
>>> from mindspore import Tensor, mint
|
|
1351
|
+
>>> input = Tensor(np.ones(shape=[3, 2, 1]), mindspore.float32)
|
|
1352
|
+
>>> output = mint.squeeze(input, 2)
|
|
1541
1353
|
>>> print(output)
|
|
1542
|
-
[
|
|
1543
|
-
|
|
1544
|
-
|
|
1545
|
-
>>> print(output.dtype)
|
|
1546
|
-
Float32
|
|
1354
|
+
[[1. 1.]
|
|
1355
|
+
[1. 1.]
|
|
1356
|
+
[1. 1.]]
|
|
1547
1357
|
"""
|
|
1548
|
-
return
|
|
1358
|
+
return squeeze_impl(input, dim)
|
|
1549
1359
|
|
|
1550
1360
|
|
|
1551
1361
|
def swapaxes(input, axis0, axis1):
|
|
1552
1362
|
'''
|
|
1553
|
-
|
|
1363
|
+
Alias for :func:`mindspore.mint.transpose` . The `input` corresponds to the `input` in the reference interface,
|
|
1364
|
+
and the parameters `axis0` and `axis1` correspond to `dim0` and `dim1` in the reference interface respectively.
|
|
1365
|
+
|
|
1366
|
+
For more details, see :func:`mindspore.mint.transpose` .
|
|
1367
|
+
|
|
1368
|
+
.. warning::
|
|
1369
|
+
This is an experimental API that is subject to change or deletion.
|
|
1554
1370
|
|
|
1555
1371
|
Examples:
|
|
1556
1372
|
>>> import numpy as np
|
|
@@ -1564,6 +1380,67 @@ def swapaxes(input, axis0, axis1):
|
|
|
1564
1380
|
return transpose(input, axis0, axis1)
|
|
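A minimal usage sketch for the alias (the original examples are elided from this hunk; this assumes the `mint.transpose` behaviour referenced above, with `axis0` and `axis1` mapping to `dim0` and `dim1`):

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, mint
>>> x = Tensor(np.ones((2, 3, 4)), mindspore.float32)
>>> out = mint.swapaxes(x, 0, 2)  # exchange the first and last axes
>>> print(out.shape)
(4, 3, 2)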
1565
1381
|
|
|
1566
1382
|
|
|
1383
|
+
def unique_consecutive(input, return_inverse=False, return_counts=False, dim=None):
|
|
1384
|
+
r"""
|
|
1385
|
+
Returns the elements that are unique in each consecutive group of equivalent elements in the input tensor.
|
|
1386
|
+
|
|
1387
|
+
When `return_inverse=True` , it returns a tensor containing the indices of the elements in the input tensor
|
|
1388
|
+
within the output tensor.
|
|
1389
|
+
|
|
1390
|
+
When `return_counts=True` , it returns a tensor representing the number of occurrences of each output element
|
|
1391
|
+
in the input.
|
|
1392
|
+
|
|
1393
|
+
.. warning::
|
|
1394
|
+
This is an experimental API that is subject to change or deletion.
|
|
1395
|
+
|
|
1396
|
+
Args:
|
|
1397
|
+
input (Tensor): The input tensor.
|
|
1398
|
+
return_inverse (bool, optional): Whether to return the index of where the element in the original input
|
|
1399
|
+
maps to the position in the output. Default: ``False`` .
|
|
1400
|
+
return_counts (bool, optional): Whether to return the counts of each unique element. Default: ``False`` .
|
|
1401
|
+
dim (int, optional): The dimension to apply unique. If ``None`` , the unique of the flattened input is
|
|
1402
|
+
returned. If the dimension is specified, it must be int32 or int64. Default: ``None`` .
|
|
1403
|
+
|
|
1404
|
+
Returns:
|
|
1405
|
+
A tensor or a tuple of tensors containing tensor objects (`output`, `inverse_indices`, `counts`).
|
|
1406
|
+
|
|
1407
|
+
- **output** (Tensor): the output tensor has the same type as `input` and represents the output list of
|
|
1408
|
+
unique scalar elements.
|
|
1409
|
+
- **inverse_indices** (Tensor, optional): if `return_inverse` is True, there will be an additional returned
|
|
1410
|
+
tensor `inverse_indices`. `inverse_indices` has the same shape as `input` and represents the index of where
|
|
1411
|
+
the element in the original input maps to the position in the output.
|
|
1412
|
+
- **counts** (Tensor, optional): if `return_counts` is True, there will be an additional returned tensor
|
|
1413
|
+
`counts`. `counts` has the same shape as `output` or `output.shape[dim]` if dim was specified and represents
|
|
1414
|
+
the number of occurrences for each unique value or tensor.
|
|
1415
|
+
|
|
1416
|
+
Raises:
|
|
1417
|
+
TypeError: If `input` is not a Tensor.
|
|
1418
|
+
TypeError: If dtype of `input` is not supported.
|
|
1419
|
+
TypeError: If `return_inverse` is not a bool.
|
|
1420
|
+
TypeError: If `return_counts` is not a bool.
|
|
1421
|
+
TypeError: If `dim` is not an int.
|
|
1422
|
+
ValueError: If `dim` is not in the range of :math:`[-ndim, ndim-1]`.
|
|
1423
|
+
|
|
1424
|
+
Supported Platforms:
|
|
1425
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
1426
|
+
|
|
1427
|
+
Examples:
|
|
1428
|
+
>>> import numpy as np
|
|
1429
|
+
>>> from mindspore import Tensor, mint
|
|
1430
|
+
>>> from mindspore import dtype as mstype
|
|
1431
|
+
>>> x = Tensor(np.array([1, 1, 2, 2, 3, 1, 1, 2]), mstype.int64)
|
|
1432
|
+
>>> output, inverse_indices, counts = mint.unique_consecutive(x, True, True, None)
|
|
1433
|
+
>>> print(output)
|
|
1434
|
+
[1 2 3 1 2]
|
|
1435
|
+
>>> print(inverse_indices)
|
|
1436
|
+
[0 0 1 1 2 3 3 4]
|
|
1437
|
+
>>> print(counts)
|
|
1438
|
+
[2 2 1 2 1]
|
|
1439
|
+
"""
|
|
1440
|
+
|
|
1441
|
+
return ops.function.array_func.unique_consecutive(input, return_inverse, return_counts, dim)
|
|
1442
|
+
|
|
1443
|
+
|
|
1567
1444
|
def zeros(size, *, dtype=None):
|
|
1568
1445
|
"""
|
|
1569
1446
|
Creates a tensor filled with 0 with shape described by `size` and fills it with value 0 in type of `dtype`.
|
|
@@ -1581,7 +1458,7 @@ def zeros(size, *, dtype=None):
|
|
|
1581
1458
|
Tensor, whose dtype and size are defined by input.
|
|
1582
1459
|
|
|
1583
1460
|
Raises:
|
|
1584
|
-
TypeError: If `size` is neither an int nor
|
|
1461
|
+
TypeError: If `size` is neither an int nor a tuple/list/Tensor of int.
|
|
1585
1462
|
|
|
1586
1463
|
Supported Platforms:
|
|
1587
1464
|
``Ascend`` ``GPU`` ``CPU``
|
|
@@ -1638,9 +1515,9 @@ def scatter(input, dim, index, src):
|
|
|
1638
1515
|
|
|
1639
1516
|
Raises:
|
|
1640
1517
|
TypeError: If `index` is neither int32 nor int64.
|
|
1641
|
-
ValueError: If rank of any of `input` , `index` and `src` less than 1.
|
|
1518
|
+
ValueError: If rank of any of `input` , `index` and `src` is less than 1.
|
|
1642
1519
|
ValueError: If the rank of `src` is not equal to the rank of `input` .
|
|
1643
|
-
TypeError: If the data
|
|
1520
|
+
TypeError: If the data types of `input` and `src` have different dtypes.
|
|
1644
1521
|
RuntimeError: If `index` has negative elements.
|
|
1645
1522
|
|
|
1646
1523
|
Supported Platforms:
|
|
@@ -1680,15 +1557,69 @@ def scatter(input, dim, index, src):
|
|
|
1680
1557
|
return ops.function.array_func.scatter(input, dim, index, src)
|
|
1681
1558
|
|
|
1682
1559
|
|
|
1560
|
+
def cdist(x1, x2, p=2.0, compute_mode='use_mm_for_euclid_dist_if_necessary'):
|
|
1561
|
+
"""
|
|
1562
|
+
Computes the p-norm distance between each pair of row vectors of two input Tensors.
|
|
1563
|
+
|
|
1564
|
+
.. warning::
|
|
1565
|
+
This is an experimental API that is subject to change or deletion.
|
|
1566
|
+
|
|
1567
|
+
Note:
|
|
1568
|
+
On Ascend, the supported dtypes are float16 and float32.
|
|
1569
|
+
|
|
1570
|
+
Args:
|
|
1571
|
+
x1 (Tensor): Input tensor of shape :math:`(B, P, M)`.
|
|
1572
|
+
Letter :math:`B` represents zero or a positive integer.
|
|
1573
|
+
When :math:`B` is equal to 0, it means this dimension can be ignored,
|
|
1574
|
+
i.e. shape of the tensor is :math:`(P, M)`.
|
|
1575
|
+
x2 (Tensor): Input tensor of shape :math:`(B, R, M)`, has the same dtype as `x1`.
|
|
1576
|
+
p (float, optional): P value for the p-norm distance to calculate between each
|
|
1577
|
+
vector pair, P >= 0. Default: ``2.0`` .
|
|
1578
|
+
compute_mode (string, optional): Specify the compute mode. Setting this parameter currently has no effect.
|
|
1579
|
+
Default: ``'use_mm_for_euclid_dist_if_necessary'`` .
|
|
1580
|
+
|
|
1581
|
+
Returns:
|
|
1582
|
+
Tensor, p-norm distance, has the same dtype as `x1`, its shape is :math:`(B, P, R)`.
|
|
1583
|
+
|
|
1584
|
+
Raises:
|
|
1585
|
+
TypeError: If `x1` or `x2` is not Tensor.
|
|
1586
|
+
TypeError: If dtype of `x1` or `x2` is not listed in the "Note" above.
|
|
1587
|
+
TypeError: If `p` is not float32.
|
|
1588
|
+
ValueError: If `p` is negative.
|
|
1589
|
+
ValueError: If dimension of `x1` is not the same as `x2`.
|
|
1590
|
+
ValueError: If dimension of `x1` or `x2` is neither 2 nor 3.
|
|
1591
|
+
ValueError: If the batch dim of `x1` and `x2` cannot be broadcast.
|
|
1592
|
+
ValueError: If the number of columns of `x1` is not the same as that of `x2`.
|
|
1593
|
+
|
|
1594
|
+
Supported Platforms:
|
|
1595
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
1596
|
+
|
|
1597
|
+
Examples:
|
|
1598
|
+
>>> import numpy as np
|
|
1599
|
+
>>> from mindspore import Tensor, mint
|
|
1600
|
+
>>> x = Tensor(np.array([[[1.0, 1.0], [2.0, 2.0]]]).astype(np.float32))
|
|
1601
|
+
>>> y = Tensor(np.array([[[3.0, 3.0], [3.0, 3.0]]]).astype(np.float32))
|
|
1602
|
+
>>> output = mint.cdist(x, y, 2.0)
|
|
1603
|
+
>>> print(output)
|
|
1604
|
+
[[[2.8284273 2.8284273]
|
|
1605
|
+
[1.4142137 1.4142137]]]
|
|
1606
|
+
"""
|
|
1607
|
+
return cdist_(x1, x2, p)
|
|
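As a cross-check of the p-norm formula, a small NumPy sketch that reproduces the doctest values above by computing the 2-norm between every pair of rows:

>>> import numpy as np
>>> x = np.array([[1.0, 1.0], [2.0, 2.0]])
>>> y = np.array([[3.0, 3.0], [3.0, 3.0]])
>>> # broadcast to all (row of x, row of y) pairs, then take the 2-norm
>>> d = np.linalg.norm(x[:, None, :] - y[None, :, :], ord=2, axis=-1)
>>> print(np.round(d, 4))
[[2.8284 2.8284]
 [1.4142 1.4142]]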
1608
|
+
|
|
1609
|
+
|
|
1683
1610
|
__all__ = [
|
|
1684
1611
|
'conv2d',
|
|
1685
1612
|
'full',
|
|
1686
1613
|
'ones_like',
|
|
1687
1614
|
'zeros_like',
|
|
1688
1615
|
'abs',
|
|
1616
|
+
'clone',
|
|
1689
1617
|
'erf',
|
|
1690
1618
|
'where',
|
|
1691
1619
|
'isclose',
|
|
1620
|
+
'empty',
|
|
1621
|
+
'empty_like',
|
|
1622
|
+
'full_like',
|
|
1692
1623
|
# 1
|
|
1693
1624
|
'div',
|
|
1694
1625
|
'divide',
|
|
@@ -1699,6 +1630,7 @@ __all__ = [
|
|
|
1699
1630
|
# 3
|
|
1700
1631
|
'clamp',
|
|
1701
1632
|
'xlogy',
|
|
1633
|
+
'fmod',
|
|
1702
1634
|
# 4
|
|
1703
1635
|
'sinc',
|
|
1704
1636
|
'sinh',
|
|
@@ -1712,10 +1644,14 @@ __all__ = [
|
|
|
1712
1644
|
# 8
|
|
1713
1645
|
'transpose',
|
|
1714
1646
|
'swapaxes',
|
|
1647
|
+
"batch_norm_elemt",
|
|
1648
|
+
"batch_norm_gather_stats_with_counts",
|
|
1649
|
+
"batch_norm_stats",
|
|
1715
1650
|
# 9
|
|
1716
|
-
|
|
1651
|
+
'squeeze',
|
|
1717
1652
|
# 10
|
|
1718
1653
|
'ne',
|
|
1654
|
+
'not_equal',
|
|
1719
1655
|
# 11
|
|
1720
1656
|
'unsqueeze',
|
|
1721
1657
|
# 12
|
|
@@ -1723,7 +1659,7 @@ __all__ = [
|
|
|
1723
1659
|
# 13
|
|
1724
1660
|
"flip",
|
|
1725
1661
|
# 14
|
|
1726
|
-
|
|
1662
|
+
'mv',
|
|
1727
1663
|
# 15
|
|
1728
1664
|
'flatten',
|
|
1729
1665
|
# 16
|
|
@@ -1740,11 +1676,11 @@ __all__ = [
|
|
|
1740
1676
|
# 21
|
|
1741
1677
|
'mul',
|
|
1742
1678
|
# 22
|
|
1743
|
-
|
|
1679
|
+
'cumprod',
|
|
1744
1680
|
# 23
|
|
1745
|
-
|
|
1681
|
+
'exp2',
|
|
1746
1682
|
# 24
|
|
1747
|
-
|
|
1683
|
+
'cdist',
|
|
1748
1684
|
# 25
|
|
1749
1685
|
'greater',
|
|
1750
1686
|
'gt',
|
|
@@ -1804,7 +1740,7 @@ __all__ = [
|
|
|
1804
1740
|
# 51
|
|
1805
1741
|
'permute',
|
|
1806
1742
|
# 52
|
|
1807
|
-
|
|
1743
|
+
'addcdiv',
|
|
1808
1744
|
# 53
|
|
1809
1745
|
|
|
1810
1746
|
# 54
|
|
@@ -1828,7 +1764,7 @@ __all__ = [
|
|
|
1828
1764
|
# 63
|
|
1829
1765
|
'minimum',
|
|
1830
1766
|
# 64
|
|
1831
|
-
|
|
1767
|
+
'ravel',
|
|
1832
1768
|
# 65
|
|
1833
1769
|
'logical_and',
|
|
1834
1770
|
# 66
|
|
@@ -1872,7 +1808,6 @@ __all__ = [
|
|
|
1872
1808
|
# 83
|
|
1873
1809
|
'narrow',
|
|
1874
1810
|
# 84
|
|
1875
|
-
|
|
1876
1811
|
'masked_select',
|
|
1877
1812
|
|
|
1878
1813
|
# 86
|
|
@@ -1883,13 +1818,13 @@ __all__ = [
|
|
|
1883
1818
|
# 88
|
|
1884
1819
|
'chunk',
|
|
1885
1820
|
# 89
|
|
1886
|
-
|
|
1821
|
+
'argsort',
|
|
1887
1822
|
# 90
|
|
1888
|
-
|
|
1823
|
+
'isinf',
|
|
1889
1824
|
# 91
|
|
1890
1825
|
|
|
1891
1826
|
# 92
|
|
1892
|
-
|
|
1827
|
+
'polar',
|
|
1893
1828
|
# 93
|
|
1894
1829
|
|
|
1895
1830
|
# 94
|
|
@@ -1925,7 +1860,7 @@ __all__ = [
|
|
|
1925
1860
|
# 109
|
|
1926
1861
|
'argmin',
|
|
1927
1862
|
# 110
|
|
1928
|
-
|
|
1863
|
+
'softmax',
|
|
1929
1864
|
# 111
|
|
1930
1865
|
|
|
1931
1866
|
# 112
|
|
@@ -1945,11 +1880,14 @@ __all__ = [
|
|
|
1945
1880
|
# 119
|
|
1946
1881
|
|
|
1947
1882
|
# 120
|
|
1948
|
-
|
|
1883
|
+
'isneginf',
|
|
1949
1884
|
# 121
|
|
1950
1885
|
|
|
1951
1886
|
# 122
|
|
1952
1887
|
|
|
1888
|
+
# 123
|
|
1889
|
+
'var',
|
|
1890
|
+
|
|
1953
1891
|
# 151
|
|
1954
1892
|
'acos',
|
|
1955
1893
|
'arccos',
|
|
@@ -1987,14 +1925,16 @@ __all__ = [
|
|
|
1987
1925
|
|
|
1988
1926
|
# 182
|
|
1989
1927
|
'bernoulli',
|
|
1990
|
-
|
|
1928
|
+
# 201
|
|
1929
|
+
'diag',
|
|
1991
1930
|
# 207
|
|
1992
1931
|
'expm1',
|
|
1993
1932
|
# 204
|
|
1994
1933
|
'erfc',
|
|
1995
1934
|
# 208
|
|
1996
1935
|
'eye',
|
|
1997
|
-
|
|
1936
|
+
# 239
|
|
1937
|
+
'lerp',
|
|
1998
1938
|
# 256
|
|
1999
1939
|
'median',
|
|
2000
1940
|
'randperm',
|
|
@@ -2022,6 +1962,7 @@ __all__ = [
|
|
|
2022
1962
|
'tan',
|
|
2023
1963
|
# 303
|
|
2024
1964
|
'trace',
|
|
1965
|
+
'gcd',
|
|
2025
1966
|
'reshape',
|
|
2026
1967
|
'outer',
|
|
2027
1968
|
# 304
|
|
@@ -2030,41 +1971,137 @@ __all__ = [
|
|
|
2030
1971
|
# 305
|
|
2031
1972
|
'triu',
|
|
2032
1973
|
|
|
1974
|
+
# 308
|
|
1975
|
+
'mm',
|
|
1976
|
+
|
|
1977
|
+
# 382
|
|
1978
|
+
'dstack',
|
|
1979
|
+
|
|
1980
|
+
# 406
|
|
1981
|
+
'allclose',
|
|
1982
|
+
|
|
1983
|
+
# 501
|
|
1984
|
+
'addbmm',
|
|
1985
|
+
|
|
1986
|
+
# 502
|
|
1987
|
+
'addmm',
|
|
1988
|
+
|
|
1989
|
+
# 505
|
|
1990
|
+
'addmv',
|
|
1991
|
+
|
|
1992
|
+
# 510
|
|
1993
|
+
'amax',
|
|
1994
|
+
|
|
1995
|
+
# 511
|
|
1996
|
+
'amin',
|
|
1997
|
+
|
|
1998
|
+
# 520
|
|
1999
|
+
'bincount',
|
|
2000
|
+
|
|
2001
|
+
# 521
|
|
2002
|
+
'bitwise_not',
|
|
2003
|
+
|
|
2004
|
+
# 526
|
|
2005
|
+
'dot',
|
|
2006
|
+
|
|
2007
|
+
# 533
|
|
2008
|
+
'frac',
|
|
2009
|
+
|
|
2033
2010
|
# 538
|
|
2034
2011
|
'histc',
|
|
2035
2012
|
|
|
2013
|
+
# 549
|
|
2014
|
+
'kthvalue',
|
|
2015
|
+
|
|
2016
|
+
# 552
|
|
2017
|
+
'log10',
|
|
2018
|
+
|
|
2036
2019
|
# 553
|
|
2037
2020
|
'logaddexp',
|
|
2021
|
+
'logaddexp2',
|
|
2022
|
+
|
|
2023
|
+
# 557
|
|
2024
|
+
'logsumexp',
|
|
2025
|
+
|
|
2026
|
+
# 582
|
|
2027
|
+
'std_mean',
|
|
2028
|
+
|
|
2029
|
+
# 584
|
|
2030
|
+
'take',
|
|
2031
|
+
|
|
2032
|
+
# 588
|
|
2033
|
+
'var_mean',
|
|
2034
|
+
|
|
2035
|
+
# 586
|
|
2036
|
+
'unique_consecutive',
|
|
2038
2037
|
|
|
2039
2038
|
# 610
|
|
2040
2039
|
'nan_to_num',
|
|
2041
2040
|
|
|
2041
|
+
# 613
|
|
2042
|
+
'nansum',
|
|
2043
|
+
|
|
2044
|
+
# 615
|
|
2045
|
+
'triangular_solve',
|
|
2046
|
+
|
|
2047
|
+
# 664
|
|
2048
|
+
'meshgrid',
|
|
2049
|
+
|
|
2042
2050
|
# 695
|
|
2043
2051
|
'count_nonzero',
|
|
2044
|
-
]
|
|
2045
2052
|
|
|
2046
|
-
|
|
2047
|
-
|
|
2048
|
-
|
|
2049
|
-
|
|
2050
|
-
|
|
2051
|
-
|
|
2052
|
-
|
|
2053
|
-
|
|
2054
|
-
|
|
2055
|
-
|
|
2056
|
-
|
|
2057
|
-
|
|
2058
|
-
|
|
2059
|
-
|
|
2060
|
-
|
|
2061
|
-
|
|
2062
|
-
|
|
2063
|
-
|
|
2064
|
-
|
|
2065
|
-
|
|
2066
|
-
|
|
2067
|
-
|
|
2053
|
+
# 697
|
|
2054
|
+
'float_power',
|
|
2055
|
+
|
|
2056
|
+
# 708
|
|
2057
|
+
'std',
|
|
2058
|
+
|
|
2059
|
+
# 739
|
|
2060
|
+
'hstack',
|
|
2061
|
+
|
|
2062
|
+
# 826
|
|
2063
|
+
'floor_divide',
|
|
2064
|
+
|
|
2065
|
+
# 887
|
|
2066
|
+
'log2',
|
|
2067
|
+
|
|
2068
|
+
# 889
|
|
2069
|
+
'isnan',
|
|
2070
|
+
|
|
2071
|
+
# 890
|
|
2072
|
+
|
|
2073
|
+
# 891
|
|
2074
|
+
|
|
2075
|
+
# 892
|
|
2076
|
+
|
|
2077
|
+
# 893
|
|
2078
|
+
|
|
2079
|
+
# 894
|
|
2080
|
+
|
|
2081
|
+
# 895
|
|
2082
|
+
|
|
2083
|
+
# 896
|
|
2084
|
+
|
|
2085
|
+
# 897
|
|
2086
|
+
|
|
2087
|
+
# 898
|
|
2088
|
+
|
|
2089
|
+
# 899
|
|
2090
|
+
|
|
2091
|
+
# 900
|
|
2092
|
+
|
|
2093
|
+
# 916
|
|
2094
|
+
'index_add',
|
|
2095
|
+
|
|
2096
|
+
# 1007
|
|
2097
|
+
't',
|
|
2098
|
+
|
|
2099
|
+
# 1023
|
|
2100
|
+
'unbind',
|
|
2101
|
+
|
|
2102
|
+
# 1100
|
|
2103
|
+
'diff',
|
|
2104
|
+
]
|
|
2068
2105
|
|
|
2069
2106
|
__all__.extend(functional.__all__)
|
|
2070
2107
|
__all__.extend(nn.__all__)
|