mindspore 2.4.10__cp311-cp311-win_amd64.whl → 2.6.0rc1__cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +13 -6
- mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -0
- mindspore/_checkparam.py +3 -38
- mindspore/_deprecated/__init__.py +17 -0
- mindspore/_deprecated/jit.py +198 -0
- mindspore/_extends/builtin_operations.py +1 -1
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/__init__.py +6 -7
- mindspore/_extends/parse/compile_config.py +83 -0
- mindspore/_extends/parse/deprecated/__init__.py +0 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +394 -0
- mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
- mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
- mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
- mindspore/_extends/parse/parser.py +46 -197
- mindspore/_extends/parse/resources.py +1 -5
- mindspore/_extends/parse/standard_method.py +217 -98
- mindspore/_extends/pijit/__init__.py +2 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +17 -12
- mindspore/_extends/pijit/tensor_func_list.py +27 -0
- mindspore/_extends/utils.py +1 -1
- mindspore/amp.py +11 -5
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/__init__.py +2 -2
- mindspore/boost/base.py +3 -7
- mindspore/boost/boost_cell_wrapper.py +138 -43
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +6 -3
- mindspore/common/_grad_function.py +56 -0
- mindspore/common/_pijit_context.py +14 -5
- mindspore/common/_register_for_tensor.py +1 -2
- mindspore/common/_stub_tensor.py +30 -14
- mindspore/common/_tensor_cpp_method.py +17 -0
- mindspore/common/_tensor_docs.py +4760 -0
- mindspore/common/api.py +435 -371
- mindspore/common/auto_dynamic_shape.py +41 -44
- mindspore/common/dtype.py +39 -36
- mindspore/common/dump.py +9 -6
- mindspore/common/file_system.py +9 -1
- mindspore/common/generator.py +2 -0
- mindspore/common/hook_handle.py +6 -2
- mindspore/common/initializer.py +13 -10
- mindspore/common/jit_begin_end.py +94 -0
- mindspore/common/jit_config.py +6 -1
- mindspore/common/jit_context.py +76 -0
- mindspore/common/jit_trace.py +378 -0
- mindspore/common/lazy_inline.py +9 -3
- mindspore/common/mindir_util.py +10 -2
- mindspore/common/mutable.py +5 -4
- mindspore/common/parameter.py +135 -52
- mindspore/common/seed.py +2 -2
- mindspore/common/sparse_tensor.py +23 -17
- mindspore/common/tensor.py +951 -1992
- mindspore/communication/__init__.py +7 -5
- mindspore/communication/_comm_helper.py +52 -2
- mindspore/communication/comm_func.py +240 -181
- mindspore/communication/management.py +95 -26
- mindspore/context.py +314 -566
- mindspore/dataset/__init__.py +65 -37
- mindspore/dataset/audio/__init__.py +2 -8
- mindspore/dataset/audio/transforms.py +3 -17
- mindspore/dataset/callback/ds_callback.py +2 -1
- mindspore/dataset/core/config.py +87 -6
- mindspore/dataset/engine/cache_admin.py +3 -3
- mindspore/dataset/engine/cache_client.py +6 -5
- mindspore/dataset/engine/datasets.py +292 -267
- mindspore/dataset/engine/datasets_audio.py +22 -8
- mindspore/dataset/engine/datasets_standard_format.py +46 -27
- mindspore/dataset/engine/datasets_text.py +78 -48
- mindspore/dataset/engine/datasets_user_defined.py +182 -116
- mindspore/dataset/engine/datasets_vision.py +120 -44
- mindspore/dataset/engine/iterators.py +283 -63
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
- mindspore/dataset/engine/obs/util.py +8 -0
- mindspore/dataset/engine/queue.py +40 -0
- mindspore/dataset/engine/samplers.py +289 -43
- mindspore/dataset/engine/serializer_deserializer.py +3 -2
- mindspore/dataset/engine/validators.py +53 -11
- mindspore/dataset/text/__init__.py +7 -6
- mindspore/dataset/text/transforms.py +6 -5
- mindspore/dataset/text/utils.py +3 -3
- mindspore/dataset/transforms/__init__.py +0 -9
- mindspore/dataset/transforms/py_transforms_util.py +17 -0
- mindspore/dataset/transforms/transforms.py +31 -14
- mindspore/dataset/utils/browse_dataset.py +1 -1
- mindspore/dataset/vision/__init__.py +2 -9
- mindspore/dataset/vision/transforms.py +202 -158
- mindspore/dataset/vision/utils.py +7 -5
- mindspore/dataset/vision/validators.py +1 -2
- mindspore/device_context/__init__.py +21 -0
- mindspore/device_context/ascend/__init__.py +25 -0
- mindspore/device_context/ascend/device.py +72 -0
- mindspore/device_context/ascend/op_debug.py +153 -0
- mindspore/device_context/ascend/op_precision.py +193 -0
- mindspore/device_context/ascend/op_tuning.py +123 -0
- mindspore/{ops_generate/gen_constants.py → device_context/cpu/__init__.py} +6 -17
- mindspore/device_context/cpu/device.py +62 -0
- mindspore/device_context/cpu/op_tuning.py +43 -0
- mindspore/device_context/gpu/__init__.py +21 -0
- mindspore/device_context/gpu/device.py +70 -0
- mindspore/device_context/gpu/op_precision.py +67 -0
- mindspore/device_context/gpu/op_tuning.py +175 -0
- mindspore/device_manager.py +170 -0
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/es/embedding_service.py +35 -27
- mindspore/experimental/llm_boost/__init__.py +1 -0
- mindspore/experimental/llm_boost/ascend_native/__init__.py +22 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +211 -0
- mindspore/experimental/llm_boost/ascend_native/llm_boost.py +52 -0
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/llm_boost/atb/llama_boost.py +6 -1
- mindspore/experimental/llm_boost/register.py +1 -0
- mindspore/experimental/map_parameter.py +4 -4
- mindspore/experimental/optim/adadelta.py +6 -6
- mindspore/experimental/optim/adagrad.py +4 -4
- mindspore/experimental/optim/adam.py +7 -0
- mindspore/experimental/optim/adamax.py +4 -4
- mindspore/experimental/optim/adamw.py +4 -0
- mindspore/experimental/optim/asgd.py +1 -1
- mindspore/experimental/optim/lr_scheduler.py +73 -46
- mindspore/experimental/optim/radam.py +34 -31
- mindspore/experimental/optim/rprop.py +1 -1
- mindspore/experimental/optim/sgd.py +1 -1
- mindspore/hal/contiguous_tensors_handle.py +6 -10
- mindspore/hal/device.py +55 -53
- mindspore/hal/event.py +52 -52
- mindspore/hal/memory.py +157 -117
- mindspore/hal/stream.py +150 -109
- mindspore/include/api/context.h +0 -1
- mindspore/include/dataset/constants.h +7 -4
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +50 -0
- mindspore/mindrecord/__init__.py +21 -8
- mindspore/mindrecord/config.py +17 -316
- mindspore/mindrecord/filereader.py +1 -9
- mindspore/mindrecord/filewriter.py +5 -15
- mindspore/mindrecord/mindpage.py +1 -9
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +796 -759
- mindspore/mint/distributed/__init__.py +70 -4
- mindspore/mint/distributed/distributed.py +2679 -44
- mindspore/mint/linalg/__init__.py +8 -0
- mindspore/mint/nn/__init__.py +743 -22
- mindspore/mint/nn/functional.py +716 -23
- mindspore/mint/nn/layer/__init__.py +21 -4
- mindspore/mint/nn/layer/_functions.py +334 -0
- mindspore/mint/nn/layer/activation.py +276 -1
- mindspore/mint/nn/layer/basic.py +123 -0
- mindspore/mint/nn/layer/conv.py +921 -0
- mindspore/mint/nn/layer/normalization.py +223 -28
- mindspore/mint/nn/layer/padding.py +797 -0
- mindspore/mint/nn/layer/pooling.py +235 -0
- mindspore/mint/optim/__init__.py +3 -1
- mindspore/mint/optim/adam.py +223 -0
- mindspore/mint/optim/adamw.py +26 -19
- mindspore/mint/optim/sgd.py +171 -0
- mindspore/mint/special/__init__.py +2 -1
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/multiprocessing/__init__.py +5 -0
- mindspore/nn/__init__.py +4 -1
- mindspore/nn/cell.py +1370 -189
- mindspore/nn/dynamic_lr.py +2 -1
- mindspore/nn/layer/activation.py +29 -27
- mindspore/nn/layer/basic.py +51 -35
- mindspore/nn/layer/channel_shuffle.py +3 -3
- mindspore/nn/layer/container.py +1 -1
- mindspore/nn/layer/conv.py +22 -17
- mindspore/nn/layer/embedding.py +12 -11
- mindspore/nn/layer/normalization.py +56 -49
- mindspore/nn/layer/padding.py +4 -3
- mindspore/nn/layer/pooling.py +120 -42
- mindspore/nn/layer/rnn_cells.py +1 -1
- mindspore/nn/layer/rnns.py +2 -1
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +59 -36
- mindspore/nn/learning_rate_schedule.py +8 -4
- mindspore/nn/loss/loss.py +58 -55
- mindspore/nn/optim/ada_grad.py +7 -5
- mindspore/nn/optim/adadelta.py +11 -9
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +17 -13
- mindspore/nn/optim/adamax.py +8 -7
- mindspore/nn/optim/adasum.py +5 -5
- mindspore/nn/optim/asgd.py +1 -1
- mindspore/nn/optim/ftrl.py +11 -9
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/lars.py +1 -4
- mindspore/nn/optim/lazyadam.py +12 -10
- mindspore/nn/optim/momentum.py +7 -6
- mindspore/nn/optim/optimizer.py +3 -3
- mindspore/nn/optim/proximal_ada_grad.py +12 -10
- mindspore/nn/optim/rmsprop.py +13 -12
- mindspore/nn/optim/rprop.py +11 -9
- mindspore/nn/optim/sgd.py +9 -6
- mindspore/nn/optim/tft_wrapper.py +5 -2
- mindspore/nn/optim/thor.py +2 -1
- mindspore/nn/probability/bijector/bijector.py +17 -11
- mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
- mindspore/nn/probability/bijector/invert.py +2 -2
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +3 -2
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +1 -1
- mindspore/nn/probability/distribution/cauchy.py +4 -2
- mindspore/nn/probability/distribution/exponential.py +6 -7
- mindspore/nn/probability/distribution/gamma.py +2 -2
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/half_normal.py +5 -3
- mindspore/nn/probability/distribution/logistic.py +5 -3
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/uniform.py +5 -3
- mindspore/nn/reinforcement/_tensors_queue.py +1 -1
- mindspore/nn/reinforcement/tensor_array.py +1 -1
- mindspore/nn/utils/init.py +13 -11
- mindspore/nn/wrap/__init__.py +6 -6
- mindspore/nn/wrap/cell_wrapper.py +181 -122
- mindspore/nn/wrap/grad_reducer.py +45 -36
- mindspore/nn/wrap/loss_scale.py +6 -7
- mindspore/numpy/array_creations.py +63 -65
- mindspore/numpy/array_ops.py +149 -144
- mindspore/numpy/logic_ops.py +41 -42
- mindspore/numpy/math_ops.py +365 -363
- mindspore/numpy/utils.py +17 -18
- mindspore/numpy/utils_const.py +5 -6
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +5 -3
- mindspore/ops/_grad_experimental/grad_comm_ops.py +112 -16
- mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -2
- mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +2 -1
- mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -0
- mindspore/ops/_op_impl/cpu/raise_op.py +28 -0
- mindspore/ops/_register_for_op.py +0 -11
- mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
- mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -65
- mindspore/ops/_vmap/vmap_array_ops.py +27 -25
- mindspore/ops/_vmap/vmap_base.py +0 -2
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +21 -14
- mindspore/ops/_vmap/vmap_math_ops.py +15 -16
- mindspore/ops/_vmap/vmap_nn_ops.py +29 -42
- mindspore/ops/auto_generate/__init__.py +4 -3
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +236 -46
- mindspore/ops/auto_generate/gen_extend_func.py +764 -124
- mindspore/ops/auto_generate/gen_ops_def.py +4018 -2264
- mindspore/ops/auto_generate/gen_ops_prim.py +15463 -5037
- mindspore/ops/auto_generate/pyboost_inner_prim.py +221 -87
- mindspore/ops/composite/__init__.py +2 -1
- mindspore/ops/composite/base.py +20 -25
- mindspore/ops/composite/math_ops.py +6 -16
- mindspore/ops/composite/multitype_ops/__init__.py +5 -2
- mindspore/ops/composite/multitype_ops/_compile_utils.py +228 -30
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
- mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
- mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +2 -30
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
- mindspore/ops/function/__init__.py +40 -2
- mindspore/ops/function/_add_attr_func.py +58 -0
- mindspore/ops/function/array_func.py +2089 -2403
- mindspore/ops/function/clip_func.py +80 -23
- mindspore/ops/function/debug_func.py +57 -57
- mindspore/ops/function/grad/__init__.py +1 -0
- mindspore/ops/function/grad/grad_func.py +104 -71
- mindspore/ops/function/image_func.py +2 -2
- mindspore/ops/function/linalg_func.py +47 -78
- mindspore/ops/function/math_func.py +4501 -3802
- mindspore/ops/function/nn_func.py +1726 -620
- mindspore/ops/function/other_func.py +159 -1
- mindspore/ops/function/parameter_func.py +18 -84
- mindspore/ops/function/random_func.py +440 -387
- mindspore/ops/function/reshard_func.py +4 -70
- mindspore/ops/function/sparse_func.py +3 -3
- mindspore/ops/function/sparse_unary_func.py +6 -6
- mindspore/ops/function/spectral_func.py +25 -58
- mindspore/ops/function/vmap_func.py +24 -17
- mindspore/ops/functional.py +22 -7
- mindspore/ops/functional_overload.py +1440 -0
- mindspore/ops/op_info_register.py +32 -244
- mindspore/ops/operations/__init__.py +13 -7
- mindspore/ops/operations/_custom_ops_utils.py +247 -0
- mindspore/ops/operations/_embedding_cache_ops.py +4 -4
- mindspore/ops/operations/_grad_ops.py +2 -43
- mindspore/ops/operations/_infer_ops.py +2 -1
- mindspore/ops/operations/_inner_ops.py +43 -84
- mindspore/ops/operations/_ms_kernel.py +4 -10
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +3 -2
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/array_ops.py +81 -324
- mindspore/ops/operations/comm_ops.py +154 -108
- mindspore/ops/operations/custom_ops.py +232 -78
- mindspore/ops/operations/debug_ops.py +153 -59
- mindspore/ops/operations/inner_ops.py +7 -5
- mindspore/ops/operations/linalg_ops.py +1 -57
- mindspore/ops/operations/manually_defined/_inner.py +1 -1
- mindspore/ops/operations/manually_defined/ops_def.py +928 -180
- mindspore/ops/operations/math_ops.py +32 -234
- mindspore/ops/operations/nn_ops.py +210 -498
- mindspore/ops/operations/other_ops.py +62 -9
- mindspore/ops/operations/random_ops.py +13 -7
- mindspore/ops/operations/reshard_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +2 -2
- mindspore/ops/primitive.py +66 -53
- mindspore/ops/tensor_method.py +1888 -0
- mindspore/ops_generate/__init__.py +0 -5
- mindspore/ops_generate/aclnn/__init__.py +0 -0
- mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +135 -0
- mindspore/ops_generate/aclnn/gen_aclnn_implement.py +257 -0
- mindspore/ops_generate/api/__init__.py +0 -0
- mindspore/ops_generate/api/add_tensor_docs_generator.py +56 -0
- mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +105 -0
- mindspore/ops_generate/api/functional_map_cpp_generator.py +504 -0
- mindspore/ops_generate/api/functional_overload_py_generator.py +112 -0
- mindspore/ops_generate/api/functions_cc_generator.py +237 -0
- mindspore/ops_generate/api/gen_api.py +103 -0
- mindspore/ops_generate/api/op_api_proto.py +235 -0
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +461 -0
- mindspore/ops_generate/common/__init__.py +0 -0
- mindspore/ops_generate/common/base_generator.py +11 -0
- mindspore/ops_generate/common/gen_constants.py +91 -0
- mindspore/ops_generate/common/gen_utils.py +348 -0
- mindspore/ops_generate/common/op_proto.py +473 -0
- mindspore/ops_generate/common/template.py +523 -0
- mindspore/ops_generate/gen_ops.py +22 -1069
- mindspore/ops_generate/op_def/__init__.py +0 -0
- mindspore/ops_generate/op_def/gen_op_def.py +90 -0
- mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +191 -0
- mindspore/ops_generate/op_def/ops_def_cc_generator.py +299 -0
- mindspore/ops_generate/op_def/ops_def_h_generator.py +74 -0
- mindspore/ops_generate/op_def/ops_name_h_generator.py +83 -0
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
- mindspore/ops_generate/op_def_py/__init__.py +0 -0
- mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
- mindspore/ops_generate/op_def_py/op_def_py_generator.py +132 -0
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +489 -0
- mindspore/ops_generate/pyboost/__init__.py +0 -0
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +139 -0
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +93 -0
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +175 -0
- mindspore/ops_generate/pyboost/op_template_parser.py +517 -0
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +407 -0
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +100 -0
- mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +148 -0
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +155 -0
- mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +132 -0
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +272 -0
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +938 -0
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +357 -0
- mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +179 -36
- mindspore/ops_generate/resources/__init__.py +0 -0
- mindspore/ops_generate/resources/resource_list.py +30 -0
- mindspore/ops_generate/resources/resource_loader.py +36 -0
- mindspore/ops_generate/resources/resource_manager.py +64 -0
- mindspore/ops_generate/resources/yaml_loader.py +88 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
- mindspore/parallel/__init__.py +7 -3
- mindspore/parallel/_auto_parallel_context.py +152 -34
- mindspore/parallel/_cell_wrapper.py +130 -15
- mindspore/parallel/_parallel_serialization.py +107 -5
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +7 -2
- mindspore/parallel/_tensor.py +142 -18
- mindspore/parallel/_utils.py +199 -23
- mindspore/parallel/algo_parameter_config.py +4 -4
- mindspore/parallel/auto_parallel.py +732 -0
- mindspore/parallel/checkpoint_convert.py +159 -0
- mindspore/parallel/checkpoint_transform.py +698 -35
- mindspore/parallel/cluster/process_entity/_api.py +276 -50
- mindspore/parallel/cluster/process_entity/_utils.py +41 -6
- mindspore/parallel/cluster/run.py +21 -4
- mindspore/parallel/function/__init__.py +24 -0
- mindspore/parallel/function/reshard_func.py +259 -0
- mindspore/parallel/nn/__init__.py +25 -0
- mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
- mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
- mindspore/parallel/parameter_broadcast.py +25 -14
- mindspore/parallel/shard.py +137 -58
- mindspore/parallel/transform_safetensors.py +363 -305
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +22 -5
- mindspore/profiler/analysis/__init__.py +0 -0
- mindspore/profiler/analysis/parser/__init__.py +0 -0
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +170 -0
- mindspore/profiler/analysis/parser/base_parser.py +158 -0
- mindspore/profiler/analysis/parser/framework_cann_relation_parser.py +45 -0
- mindspore/profiler/analysis/parser/ms_framework_parser.py +142 -0
- mindspore/profiler/analysis/parser/ms_minddata_parser.py +145 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +264 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +40 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +106 -0
- mindspore/profiler/analysis/parser/timeline_creator/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_creator/base_timeline_creator.py +44 -0
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +90 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +76 -0
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +103 -0
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +134 -0
- mindspore/profiler/analysis/parser/timeline_event/__init__.py +0 -0
- mindspore/profiler/analysis/parser/timeline_event/base_event.py +233 -0
- mindspore/profiler/analysis/parser/timeline_event/cpu_op_event.py +47 -0
- mindspore/profiler/analysis/parser/timeline_event/flow_event.py +36 -0
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +415 -0
- mindspore/profiler/analysis/parser/timeline_event/msprof_event.py +73 -0
- mindspore/profiler/analysis/parser/timeline_event/scope_layer_event.py +53 -0
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +146 -0
- mindspore/profiler/analysis/task_manager.py +131 -0
- mindspore/profiler/analysis/time_converter.py +84 -0
- mindspore/profiler/analysis/viewer/__init__.py +0 -0
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +372 -0
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +87 -0
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +250 -0
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +320 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +327 -0
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +376 -0
- mindspore/profiler/analysis/viewer/ascend_timeline_viewer.py +58 -0
- mindspore/profiler/analysis/viewer/base_viewer.py +26 -0
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +96 -0
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +581 -0
- mindspore/profiler/analysis/work_flow.py +73 -0
- mindspore/profiler/common/ascend_msprof_exporter.py +139 -0
- mindspore/profiler/common/command_executor.py +90 -0
- mindspore/profiler/common/constant.py +186 -3
- mindspore/profiler/common/file_manager.py +208 -0
- mindspore/profiler/common/log.py +130 -0
- mindspore/profiler/common/msprof_cmd_tool.py +221 -0
- mindspore/profiler/common/path_manager.py +395 -0
- mindspore/profiler/common/process_bar.py +168 -0
- mindspore/profiler/common/process_pool.py +9 -3
- mindspore/profiler/common/profiler_context.py +500 -0
- mindspore/profiler/common/profiler_info.py +304 -0
- mindspore/profiler/common/profiler_meta_data.py +74 -0
- mindspore/profiler/common/profiler_output_path.py +284 -0
- mindspore/profiler/common/profiler_parameters.py +251 -0
- mindspore/profiler/common/profiler_path_manager.py +179 -0
- mindspore/profiler/common/record_function.py +76 -0
- mindspore/profiler/common/tlv_decoder.py +76 -0
- mindspore/profiler/common/util.py +75 -2
- mindspore/profiler/dynamic_profiler.py +341 -75
- mindspore/profiler/envprofiler.py +163 -0
- mindspore/profiler/experimental_config.py +197 -0
- mindspore/profiler/mstx.py +242 -0
- mindspore/profiler/platform/__init__.py +21 -0
- mindspore/profiler/platform/base_profiler.py +40 -0
- mindspore/profiler/platform/cpu_profiler.py +124 -0
- mindspore/profiler/platform/gpu_profiler.py +74 -0
- mindspore/profiler/platform/npu_profiler.py +335 -0
- mindspore/profiler/profiler.py +1073 -90
- mindspore/profiler/profiler_action_controller.py +187 -0
- mindspore/profiler/profiler_interface.py +118 -0
- mindspore/profiler/schedule.py +243 -0
- mindspore/rewrite/api/node.py +15 -13
- mindspore/rewrite/api/symbol_tree.py +2 -3
- mindspore/run_check/_check_version.py +27 -20
- mindspore/run_check/run_check.py +1 -1
- mindspore/runtime/__init__.py +37 -0
- mindspore/runtime/device.py +27 -0
- mindspore/runtime/event.py +209 -0
- mindspore/runtime/executor.py +177 -0
- mindspore/runtime/memory.py +409 -0
- mindspore/runtime/stream.py +460 -0
- mindspore/runtime/thread_bind_core.py +401 -0
- mindspore/safeguard/rewrite_obfuscation.py +12 -9
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +8 -8
- mindspore/train/_utils.py +88 -25
- mindspore/train/amp.py +9 -5
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +2 -16
- mindspore/train/callback/_checkpoint.py +53 -55
- mindspore/train/callback/_cluster_monitor.py +14 -18
- mindspore/train/callback/_early_stop.py +1 -1
- mindspore/train/callback/_flops_collector.py +103 -68
- mindspore/train/callback/_history.py +8 -5
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +0 -3
- mindspore/train/callback/_loss_monitor.py +2 -1
- mindspore/train/callback/_on_request_exit.py +6 -5
- mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
- mindspore/train/callback/_summary_collector.py +52 -19
- mindspore/train/callback/_time_monitor.py +2 -1
- mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +204 -107
- mindspore/train/data_sink.py +25 -2
- mindspore/train/dataset_helper.py +15 -16
- mindspore/train/loss_scale_manager.py +8 -7
- mindspore/train/metrics/accuracy.py +3 -3
- mindspore/train/metrics/confusion_matrix.py +9 -9
- mindspore/train/metrics/error.py +3 -3
- mindspore/train/metrics/hausdorff_distance.py +4 -4
- mindspore/train/metrics/mean_surface_distance.py +3 -3
- mindspore/train/metrics/metric.py +0 -12
- mindspore/train/metrics/occlusion_sensitivity.py +4 -2
- mindspore/train/metrics/precision.py +11 -10
- mindspore/train/metrics/recall.py +9 -9
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +174 -46
- mindspore/train/model.py +184 -113
- mindspore/train/serialization.py +622 -978
- mindspore/train/summary/_summary_adapter.py +2 -2
- mindspore/train/summary/summary_record.py +2 -3
- mindspore/train/train_thor/model_thor.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +6 -3
- mindspore/utils/dryrun.py +140 -0
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/runtime_execution_order_check.py +550 -0
- mindspore/utils/utils.py +138 -4
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/METADATA +3 -3
- {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/RECORD +587 -418
- {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/entry_points.txt +1 -1
- mindspore/_install_custom.py +0 -43
- mindspore/common/_register_for_adapter.py +0 -74
- mindspore/common/_tensor_overload.py +0 -139
- mindspore/mindspore_np_dtype.dll +0 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -197
- mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
- mindspore/ops_generate/gen_aclnn_implement.py +0 -263
- mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
- mindspore/ops_generate/gen_pyboost_func.py +0 -1052
- mindspore/ops_generate/gen_utils.py +0 -209
- mindspore/ops_generate/op_proto.py +0 -145
- mindspore/ops_generate/template.py +0 -261
- mindspore/profiler/envprofiling.py +0 -254
- mindspore/profiler/profiling.py +0 -1926
- {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.4.10.dist-info → mindspore-2.6.0rc1.dist-info}/top_level.txt +0 -0
mindspore/mint/nn/functional.py
CHANGED
--- mindspore/mint/nn/functional.py (2.4.10)
+++ mindspore/mint/nn/functional.py (2.6.0rc1)
@@ -15,6 +15,10 @@
 """mint nn functional."""
 from __future__ import absolute_import
 import mindspore.ops as ops
+import mindspore.mint as mint
+from mindspore import log as logger
+from mindspore import _checkparam as validator
+from mindspore.ops.primitive import constexpr
 from mindspore.ops.function.nn_func import max_pool2d_ext as max_pool2d
 from mindspore.ops.functional import (
     conv_transpose2d,
@@ -42,6 +46,9 @@ from mindspore.ops.functional import layer_norm
 
 # 11
 from mindspore.ops.functional import relu
+
+from mindspore.ops.function.nn_func import relu_
+
 # 12
 
 # 13
@@ -49,7 +56,8 @@ from mindspore.ops.functional import relu
 # 14
 from mindspore.ops.function.nn_func import dropout_ext as dropout
 # 15
-
+from mindspore.ops.function.nn_func import conv1d_ext as conv1d
+from mindspore.ops.function.nn_func import conv2d_ext as conv2d
 # 16
 from mindspore.ops.function.nn_func import log_softmax_ext as log_softmax
 # 18
@@ -59,7 +67,7 @@ from mindspore.ops.auto_generate import prelu
 # 20
 
 # 21
-
+from mindspore.ops.function.nn_func import conv3d_ext as conv3d
 # 22
 
 # 23
@@ -90,7 +98,7 @@ from mindspore.ops.function.nn_func import batch_norm_ext as batch_norm
 # 35
 
 # 36
-from mindspore.ops.
+from mindspore.ops.functional_overload import gelu
 # 37
 
 # 38
@@ -106,7 +114,7 @@ from mindspore.ops.functional import group_norm
 # 43
 
 # 44
-
+from mindspore.ops.auto_generate import soft_margin_loss
 # 45
 
 # 46
@@ -126,7 +134,7 @@ from mindspore.ops.functional import embedding
 # 53
 
 # 54
-
+from mindspore.ops.functional_overload import pixel_shuffle
 # 55
 
 # 56
@@ -168,7 +176,7 @@ from mindspore.ops.functional import embedding
 # 74
 
 # 75
-
+from mindspore.ops.function.nn_func import adaptive_max_pool2d
 # 76
 
 # 77
@@ -196,11 +204,11 @@ from mindspore.ops.functional import embedding
 # 88
 
 # 89
-
+from mindspore.ops.auto_generate import avg_pool1d_ext as avg_pool1d
 # 90
 from mindspore.ops.function.nn_func import avg_pool2d_ext as avg_pool2d
 # 91
-
+from mindspore.ops.function.nn_func import avg_pool3d_ext as avg_pool3d
 # 92
 from mindspore.ops.auto_generate import leaky_relu_ext as leaky_relu
 # 93
@@ -219,6 +227,8 @@ from mindspore.ops.function.math_func import tanh
 from mindspore.ops.auto_generate import selu_ext as selu # pylint: disable=W0611
 # 100
 from mindspore.ops.auto_generate import softshrink # pylint: disable=W0611
+# 152
+from mindspore.ops.auto_generate import adaptive_avg_pool3d_ext
 # 220
 from mindspore.ops.function.nn_func import hardshrink # pylint: disable=W0611
 # 221
@@ -230,6 +240,12 @@ from mindspore.ops.auto_generate import mish_ext as mish # pylint: disable=W061
 # 238
 from mindspore.ops.auto_generate import l1_loss_ext as l1_loss # pylint: disable=W0611
 
+# 254
+from mindspore.ops.auto_generate import max_unpool2d_ext as max_unpool2d
+
+# 256
+from mindspore.ops.auto_generate import inplace_threshold as threshold_
+from mindspore.ops.auto_generate import threshold as threshold_op
 # 257
 
 # 258
@@ -239,14 +255,236 @@ from mindspore.ops.function.nn_func import mse_loss_ext as mse_loss
 # 323
 
 # 324
-from mindspore.ops.auto_generate import elu_ext
-
+from mindspore.ops.auto_generate import elu_ext
+from mindspore.ops.auto_generate import inplace_elu
+
+# 421
+from mindspore.ops.auto_generate import flatten_ext as flatten
+
+# 426
+from mindspore.ops.function.clip_func import clamp
+# 427
+from mindspore.ops.function.math_func import norm_ext
+# 428
+from mindspore.ops.functional import broadcast_to
+# 536
+from mindspore.ops.function.nn_func import glu_ext as glu
+# 537
+from mindspore.ops.auto_generate import hardtanh as hardtanh_op
+from mindspore.ops.auto_generate import inplace_hardtanh as hardtanh_
+# 548
+from mindspore.ops.function.nn_func import kl_div_ext as kl_div
 # 556
 from mindspore.ops.function.nn_func import logsigmoid_ext as logsigmoid
 
 from mindspore.ops.auto_generate import adaptive_avg_pool1d
 
 from mindspore.ops.functional import adaptive_avg_pool2d_ext as adaptive_avg_pool2d
+from mindspore.ops.function.nn_func import cross_entropy_ext as cross_entropy
+from mindspore.ops.function.nn_func import nll_loss_ext as nll_loss
+
+
+def elu(input, alpha=1.0, inplace=False):
+    r"""
+    Exponential Linear Unit activation function.
+
+    Applies the exponential linear unit function element-wise. The activation function is defined as:
+
+    .. math::
+        ELU_{i} =
+        \begin{cases}
+        x_i, &\text{if } x_i \geq 0; \cr
+        \alpha * (\exp(x_i) - 1), &\text{otherwise.}
+        \end{cases}
+
+    where :math:`x_i` represents the element of the input and :math:`\alpha` represents the `alpha` parameter,
+    which controls the smoothness of the ELU.
+
+    ELU Activation Function Graph:
+
+    .. image:: ../images/ELU.png
+        :align: center
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The input of ELU is a Tensor of any dimension.
+        alpha (float, optional): The alpha value of ELU, the data type is float. Default: ``1.0``.
+        inplace (bool, optional): Whether to use inplace mode, the data type is bool. Default: ``False``.
+
+    Returns:
+        Tensor, with the same shape and type as the `input`.
+
+    Raises:
+        RuntimeError: If the dtype of `input` is not float16, float32 or bfloat16.
+        TypeError: If the dtype of `alpha` is not float.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> import numpy as np
+        >>> input = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float32)
+        >>> output = mint.nn.functional.elu(input)
+        >>> print(output)
+        [-0.63212055 -0.86466473 0. 2. 1.]
+    """
+    if inplace:
+        return inplace_elu(input, alpha)
+    return elu_ext(input, alpha)
+
+
+def elu_(input, alpha=1.0):
+    r"""
+    Exponential Linear Unit activation function.
+
+    Applies the exponential linear unit function in-place element-wise. The activation function is defined as:
+
+    .. math::
+        ELU_{i} =
+        \begin{cases}
+        x_i, &\text{if } x_i \geq 0; \cr
+        \alpha * (\exp(x_i) - 1), &\text{otherwise.}
+        \end{cases}
+
+    where :math:`x_i` represents the element of the input and :math:`\alpha` represents the `alpha` parameter,
+    which controls the smoothness of the ELU.
+
+    ELU Activation Function Graph:
+
+    .. image:: ../images/ELU.png
+        :align: center
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The input of ELU is a Tensor of any dimension.
+        alpha (float, optional): The alpha value of ELU, the data type is float and `alpha` should be
+            greater than 0. Default: ``1.0``.
+
+    Returns:
+        Tensor, with the same shape and type as the `input`.
+
+    Raises:
+        RuntimeError: If the dtype of `input` is not float16, float32 or bfloat16.
+        TypeError: If the dtype of `alpha` is not float.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> import numpy as np
+        >>> input = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float32)
+        >>> mint.nn.functional.elu_(input)
+        >>> print(input)
+        [-0.63212055 -0.86466473 0. 2. 1.]
+    """
+    return inplace_elu(input, alpha)
+
+
+def hardtanh(input, min_val=-1.0, max_val=1.0, inplace=False):
+    r"""
+    Applies the hardtanh activation function element-wise. The activation function is defined as:
+
+    .. math::
+        \text{hardtanh}(input) = \begin{cases}
+            max\_val, & \text{ if } input > max\_val \\
+            min\_val, & \text{ if } input < min\_val \\
+            input, & \text{ otherwise. }
+        \end{cases}
+
+    The linear region range :math:`[min\_val, max\_val]` can be adjusted using `min_val` and `max_val`.
+
+    Hardtanh Activation Function Graph:
+
+    .. image:: ../images/Hardtanh.png
+        :align: center
+
+    .. warning::
+        This is an experimental API that is subject to change.
+
+    Args:
+        input (Tensor): Input Tensor.
+        min_val (Union[bool, int, float], optional): Minimum value of the linear region range. Default: ``-1.0`` .
+        max_val (Union[bool, int, float], optional): Maximum value of the linear region range. Default: ``1.0`` .
+        inplace (bool, optional): Whether to apply the operation in place. Default: ``False``.
+
+    Returns:
+        Tensor, with the same dtype and shape as `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If dtype of `input` is not one of: int8, int16, int32, int64, uint8, float16, float32, bfloat16.
+        TypeError: If dtype of `min_val` is neither float nor int.
+        TypeError: If dtype of `max_val` is neither float nor int.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> x = Tensor([-1, -2, 0, 2, 1], mindspore.float16)
+        >>> output = mint.nn.functional.hardtanh(x, min_val=-1.0, max_val=1.0, inplace=False)
+        >>> print(output)
+        [-1. -1. 0. 1. 1.]
+    """
+    if inplace:
+        return hardtanh_(input, min_val, max_val)
+    return hardtanh_op(input, min_val, max_val)
+
+
+def relu6(input, inplace=False):
+    r"""
+    Computes ReLU (Rectified Linear Unit) upper bounded by 6 of input tensors element-wise.
+
+    .. math::
+
+        \text{ReLU6}(input) = \min(\max(0, input), 6)
+
+    It returns :math:`\min(\max(0, input), 6)` element-wise.
+
+    ReLU6 Activation Function Graph:
+
+    .. image:: ../images/ReLU6.png
+        :align: center
+
+    .. warning::
+        This is an experimental API that is subject to change.
+
+    Args:
+        input (Tensor): Input Tensor. Dtype is in int8, int16, int32, int64, uint8, float16, float32, bfloat16.
+        inplace (bool, optional): Whether to apply the operation in place. Default: ``False``.
+
+    Returns:
+        Tensor, with the same dtype and shape as the `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If dtype of `input` is not one of: int8, int16, int32, int64, uint8, float16, float32, bfloat16.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, mint
+        >>> x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
+        >>> result = mint.nn.functional.relu6(x)
+        >>> print(result)
+        [[0. 4. 0.]
+         [2. 0. 6.]]
+    """
+    if inplace:
+        return hardtanh_(input, 0, 6)
+    return hardtanh_op(input, 0, 6)
 
 
 def binary_cross_entropy(input, target, weight=None, reduction='mean'):
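The hunk above wires the new `elu`, `elu_`, `hardtanh`, and `relu6` wrappers to their out-of-place and in-place primitives (`elu_ext`/`inplace_elu`, `hardtanh`/`inplace_hardtanh`), with `relu6` implemented as hardtanh clamped to `[0, 6]`. A minimal usage sketch, assuming a mindspore 2.6.0rc1 install on Ascend (the only platform the docstrings list as supported):

import numpy as np
import mindspore
from mindspore import Tensor, mint

x = Tensor(np.array([-2.0, -0.5, 0.0, 3.0, 8.0]), mindspore.float32)
out = mint.nn.functional.elu(x)                                 # out-of-place; x is unchanged
ht = mint.nn.functional.hardtanh(x, min_val=-1.0, max_val=1.0)  # clamp to [-1, 1]
r6 = mint.nn.functional.relu6(x)                                # min(max(0, x), 6) -> [0. 0. 0. 3. 6.]
mint.nn.functional.elu_(x)                                      # in-place; x now holds the ELU result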
@@ -323,7 +561,7 @@ def binary_cross_entropy_with_logits(input, target, weight=None, reduction='mean
|
|
|
323
561
|
r"""
|
|
324
562
|
Adds sigmoid activation function to `input` as logits, and uses this logits to compute binary cross entropy
|
|
325
563
|
between the logits and the target.
|
|
326
|
-
Consistent with the function of
|
|
564
|
+
Consistent with the function of :func:`mindspore.ops.binary_cross_entropy_with_logits` .
|
|
327
565
|
|
|
328
566
|
Sets input `input` as :math:`X`, input `target` as :math:`Y`, input `weight` as :math:`W`, output as :math:`L`.
|
|
329
567
|
Then,
|
|
@@ -424,8 +662,8 @@ def one_hot(tensor, num_classes=-1):
|
|
|
424
662
|
|
|
425
663
|
Args:
|
|
426
664
|
tensor (Tensor): A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
|
|
427
|
-
Data type must be int32 or int64.
|
|
428
|
-
num_classes (int): A scalar defining the depth of the one-hot dimension, default: ``-1``.
|
|
665
|
+
Data type must be int32 or int64. Dimension cannot be greater than 7.
|
|
666
|
+
num_classes (int, optional): A scalar defining the depth of the one-hot dimension, default: ``-1``.
|
|
429
667
|
|
|
430
668
|
Returns:
|
|
431
669
|
Tensor, one-hot tensor.
|
|
@@ -453,6 +691,427 @@ def one_hot(tensor, num_classes=-1):
|
|
|
453
691
|
return ops.function.array_func.one_hot_ext(tensor, num_classes)
|
|
454
692
|
|
|
455
693
|
|
|
694
|
+
def smooth_l1_loss(input, target, reduction='mean', beta=1.0):
|
|
695
|
+
r"""
|
|
696
|
+
Computes smooth L1 loss, a robust L1 loss.
|
|
697
|
+
|
|
698
|
+
SmoothL1Loss is a Loss similar to MSELoss but less sensitive to outliers as described in the
|
|
699
|
+
`Fast R-CNN <https://arxiv.org/abs/1504.08083>`_ by Ross Girshick.
|
|
700
|
+
|
|
701
|
+
Given two inputs :math:`x,\ y` of length :math:`N`, the SmoothL1Loss can be described
|
|
702
|
+
as follows:
|
|
703
|
+
|
|
704
|
+
.. math::
|
|
705
|
+
L_{i} =
|
|
706
|
+
\begin{cases}
|
|
707
|
+
\frac{0.5 (x_i - y_i)^{2}}{\text{beta}}, & \text{if } |x_i - y_i| < \text{beta} \\
|
|
708
|
+
|x_i - y_i| - 0.5 * \text{beta}, & \text{otherwise. }
|
|
709
|
+
\end{cases}
|
|
710
|
+
|
|
711
|
+
If `reduction` is not `none`, then:
|
|
712
|
+
|
|
713
|
+
.. math::
|
|
714
|
+
L =
|
|
715
|
+
\begin{cases}
|
|
716
|
+
\operatorname{mean}(L_{i}), & \text{if reduction} = \text{'mean';}\\
|
|
717
|
+
\operatorname{sum}(L_{i}), & \text{if reduction} = \text{'sum'.}
|
|
718
|
+
\end{cases}
|
|
719
|
+
|
|
720
|
+
Here :math:`\text{beta}` controls the point where the loss function changes from quadratic to linear.
|
|
721
|
+
:math:`\text{beta} \geq 0` , its default value is ``1.0`` . :math:`N` is the batch size.
|
|
722
|
+
|
|
723
|
+
.. warning::
|
|
724
|
+
This is an experimental optimizer API that is subject to change.
|
|
725
|
+
|
|
726
|
+
Note:
|
|
727
|
+
- Arg `input` and `target` comply with the implicit type conversion rules to make the data types consistent.
|
|
728
|
+
If they have different data types, the lower precision data type will be converted to relatively the
|
|
729
|
+
highest precision data type.
|
|
730
|
+
|
|
731
|
+
Args:
|
|
732
|
+
input (Tensor): Tensor of shape :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
|
|
733
|
+
Supported dtypes:
|
|
734
|
+
|
|
735
|
+
- Ascend: float16, float32, bfloat16.
|
|
736
|
+
|
|
737
|
+
target (Tensor): Ground truth data, tensor of shape :math:`(N, *)`, same shape as the `input`.
|
|
738
|
+
Supported dtypes:
|
|
739
|
+
|
|
740
|
+
- Ascend: float16, float32, bfloat16.
|
|
741
|
+
|
|
742
|
+
reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
|
|
743
|
+
``'sum'`` . Default: ``'mean'`` .
|
|
744
|
+
|
|
745
|
+
- ``'none'``: no reduction will be applied.
|
|
746
|
+
- ``'mean'``: compute the mean of elements in the output.
|
|
747
|
+
- ``'sum'``: the output elements will be summed.
|
|
748
|
+
beta (number, optional): A parameter used to control the point where the function will change between
|
|
749
|
+
L1 to L2 loss. The value should be greater than or equal to zero. Default: ``1.0`` .
|
|
750
|
+
|
|
751
|
+
Returns:
|
|
752
|
+
Tensor, the data type is the same as `input`.
|
|
753
|
+
If `reduction` is ``'none'``, then output is a tensor with the same shape as `input`.
|
|
754
|
+
Otherwise, the shape of output tensor is :math:`()`.
|
|
755
|
+
|
|
756
|
+
Raises:
|
|
757
|
+
TypeError: If `input` or `target` is not a Tensor.
|
|
758
|
+
RuntimeError: If dtype of `input` or `target` is not one of float16, float32, bfloat16.
|
|
759
|
+
ValueError: If shape of `input` is not the same as `target`.
|
|
760
|
+
ValueError: If `reduction` is not one of ``'none'``, ``'mean'``, ``'sum'``.
|
|
761
|
+
TypeError: If `beta` is not a float, int or bool.
|
|
762
|
+
RuntimeError: If `beta` is less than 0.
|
|
763
|
+
|
|
764
|
+
Supported Platforms:
|
|
765
|
+
``Ascend``
|
|
766
|
+
|
|
767
|
+
Examples:
|
|
768
|
+
>>> import mindspore
|
|
769
|
+
>>> import numpy as np
|
|
770
|
+
>>> from mindspore import Tensor, ops
|
|
771
|
+
>>> input = Tensor(np.array([2, 2, 3]), mindspore.float32)
|
|
772
|
+
>>> target = Tensor(np.array([2, 2, 2]), mindspore.float32)
|
|
773
|
+
>>> beta = 1.0
|
|
774
|
+
>>> reduction_1 = 'none'
|
|
775
|
+
>>> output = ops.nn.functional.smooth_l1_loss(input, target, reduction_1, beta)
|
|
776
|
+
>>> print(output)
|
|
777
|
+
[0. 0. 0.5]
|
|
778
|
+
>>> reduction_2 = 'mean'
|
|
779
|
+
>>> output = ops.nn.functional.smooth_l1_loss(input, target, reduction_2, beta)
|
|
780
|
+
>>> print(output)
|
|
781
|
+
0.16666667
|
|
782
|
+
>>> reduction_3 = 'sum'
|
|
783
|
+
>>> output = ops.nn.functional.smooth_l1_loss(input, target, reduction_3, beta)
|
|
784
|
+
>>> print(output)
|
|
785
|
+
0.5
|
|
786
|
+
"""
|
|
787
|
+
return ops.function.smooth_l1_loss(input, target, beta, reduction)
|
|
788
|
+
|
|
789
|
+
|
|
790
|
+
@constexpr
|
|
791
|
+
def log_warning(msg):
|
|
792
|
+
"""Adds warning to logger."""
|
|
793
|
+
logger.warning(msg)
|
|
794
|
+
|
|
795
|
+
|
|
796
|
+
def dropout2d(input, p=0.5, training=True):
|
|
797
|
+
r"""
|
|
798
|
+
During training, randomly zeroes some channels of the input tensor with probability `p`
|
|
799
|
+
from a Bernoulli distribution (For a 4-dimensional tensor with a shape of :math:`(N, C, H, W)`,
|
|
800
|
+
the channel feature map refers to a 2-dimensional feature map with the shape of :math:`(H, W)`).
|
|
801
|
+
|
|
802
|
+
For example, the :math:`j\_th` channel of the :math:`i\_th` sample in the batched input is a to-be-processed
|
|
803
|
+
`2D` tensor input[i,j].
|
|
804
|
+
Each channel will be zeroed out independently on every forward call which based on Bernoulli distribution
|
|
805
|
+
probability `p`.
|
|
806
|
+
The parper `Dropout: A Simple Way to Prevent Neural Networks from Overfitting
|
|
807
|
+
<http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf>`_ mentioned this technology, and it is proved that
|
|
808
|
+
it can effectively reduce over fitting and prevent neuronal coadaptation.
|
|
809
|
+
For more details, refer to `Improving neural networks by preventing co-adaptation of feature detectors
|
|
810
|
+
<https://arxiv.org/pdf/1207.0580.pdf>`_ .
|
|
811
|
+
|
|
812
|
+
`dropout2d` can improve the independence between channel feature maps.
|
|
813
|
+
|
|
814
|
+
.. warning::
|
|
815
|
+
This is an experimental API that is subject to change or deletion.
|
|
816
|
+
|
|
817
|
+
Args:
|
|
818
|
+
input (Tensor): A `4D` tensor with shape :math:`(N, C, H, W)`, where `N` is the batch size, `C` is the number
|
|
819
|
+
of channels, `H` is the feature height, and `W` is the feature width.
|
|
820
|
+
p (float, optional): The dropping probability of a channel, between 0 and 1, e.g. `p` = 0.8,
|
|
821
|
+
which means dropping out 80% of channels. Default: ``0.5`` .
|
|
822
|
+
training(bool, optional): If `training` is True, applying dropout, otherwise, not applying. Default: ``True`` .
|
|
823
|
+
|
|
824
|
+
Returns:
|
|
825
|
+
Tensor, output, with the same shape and data type as `input`.
|
|
826
|
+
|
|
827
|
+
Raises:
|
|
828
|
+
TypeError: If `input` is not a Tensor.
|
|
829
|
+
TypeError: If the data type of `p` is not float.
|
|
830
|
+
ValueError: If `p` is out of the range `[0.0, 1.0]`.
|
|
831
|
+
|
|
832
|
+
Supported Platforms:
|
|
833
|
+
``Ascend``
|
|
834
|
+
|
|
835
|
+
Examples:
|
|
836
|
+
>>> import mindspore
|
|
837
|
+
>>> import numpy as np
|
|
838
|
+
>>> from mindspore import Tensor, mint
|
|
839
|
+
>>> input = Tensor(np.ones([2, 1, 2, 3]), mindspore.float32)
|
|
840
|
+
>>> output = mint.nn.functional.dropout2d(input, 0.5)
|
|
841
|
+
>>> print(output.shape)
|
|
842
|
+
(2, 1, 2, 3)
|
|
843
|
+
"""
|
|
844
|
+
def dropout2d_impl_(input, p, training):
|
|
845
|
+
if p == 0 or not training or input.numel() == 0:
|
|
846
|
+
return input
|
|
847
|
+
|
|
848
|
+
if p == 1:
|
|
849
|
+
return mint.mul(input, mint.zeros((), dtype=input.dtype))
|
|
850
|
+
|
|
851
|
+
if input.ndim < 2:
|
|
852
|
+
raise ValueError(f'For dropout2d, input size after unsqueeze must be greater or equal to 2')
|
|
853
|
+
|
|
854
|
+
if ops.is_sequence_shape_unknown(input.shape):
|
|
855
|
+
input_tensor_shape = ops.TensorShape()(input)
|
|
856
|
+
nosie_tensor_shape = mint.ones_like(input_tensor_shape)
|
|
857
|
+
nosie_tensor_shape[0] = input_tensor_shape[0]
|
|
858
|
+
nosie_tensor_shape[1] = input_tensor_shape[1]
|
|
859
|
+
nosie_shape = ops.TensorToTuple()(nosie_tensor_shape)
|
|
860
|
+
else:
|
|
861
|
+
nosie_shape = input.shape[:2] + tuple(1 for _ in range(len(input.shape) - 2))
|
|
862
|
+
nosie = mint.full(nosie_shape, 1 - p, dtype=input.dtype)
|
|
863
|
+
nosie = mint.bernoulli(nosie)
|
|
864
|
+
nosie = mint.div(nosie, 1 - p)
|
|
865
|
+
|
|
866
|
+
return mint.mul(input, nosie)
|
|
867
|
+
|
|
868
|
+
validator.check_float_range(p, 0.0, 1.0, validator.INC_BOTH, "p", "dropout2d")
|
|
869
|
+
validator.check_bool(training, "training", "dropout2d")
|
|
870
|
+
|
|
871
|
+
if input.ndim not in (3, 4):
|
|
872
|
+
log_warning(f"dropout2d receviced a {input.ndim}-D input which is not recommended. Please use dropout instead.")
|
|
873
|
+
|
|
874
|
+
is_batched = input.ndim == 4
|
|
875
|
+
if not is_batched:
|
|
876
|
+
input_shape = input.shape
|
|
877
|
+
if ops.is_sequence_shape_unknown(input.shape):
|
|
878
|
+
input_shape = ops.TensorToTuple()(ops.TensorShape()(input))
|
|
879
|
+
input = input.reshape((1, *input_shape))
|
|
880
|
+
result = dropout2d_impl_(input, p, training)
|
|
881
|
+
result = result.reshape(input_shape)
|
|
882
|
+
else:
|
|
883
|
+
result = dropout2d_impl_(input, p, training)
|
|
884
|
+
|
|
885
|
+
return result
|
|
886
|
+
|
|
887
|
+
|
|
888
|
+
def normalize(input, p=2.0, dim=1, eps=1e-12):
|
|
889
|
+
r"""
|
|
890
|
+
Perform normalization of inputs over specified dimension
|
|
891
|
+
|
|
892
|
+
For a tensor input of sizes :math:`(n_{0},..., n_{dim},..., n_{k})`, each :math:`n_{dim}` -element vector `v`
|
|
893
|
+
along dimension `dim` is transformed as
|
|
894
|
+
|
|
895
|
+
.. math::
|
|
896
|
+
v=\frac{v}{\max(\left \| v \right \| _{p},\in )}
|
|
897
|
+
|
|
898
|
+
With the default arguments it uses the Euclidean norm over vectors along dimension ``1`` for normalization.
|
|
899
|
+
|
|
900
|
+
.. warning::
|
|
901
|
+
This is an experimental API that is subject to change or deletion.
|
|
902
|
+
|
|
903
|
+
Args:
|
|
904
|
+
input (Tensor): input tensor of any shape.
|
|
905
|
+
p (float): the exponent value in the norm formulation. default: ``2``.
|
|
906
|
+
dim (int): the dimension to reduce. default: ``1``.
|
|
907
|
+
eps (float): small value to avoid division by zero. default: ``1e-12``.
|
|
908
|
+
|
|
909
|
+
Returns:
|
|
910
|
+
Tensor, shape and data type are the same as input.
|
|
911
|
+
|
|
912
|
+
Supported Platforms:
|
|
913
|
+
``Ascend``
|
|
914
|
+
|
|
915
|
+
Examples:
|
|
916
|
+
>>> import mindspore
|
|
917
|
+
>>> import numpy as np
|
|
918
|
+
>>> from mindspore import Tensor, mint
|
|
919
|
+
>>> tensor = Tensor(np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]]), mindspore.float32)
|
|
920
|
+
>>> output = mint.nn.functional.normalize(tensor)
|
|
921
|
+
>>> print(output)
|
|
922
|
+
[[0.0000 0.4472 0.8944]
|
|
923
|
+
[0.4243 0.5657 0.7071]
|
|
924
|
+
[0.4915 0.5735 0.6554]]
|
|
925
|
+
"""
|
|
926
|
+
denom = broadcast_to(clamp(norm_ext(input, p, dim, keepdim=True), min=eps), input.shape)
|
|
927
|
+
return input / denom
|
|
928
|
+
|
|
929
|
+
|
|
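# Sketch of the formula above (NumPy-only, illustrative): normalize() divides
# each vector along `dim` by max(||v||_p, eps).
import numpy as np

x = np.array([[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]], dtype=np.float32)
norms = np.maximum(np.linalg.norm(x, ord=2, axis=1, keepdims=True), 1e-12)
expected = x / norms  # reproduces the doctest output above, row by row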
930
|
+
def upsample(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
|
|
931
|
+
r"""
|
|
932
|
+
Upsamples `input` to the given `size` or by the given `scale_factor`.
|
|
933
|
+
|
|
934
|
+
.. warning::
|
|
935
|
+
This is an experimental API that is subject to change or deletion.
|
|
936
|
+
|
|
937
|
+
Refer to :func:`mindspore.mint.nn.functional.interpolate` for more details.
|
|
938
|
+
|
|
939
|
+
Supported Platforms:
|
|
940
|
+
``Ascend``
|
|
941
|
+
"""
|
|
942
|
+
return interpolate(input, size, scale_factor, mode, align_corners)
|
|
943
|
+
|
|
944
|
+
|
|
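# Usage sketch (illustrative shapes): upsample() is a thin alias, so the two
# calls below are interchangeable.
import mindspore
import numpy as np
from mindspore import Tensor, mint

x = Tensor(np.arange(4, dtype=np.float32).reshape(1, 1, 2, 2))
a = mint.nn.functional.upsample(x, scale_factor=2.0, mode="nearest")
b = mint.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
assert a.shape == b.shape == (1, 1, 4, 4)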
945
|
+
def threshold(input, threshold, value, inplace=False): # pylint: disable=W0621
|
|
946
|
+
r"""
|
|
947
|
+
Compute the Threshold activation function element-wise.
|
|
948
|
+
|
|
949
|
+
The Threshold is defined as:
|
|
950
|
+
|
|
951
|
+
.. math::
|
|
952
|
+
y =
|
|
953
|
+
\begin{cases}
|
|
954
|
+
x, &\text{ if } x > \text{threshold} \\
|
|
955
|
+
\text{value}, &\text{ otherwise }
|
|
956
|
+
\end{cases}
|
|
957
|
+
|
|
958
|
+
.. warning::
|
|
959
|
+
This is an experimental API that is subject to change or deletion.
|
|
960
|
+
|
|
961
|
+
Args:
|
|
962
|
+
input (Tensor): The input Tensor.
|
|
963
|
+
threshold (Union[int, float]): The value of the threshold.
|
|
964
|
+
value (Union[int, float]): The value to replace elements with when they are less than or equal to `threshold`.
|
|
965
|
+
inplace (bool, optional): Whether to apply the threshold operation in-place. Default: ``False``.
|
|
966
|
+
|
|
967
|
+
Returns:
|
|
968
|
+
Tensor, the same shape and data type as the input.
|
|
969
|
+
|
|
970
|
+
Raises:
|
|
971
|
+
TypeError: If `input` is not a Tensor.
|
|
972
|
+
TypeError: If `threshold` is not a float or an int.
|
|
973
|
+
TypeError: If `value` is not a float or an int.
|
|
974
|
+
|
|
975
|
+
Supported Platforms:
|
|
976
|
+
``Ascend``
|
|
977
|
+
|
|
978
|
+
Examples:
|
|
979
|
+
>>> import mindspore
|
|
980
|
+
>>> from mindspore import Tensor, mint
|
|
981
|
+
>>> inputs = mindspore.Tensor([0.0, 2, 3], mindspore.float32)
|
|
982
|
+
>>> outputs = mint.nn.functional.threshold(inputs, 1, 100)
|
|
983
|
+
>>> print(outputs)
|
|
984
|
+
[100. 2. 3.]
|
|
985
|
+
"""
|
|
986
|
+
if inplace:
|
|
987
|
+
return threshold_(input, threshold, value)
|
|
988
|
+
return threshold_op(input, threshold, value)
|
|
989
|
+
|
|
990
|
+
|
|
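# Equivalence sketch (NumPy-only, illustrative): elementwise, threshold()
# computes where(x > threshold, x, value).
import numpy as np

x = np.array([0.0, 2.0, 3.0], dtype=np.float32)
expected = np.where(x > 1, x, 100.0)  # [100., 2., 3.], matching the doctest above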
991
|
+
def adaptive_avg_pool3d(input, output_size):
|
|
992
|
+
r"""
|
|
993
|
+
Performs 3D adaptive average pooling on a multi-plane input signal.
|
|
994
|
+
That is, for any input size, the size of the specified output is :math:`(D, H, W)`.
|
|
995
|
+
The number of output features is equal to the number of input planes.
|
|
996
|
+
|
|
997
|
+
Suppose the last 3 dimensions of `input` are :math:`(D_{in}, H_{in}, W_{in})`; then the last 3 dimensions of the output are
|
|
998
|
+
:math:`(D_{out}, H_{out}, W_{out})`.
|
|
999
|
+
|
|
1000
|
+
.. math::
|
|
1001
|
+
\begin{array}{ll} \\
|
|
1002
|
+
\forall \quad od \in [0, D_{out}-1], oh \in [0, H_{out}-1], ow \in [0, W_{out}-1] \\
|
|
1003
|
+
output[od,oh,ow] = \\
|
|
1004
|
+
\qquad mean(x[D_{istart}:D_{iend}+1,H_{istart}:H_{iend}+1,W_{istart}:W_{iend}+1]) \\
|
|
1005
|
+
where, \\
|
|
1006
|
+
\qquad D_{istart}= \left\lceil \frac{od * D_{in}}{D_{out}} \right\rceil \\
|
|
1007
|
+
\qquad D_{iend}=\left\lfloor \frac{(od+1)* D_{in}}{D_{out}} \right\rfloor \\
|
|
1008
|
+
\qquad H_{istart}=\left\lceil \frac{oh * H_{in}}{H_{out}} \right\rceil \\
|
|
1009
|
+
\qquad H_{iend}=\left\lfloor \frac{(oh+1) * H_{in}}{H_{out}} \right\rfloor \\
|
|
1010
|
+
\qquad W_{istart}=\left\lceil \frac{ow * W_{in}}{W_{out}} \right\rceil \\
|
|
1011
|
+
\qquad W_{iend}=\left\lfloor \frac{(ow+1) * W_{in}}{W_{out}} \right\rfloor
|
|
1012
|
+
\end{array}
|
|
1013
|
+
|
|
1014
|
+
.. warning::
|
|
1015
|
+
For Ascend, it is only supported on Atlas A2 Training Series Products.
|
|
1016
|
+
This is an experimental API that is subject to change or deletion.
|
|
1017
|
+
|
|
1018
|
+
Args:
|
|
1019
|
+
input (Tensor): The input of adaptive_avg_pool3d, which is a 4D or 5D Tensor.
|
|
1020
|
+
output_size (Union[int, tuple]): The target output size. `output_size` can be a tuple :math:`(D, H, W)`,
|
|
1021
|
+
or an int D for :math:`(D, D, D)`. :math:`D`, :math:`H` and :math:`W` can be int or None,
|
|
1022
|
+
in which case the output size in that dimension is the same as that of the input.
|
|
1023
|
+
|
|
1024
|
+
Returns:
|
|
1025
|
+
Tensor, with the same type as the `input`.
|
|
1026
|
+
|
|
1027
|
+
Raises:
|
|
1028
|
+
TypeError: If `input` is not a Tensor.
|
|
1029
|
+
ValueError: If the dimension of `input` is not 4D or 5D.
|
|
1030
|
+
ValueError: If `output_size` value is not positive.
|
|
1031
|
+
|
|
1032
|
+
Supported Platforms:
|
|
1033
|
+
``Ascend``
|
|
1034
|
+
|
|
1035
|
+
Examples:
|
|
1036
|
+
>>> import mindspore
|
|
1037
|
+
>>> import numpy as np
|
|
1038
|
+
>>> from mindspore import Tensor, mint
|
|
1039
|
+
>>> # case 1: output_size=(3, 3, 4)
|
|
1040
|
+
>>> output_size=(3, 3, 4)
|
|
1041
|
+
>>> input_val = np.random.randn(4, 3, 5, 6, 7)
|
|
1042
|
+
>>> input = Tensor(input_val, mindspore.float32)
|
|
1043
|
+
>>> output = mint.nn.functional.adaptive_avg_pool3d(input, output_size)
|
|
1044
|
+
>>> print(output.shape)
|
|
1045
|
+
(4, 3, 3, 3, 4)
|
|
1046
|
+
>>> # case 2: output_size=5
|
|
1047
|
+
>>> output_size=5
|
|
1048
|
+
>>> input_val = np.random.randn(2, 3, 8, 6, 12)
|
|
1049
|
+
>>> input = Tensor(input_val, mindspore.float32)
|
|
1050
|
+
>>> output = mint.nn.functional.adaptive_avg_pool3d(input, output_size)
|
|
1051
|
+
>>> print(output.shape)
|
|
1052
|
+
(2, 3, 5, 5, 5)
|
|
1053
|
+
>>> # case 3: output_size=(None, 4, 5)
|
|
1054
|
+
>>> output_size=(None, 4, 5)
|
|
1055
|
+
>>> input_val = np.random.randn(4, 1, 9, 10, 8)
|
|
1056
|
+
>>> input = Tensor(input_val, mindspore.float32)
|
|
1057
|
+
>>> output = mint.nn.functional.adaptive_avg_pool3d(input, output_size)
|
|
1058
|
+
>>> print(output.shape)
|
|
1059
|
+
(4, 1, 9, 4, 5)
|
|
1060
|
+
"""
|
|
1061
|
+
validator.check_value_type("output_size", output_size, [int, tuple, list], "adaptive_avg_pool3d")
|
|
1062
|
+
if isinstance(output_size, int):
|
|
1063
|
+
output_size = (output_size, output_size, output_size)
|
|
1064
|
+
# None in output_size means "keep that dimension of the input"; the backend encodes it as -1.
output_size = tuple(-1 if val is None else val for val in output_size)
|
|
1065
|
+
return adaptive_avg_pool3d_ext(input, output_size)
|
|
1066
|
+
|
|
1067
|
+
|
|
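# Worked instance of the window formula above (illustrative): with D_in = 5,
# D_out = 3 and output index od = 1, the averaged slice along that axis runs
# from ceil(1 * 5 / 3) = 2 to floor(2 * 5 / 3) = 3, inclusive.
import math

D_in, D_out, od = 5, 3, 1
istart = math.ceil(od * D_in / D_out)       # 2
iend = math.floor((od + 1) * D_in / D_out)  # 3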
1068
|
+
def adaptive_max_pool1d(input, output_size, return_indices=False):
|
|
1069
|
+
r"""
|
|
1070
|
+
Performs 1D adaptive max pooling on a multi-plane input signal.
|
|
1071
|
+
That is, for any input size, the size of the specified output is :math:`L_{out}`.
|
|
1072
|
+
The number of output features is equal to the number of input features.
|
|
1073
|
+
|
|
1074
|
+
.. warning::
|
|
1075
|
+
This is an experimental API that is subject to change or deletion.
|
|
1076
|
+
|
|
1077
|
+
Args:
|
|
1078
|
+
input (Tensor): The input of adaptive_max_pool1d, which is a 2D or 3D tensor,
|
|
1079
|
+
with float16, float32 or float64 data type.
|
|
1080
|
+
output_size (int): The target output feature size. `output_size` is an integer.
|
|
1081
|
+
return_indices (bool, optional): Whether to return the index of the maximum value. Default: ``False`` .
|
|
1082
|
+
|
|
1083
|
+
Returns:
|
|
1084
|
+
Union(Tensor, tuple(Tensor, Tensor)).
|
|
1085
|
+
|
|
1086
|
+
- If `return_indices` is False, output is a Tensor, with shape :math:`(N, C, L_{out})`. It has the same data
|
|
1087
|
+
type as `input`.
|
|
1088
|
+
- If `return_indices` is True, output is a Tuple of 2 Tensors, representing the result and where the max
|
|
1089
|
+
values are generated.
|
|
1090
|
+
|
|
1091
|
+
Raises:
|
|
1092
|
+
TypeError: If `input` is not a tensor.
|
|
1093
|
+
TypeError: If dtype of `input` is not float16, float32 or float64.
|
|
1094
|
+
TypeError: If `output_size` is not int or tuple.
|
|
1095
|
+
TypeError: If `return_indices` is not a bool.
|
|
1096
|
+
ValueError: If `output_size` is a tuple and the length of `output_size` is not 1.
|
|
1097
|
+
|
|
1098
|
+
Supported Platforms:
|
|
1099
|
+
``Ascend``
|
|
1100
|
+
|
|
1101
|
+
Examples:
|
|
1102
|
+
>>> import mindspore
|
|
1103
|
+
>>> from mindspore import Tensor, mint
|
|
1104
|
+
>>> input = Tensor([[2, 3], [3, 4]], dtype=mindspore.float16)
|
|
1105
|
+
>>> output = mint.nn.functional.adaptive_max_pool1d(input, 3)
|
|
1106
|
+
>>> print(output)
|
|
1107
|
+
[[2. 3. 3. ]
|
|
1108
|
+
[3. 4. 4. ]]
|
|
1109
|
+
"""
|
|
1110
|
+
if return_indices:
|
|
1111
|
+
return ops.auto_generate.gen_ops_prim.adaptive_max_pool1d_op(input, output_size)
|
|
1112
|
+
return ops.auto_generate.gen_ops_prim.adaptive_max_pool1d_op(input, output_size)[0]
|
|
1113
|
+
|
|
1114
|
+
|
|
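# Usage sketch (illustrative): with return_indices=True the call also returns
# the argmax positions the pooled values came from.
import mindspore
from mindspore import Tensor, mint

x = Tensor([[2., 3.], [3., 4.]], mindspore.float16)
values, indices = mint.nn.functional.adaptive_max_pool1d(x, 3, return_indices=True)
# values matches the doctest above; indices has the same shape as values.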
456
1115
|
__all__ = [
|
|
457
1116
|
'conv_transpose2d',
|
|
458
1117
|
'max_pool2d',
|
|
@@ -473,11 +1132,14 @@ __all__ = [
|
|
|
473
1132
|
# 8
|
|
474
1133
|
'layer_norm',
|
|
475
1134
|
# 9
|
|
476
|
-
|
|
1135
|
+
'upsample',
|
|
477
1136
|
# 10
|
|
478
1137
|
|
|
479
1138
|
# 11
|
|
480
1139
|
'relu',
|
|
1140
|
+
|
|
1141
|
+
'relu_',
|
|
1142
|
+
|
|
481
1143
|
# 12
|
|
482
1144
|
|
|
483
1145
|
# 13
|
|
@@ -485,7 +1147,8 @@ __all__ = [
|
|
|
485
1147
|
# 14
|
|
486
1148
|
'dropout',
|
|
487
1149
|
# 15
|
|
488
|
-
|
|
1150
|
+
'conv1d',
|
|
1151
|
+
'conv2d',
|
|
489
1152
|
# 16
|
|
490
1153
|
'log_softmax',
|
|
491
1154
|
# 17
|
|
@@ -495,9 +1158,10 @@ __all__ = [
|
|
|
495
1158
|
# 19
|
|
496
1159
|
'binary_cross_entropy',
|
|
497
1160
|
# 20
|
|
498
|
-
|
|
1161
|
+
'cross_entropy',
|
|
499
1162
|
# 21
|
|
500
|
-
|
|
1163
|
+
'conv3d',
|
|
1164
|
+
'nll_loss',
|
|
501
1165
|
# 22
|
|
502
1166
|
|
|
503
1167
|
# 23
|
|
@@ -543,7 +1207,7 @@ __all__ = [
|
|
|
543
1207
|
# 43
|
|
544
1208
|
|
|
545
1209
|
# 44
|
|
546
|
-
|
|
1210
|
+
'soft_margin_loss',
|
|
547
1211
|
# 45
|
|
548
1212
|
|
|
549
1213
|
# 46
|
|
@@ -563,7 +1227,7 @@ __all__ = [
|
|
|
563
1227
|
# 53
|
|
564
1228
|
|
|
565
1229
|
# 54
|
|
566
|
-
|
|
1230
|
+
'pixel_shuffle',
|
|
567
1231
|
# 55
|
|
568
1232
|
|
|
569
1233
|
# 56
|
|
@@ -631,9 +1295,9 @@ __all__ = [
|
|
|
631
1295
|
# 87
|
|
632
1296
|
|
|
633
1297
|
# 88
|
|
634
|
-
|
|
1298
|
+
'avg_pool3d',
|
|
635
1299
|
# 89
|
|
636
|
-
|
|
1300
|
+
'avg_pool1d',
|
|
637
1301
|
# 90
|
|
638
1302
|
'avg_pool2d',
|
|
639
1303
|
# 91
|
|
@@ -656,17 +1320,32 @@ __all__ = [
|
|
|
656
1320
|
|
|
657
1321
|
# 100
|
|
658
1322
|
|
|
1323
|
+
# 152
|
|
1324
|
+
'adaptive_avg_pool3d',
|
|
1325
|
+
# 254
|
|
1326
|
+
'max_unpool2d',
|
|
1327
|
+
# 256
|
|
1328
|
+
'threshold',
|
|
1329
|
+
'threshold_',
|
|
1330
|
+
|
|
1331
|
+
# 288
|
|
1332
|
+
'adaptive_max_pool2d',
|
|
1333
|
+
|
|
1334
|
+
# 312
|
|
1335
|
+
'normalize',
|
|
1336
|
+
|
|
659
1337
|
# 323
|
|
660
1338
|
|
|
661
1339
|
# 324
|
|
662
1340
|
'elu',
|
|
1341
|
+
'elu_',
|
|
663
1342
|
# 325
|
|
664
1343
|
|
|
665
1344
|
# 556
|
|
666
1345
|
'logsigmoid',
|
|
667
1346
|
|
|
668
1347
|
# 257
|
|
669
|
-
|
|
1348
|
+
'adaptive_max_pool1d',
|
|
670
1349
|
# 258
|
|
671
1350
|
'mse_loss',
|
|
672
1351
|
# 259
|
|
@@ -675,5 +1354,19 @@ __all__ = [
|
|
|
675
1354
|
|
|
676
1355
|
'adaptive_avg_pool2d',
|
|
677
1356
|
|
|
678
|
-
|
|
1357
|
+
# 350
|
|
1358
|
+
'conv1d',
|
|
1359
|
+
|
|
1360
|
+
# 393
|
|
1361
|
+
'dropout2d',
|
|
1362
|
+
# 421
|
|
1363
|
+
'flatten',
|
|
1364
|
+
# 536
|
|
1365
|
+
'glu',
|
|
1366
|
+
# 537
|
|
1367
|
+
'hardtanh',
|
|
1368
|
+
'hardtanh_',
|
|
1369
|
+
'relu6',
|
|
1370
|
+
# 548
|
|
1371
|
+
'kl_div',
|
|
679
1372
|
]
|