mindspore-2.5.0-cp310-cp310-win_amd64.whl → mindspore-2.6.0rc1-cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +6 -4
- mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -0
- mindspore/_checkparam.py +3 -33
- mindspore/_deprecated/__init__.py +17 -0
- mindspore/_deprecated/jit.py +198 -0
- mindspore/_extends/builtin_operations.py +1 -1
- mindspore/_extends/parse/__init__.py +6 -7
- mindspore/_extends/parse/compile_config.py +19 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
- mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
- mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
- mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
- mindspore/_extends/parse/parser.py +24 -193
- mindspore/_extends/parse/resources.py +1 -5
- mindspore/_extends/parse/standard_method.py +97 -74
- mindspore/_extends/pijit/__init__.py +2 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
- mindspore/_extends/pijit/tensor_func_list.py +27 -0
- mindspore/_extends/utils.py +1 -1
- mindspore/amp.py +4 -4
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/__init__.py +2 -2
- mindspore/boost/base.py +3 -7
- mindspore/boost/boost_cell_wrapper.py +2 -2
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +4 -3
- mindspore/common/_grad_function.py +56 -0
- mindspore/common/_pijit_context.py +14 -5
- mindspore/common/_register_for_tensor.py +1 -1
- mindspore/common/_stub_tensor.py +5 -10
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +1915 -3287
- mindspore/common/api.py +341 -354
- mindspore/common/auto_dynamic_shape.py +41 -44
- mindspore/common/dtype.py +5 -2
- mindspore/common/dump.py +7 -5
- mindspore/common/file_system.py +3 -0
- mindspore/common/hook_handle.py +5 -3
- mindspore/common/initializer.py +10 -6
- mindspore/common/jit_begin_end.py +94 -0
- mindspore/common/jit_config.py +6 -1
- mindspore/common/jit_context.py +76 -0
- mindspore/common/jit_trace.py +378 -0
- mindspore/common/lazy_inline.py +2 -2
- mindspore/common/mutable.py +5 -4
- mindspore/common/parameter.py +106 -39
- mindspore/common/seed.py +2 -2
- mindspore/common/sparse_tensor.py +23 -17
- mindspore/common/tensor.py +297 -714
- mindspore/communication/__init__.py +7 -5
- mindspore/communication/_comm_helper.py +47 -2
- mindspore/communication/comm_func.py +70 -53
- mindspore/communication/management.py +83 -17
- mindspore/context.py +214 -560
- mindspore/dataset/__init__.py +44 -20
- mindspore/dataset/audio/__init__.py +2 -8
- mindspore/dataset/audio/transforms.py +3 -17
- mindspore/dataset/core/config.py +3 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +102 -120
- mindspore/dataset/engine/datasets_audio.py +22 -22
- mindspore/dataset/engine/datasets_standard_format.py +43 -24
- mindspore/dataset/engine/datasets_text.py +78 -85
- mindspore/dataset/engine/datasets_user_defined.py +108 -76
- mindspore/dataset/engine/datasets_vision.py +111 -108
- mindspore/dataset/engine/iterators.py +5 -3
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
- mindspore/dataset/engine/samplers.py +279 -57
- mindspore/dataset/engine/serializer_deserializer.py +2 -1
- mindspore/dataset/engine/validators.py +10 -0
- mindspore/dataset/text/__init__.py +7 -6
- mindspore/dataset/text/transforms.py +6 -5
- mindspore/dataset/text/utils.py +3 -3
- mindspore/dataset/transforms/__init__.py +0 -9
- mindspore/dataset/transforms/transforms.py +3 -3
- mindspore/dataset/utils/browse_dataset.py +1 -1
- mindspore/dataset/vision/__init__.py +2 -9
- mindspore/dataset/vision/transforms.py +202 -158
- mindspore/dataset/vision/utils.py +7 -5
- mindspore/device_context/ascend/op_debug.py +60 -1
- mindspore/device_context/ascend/op_tuning.py +0 -4
- mindspore/device_manager.py +39 -3
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/es/embedding_service.py +35 -27
- mindspore/experimental/map_parameter.py +4 -4
- mindspore/experimental/optim/adadelta.py +22 -26
- mindspore/experimental/optim/adagrad.py +4 -4
- mindspore/experimental/optim/adam.py +4 -0
- mindspore/experimental/optim/adamax.py +4 -4
- mindspore/experimental/optim/adamw.py +4 -0
- mindspore/experimental/optim/asgd.py +1 -1
- mindspore/experimental/optim/lr_scheduler.py +40 -22
- mindspore/experimental/optim/radam.py +5 -5
- mindspore/experimental/optim/rprop.py +1 -1
- mindspore/experimental/optim/sgd.py +1 -1
- mindspore/hal/contiguous_tensors_handle.py +6 -10
- mindspore/hal/device.py +55 -81
- mindspore/hal/event.py +38 -55
- mindspore/hal/memory.py +93 -144
- mindspore/hal/stream.py +81 -125
- mindspore/include/dataset/constants.h +7 -4
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +40 -2
- mindspore/mindrecord/__init__.py +20 -7
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +131 -700
- mindspore/mint/distributed/__init__.py +5 -1
- mindspore/mint/distributed/distributed.py +194 -109
- mindspore/mint/linalg/__init__.py +2 -0
- mindspore/mint/nn/__init__.py +280 -18
- mindspore/mint/nn/functional.py +282 -64
- mindspore/mint/nn/layer/__init__.py +4 -0
- mindspore/mint/nn/layer/_functions.py +7 -3
- mindspore/mint/nn/layer/activation.py +120 -13
- mindspore/mint/nn/layer/conv.py +218 -24
- mindspore/mint/nn/layer/normalization.py +15 -16
- mindspore/mint/nn/layer/padding.py +1 -1
- mindspore/mint/nn/layer/pooling.py +66 -1
- mindspore/mint/optim/__init__.py +2 -1
- mindspore/mint/optim/sgd.py +171 -0
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/__init__.py +4 -1
- mindspore/nn/cell.py +1250 -176
- mindspore/nn/layer/activation.py +23 -21
- mindspore/nn/layer/basic.py +22 -16
- mindspore/nn/layer/container.py +1 -1
- mindspore/nn/layer/conv.py +22 -17
- mindspore/nn/layer/embedding.py +9 -8
- mindspore/nn/layer/normalization.py +48 -42
- mindspore/nn/layer/pooling.py +75 -31
- mindspore/nn/layer/transformer.py +11 -10
- mindspore/nn/learning_rate_schedule.py +4 -2
- mindspore/nn/loss/loss.py +27 -19
- mindspore/nn/optim/ada_grad.py +6 -5
- mindspore/nn/optim/adadelta.py +9 -7
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +16 -12
- mindspore/nn/optim/adamax.py +8 -7
- mindspore/nn/optim/adasum.py +5 -5
- mindspore/nn/optim/asgd.py +1 -1
- mindspore/nn/optim/ftrl.py +11 -9
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/lazyadam.py +12 -10
- mindspore/nn/optim/momentum.py +7 -6
- mindspore/nn/optim/optimizer.py +2 -2
- mindspore/nn/optim/proximal_ada_grad.py +12 -10
- mindspore/nn/optim/rmsprop.py +13 -12
- mindspore/nn/optim/rprop.py +9 -7
- mindspore/nn/optim/sgd.py +9 -6
- mindspore/nn/optim/tft_wrapper.py +5 -2
- mindspore/nn/probability/bijector/bijector.py +17 -11
- mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
- mindspore/nn/probability/bijector/invert.py +2 -2
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +3 -2
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +1 -1
- mindspore/nn/probability/distribution/cauchy.py +4 -2
- mindspore/nn/probability/distribution/exponential.py +6 -7
- mindspore/nn/probability/distribution/gamma.py +2 -2
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/half_normal.py +5 -3
- mindspore/nn/probability/distribution/logistic.py +5 -3
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/uniform.py +5 -3
- mindspore/nn/reinforcement/_tensors_queue.py +1 -1
- mindspore/nn/reinforcement/tensor_array.py +1 -1
- mindspore/nn/wrap/__init__.py +6 -6
- mindspore/nn/wrap/cell_wrapper.py +178 -117
- mindspore/nn/wrap/grad_reducer.py +45 -36
- mindspore/nn/wrap/loss_scale.py +3 -3
- mindspore/numpy/array_creations.py +3 -3
- mindspore/numpy/array_ops.py +1 -1
- mindspore/numpy/math_ops.py +4 -4
- mindspore/numpy/utils.py +1 -2
- mindspore/numpy/utils_const.py +1 -2
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +3 -2
- mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
- mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
- mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
- mindspore/ops/_register_for_op.py +0 -11
- mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
- mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
- mindspore/ops/_vmap/vmap_array_ops.py +7 -6
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
- mindspore/ops/_vmap/vmap_math_ops.py +4 -7
- mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
- mindspore/ops/auto_generate/__init__.py +4 -3
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +102 -49
- mindspore/ops/auto_generate/gen_extend_func.py +281 -135
- mindspore/ops/auto_generate/gen_ops_def.py +2574 -2326
- mindspore/ops/auto_generate/gen_ops_prim.py +8566 -2755
- mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
- mindspore/ops/composite/__init__.py +2 -1
- mindspore/ops/composite/base.py +19 -24
- mindspore/ops/composite/math_ops.py +6 -16
- mindspore/ops/composite/multitype_ops/__init__.py +5 -2
- mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -3
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
- mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
- mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
- mindspore/ops/function/__init__.py +28 -2
- mindspore/ops/function/_add_attr_func.py +58 -0
- mindspore/ops/function/array_func.py +1629 -2345
- mindspore/ops/function/clip_func.py +38 -45
- mindspore/ops/function/debug_func.py +36 -44
- mindspore/ops/function/grad/__init__.py +1 -0
- mindspore/ops/function/grad/grad_func.py +104 -71
- mindspore/ops/function/image_func.py +1 -1
- mindspore/ops/function/linalg_func.py +46 -78
- mindspore/ops/function/math_func.py +3035 -3705
- mindspore/ops/function/nn_func.py +676 -241
- mindspore/ops/function/other_func.py +159 -1
- mindspore/ops/function/parameter_func.py +17 -30
- mindspore/ops/function/random_func.py +204 -361
- mindspore/ops/function/reshard_func.py +4 -70
- mindspore/ops/function/sparse_func.py +3 -3
- mindspore/ops/function/sparse_unary_func.py +5 -5
- mindspore/ops/function/spectral_func.py +25 -58
- mindspore/ops/function/vmap_func.py +24 -17
- mindspore/ops/functional.py +6 -4
- mindspore/ops/functional_overload.py +547 -4
- mindspore/ops/op_info_register.py +32 -244
- mindspore/ops/operations/__init__.py +10 -5
- mindspore/ops/operations/_custom_ops_utils.py +247 -0
- mindspore/ops/operations/_grad_ops.py +1 -10
- mindspore/ops/operations/_inner_ops.py +5 -76
- mindspore/ops/operations/_ms_kernel.py +4 -10
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +3 -2
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/array_ops.py +37 -22
- mindspore/ops/operations/comm_ops.py +150 -107
- mindspore/ops/operations/custom_ops.py +221 -23
- mindspore/ops/operations/debug_ops.py +115 -16
- mindspore/ops/operations/inner_ops.py +1 -1
- mindspore/ops/operations/linalg_ops.py +1 -58
- mindspore/ops/operations/manually_defined/_inner.py +1 -1
- mindspore/ops/operations/manually_defined/ops_def.py +746 -79
- mindspore/ops/operations/math_ops.py +21 -18
- mindspore/ops/operations/nn_ops.py +65 -191
- mindspore/ops/operations/other_ops.py +62 -9
- mindspore/ops/operations/random_ops.py +13 -7
- mindspore/ops/operations/reshard_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +2 -2
- mindspore/ops/primitive.py +43 -32
- mindspore/ops/tensor_method.py +232 -13
- mindspore/ops_generate/__init__.py +0 -5
- mindspore/ops_generate/aclnn/__init__.py +0 -0
- mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
- mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
- mindspore/ops_generate/api/__init__.py +0 -0
- mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
- mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
- mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
- mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
- mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
- mindspore/ops_generate/api/gen_api.py +103 -0
- mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
- mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
- mindspore/ops_generate/common/__init__.py +0 -0
- mindspore/ops_generate/common/gen_constants.py +91 -0
- mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
- mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
- mindspore/ops_generate/{template.py → common/template.py} +96 -84
- mindspore/ops_generate/gen_ops.py +23 -325
- mindspore/ops_generate/op_def/__init__.py +0 -0
- mindspore/ops_generate/op_def/gen_op_def.py +90 -0
- mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
- mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -7
- mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
- mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
- mindspore/ops_generate/op_def_py/__init__.py +0 -0
- mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
- mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
- mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
- mindspore/ops_generate/pyboost/__init__.py +0 -0
- mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
- mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
- mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
- mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
- mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
- mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
- mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
- mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
- mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
- mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
- mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
- mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
- mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
- mindspore/ops_generate/resources/__init__.py +0 -0
- mindspore/ops_generate/resources/resource_list.py +30 -0
- mindspore/ops_generate/resources/resource_loader.py +36 -0
- mindspore/ops_generate/resources/resource_manager.py +64 -0
- mindspore/ops_generate/resources/yaml_loader.py +88 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
- mindspore/parallel/__init__.py +6 -2
- mindspore/parallel/_auto_parallel_context.py +133 -6
- mindspore/parallel/_cell_wrapper.py +130 -15
- mindspore/parallel/_parallel_serialization.py +95 -4
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +7 -2
- mindspore/parallel/_tensor.py +142 -18
- mindspore/parallel/_utils.py +198 -25
- mindspore/parallel/algo_parameter_config.py +3 -3
- mindspore/parallel/auto_parallel.py +732 -0
- mindspore/parallel/checkpoint_convert.py +159 -0
- mindspore/parallel/checkpoint_transform.py +656 -37
- mindspore/parallel/cluster/process_entity/_api.py +151 -19
- mindspore/parallel/cluster/run.py +1 -1
- mindspore/parallel/function/__init__.py +24 -0
- mindspore/parallel/function/reshard_func.py +259 -0
- mindspore/parallel/nn/__init__.py +25 -0
- mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
- mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
- mindspore/parallel/parameter_broadcast.py +24 -13
- mindspore/parallel/shard.py +137 -61
- mindspore/parallel/transform_safetensors.py +287 -95
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +9 -5
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
- mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +22 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
- mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
- mindspore/profiler/common/constant.py +12 -0
- mindspore/profiler/common/msprof_cmd_tool.py +42 -23
- mindspore/profiler/common/path_manager.py +24 -0
- mindspore/profiler/common/profiler_context.py +26 -2
- mindspore/profiler/common/profiler_meta_data.py +74 -0
- mindspore/profiler/common/profiler_parameters.py +59 -18
- mindspore/profiler/common/profiler_path_manager.py +66 -7
- mindspore/profiler/dynamic_profiler.py +112 -79
- mindspore/profiler/envprofiler.py +26 -1
- mindspore/profiler/experimental_config.py +197 -0
- mindspore/profiler/mstx.py +57 -14
- mindspore/profiler/platform/npu_profiler.py +33 -7
- mindspore/profiler/profiler.py +541 -45
- mindspore/profiler/profiler_action_controller.py +1 -1
- mindspore/profiler/profiler_interface.py +4 -0
- mindspore/profiler/schedule.py +57 -22
- mindspore/rewrite/api/node.py +15 -13
- mindspore/rewrite/api/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +25 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/runtime/__init__.py +2 -2
- mindspore/runtime/executor.py +40 -11
- mindspore/runtime/memory.py +25 -8
- mindspore/safeguard/rewrite_obfuscation.py +12 -9
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +8 -8
- mindspore/train/_utils.py +35 -7
- mindspore/train/amp.py +1 -1
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +2 -16
- mindspore/train/callback/_checkpoint.py +24 -40
- mindspore/train/callback/_cluster_monitor.py +14 -18
- mindspore/train/callback/_flops_collector.py +2 -3
- mindspore/train/callback/_history.py +7 -4
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +0 -3
- mindspore/train/callback/_loss_monitor.py +2 -1
- mindspore/train/callback/_on_request_exit.py +6 -5
- mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
- mindspore/train/callback/_summary_collector.py +8 -13
- mindspore/train/callback/_time_monitor.py +2 -1
- mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +179 -103
- mindspore/train/data_sink.py +25 -2
- mindspore/train/dataset_helper.py +4 -5
- mindspore/train/loss_scale_manager.py +8 -7
- mindspore/train/metrics/accuracy.py +3 -3
- mindspore/train/metrics/confusion_matrix.py +9 -9
- mindspore/train/metrics/error.py +3 -3
- mindspore/train/metrics/hausdorff_distance.py +4 -4
- mindspore/train/metrics/mean_surface_distance.py +3 -3
- mindspore/train/metrics/metric.py +0 -12
- mindspore/train/metrics/occlusion_sensitivity.py +4 -2
- mindspore/train/metrics/precision.py +8 -6
- mindspore/train/metrics/recall.py +9 -9
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +19 -12
- mindspore/train/model.py +176 -103
- mindspore/train/serialization.py +246 -988
- mindspore/train/summary/_summary_adapter.py +2 -2
- mindspore/train/summary/summary_record.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +4 -2
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/utils.py +138 -4
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/METADATA +2 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/RECORD +483 -438
- mindspore/_install_custom.py +0 -43
- mindspore/common/_register_for_adapter.py +0 -74
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
- mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
- mindspore/ops_generate/gen_constants.py +0 -190
- mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
- mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
- /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/top_level.txt +0 -0
mindspore/mint/nn/functional.py
CHANGED
@@ -56,6 +56,7 @@ from mindspore.ops.function.nn_func import relu_
 # 14
 from mindspore.ops.function.nn_func import dropout_ext as dropout
 # 15
+from mindspore.ops.function.nn_func import conv1d_ext as conv1d
 from mindspore.ops.function.nn_func import conv2d_ext as conv2d
 # 16
 from mindspore.ops.function.nn_func import log_softmax_ext as log_softmax
@@ -97,7 +98,7 @@ from mindspore.ops.function.nn_func import batch_norm_ext as batch_norm
 # 35
 
 # 36
-from mindspore.ops.
+from mindspore.ops.functional_overload import gelu
 # 37
 
 # 38
@@ -113,7 +114,7 @@ from mindspore.ops.functional import group_norm
 # 43
 
 # 44
-
+from mindspore.ops.auto_generate import soft_margin_loss
 # 45
 
 # 46
@@ -133,7 +134,7 @@ from mindspore.ops.functional import embedding
 # 53
 
 # 54
-
+from mindspore.ops.functional_overload import pixel_shuffle
 # 55
 
 # 56
@@ -175,7 +176,7 @@ from mindspore.ops.functional import embedding
 # 74
 
 # 75
-
+from mindspore.ops.function.nn_func import adaptive_max_pool2d
 # 76
 
 # 77
@@ -207,7 +208,7 @@ from mindspore.ops.auto_generate import avg_pool1d_ext as avg_pool1d
 # 90
 from mindspore.ops.function.nn_func import avg_pool2d_ext as avg_pool2d
 # 91
-
+from mindspore.ops.function.nn_func import avg_pool3d_ext as avg_pool3d
 # 92
 from mindspore.ops.auto_generate import leaky_relu_ext as leaky_relu
 # 93
@@ -242,6 +243,9 @@ from mindspore.ops.auto_generate import l1_loss_ext as l1_loss # pylint: disabl
 #254
 from mindspore.ops.auto_generate import max_unpool2d_ext as max_unpool2d
 
+# 256
+from mindspore.ops.auto_generate import inplace_threshold as threshold_
+from mindspore.ops.auto_generate import threshold as threshold_op
 # 257
 
 # 258
@@ -251,7 +255,8 @@ from mindspore.ops.function.nn_func import mse_loss_ext as mse_loss
 # 323
 
 # 324
-from mindspore.ops.auto_generate import elu_ext
+from mindspore.ops.auto_generate import elu_ext
+from mindspore.ops.auto_generate import inplace_elu
 
 # 421
 from mindspore.ops.auto_generate import flatten_ext as flatten
@@ -267,6 +272,8 @@ from mindspore.ops.function.nn_func import glu_ext as glu
 # 537
 from mindspore.ops.auto_generate import hardtanh as hardtanh_op
 from mindspore.ops.auto_generate import inplace_hardtanh as hardtanh_
+# 548
+from mindspore.ops.function.nn_func import kl_div_ext as kl_div
 # 556
 from mindspore.ops.function.nn_func import logsigmoid_ext as logsigmoid
 
@@ -277,53 +284,157 @@ from mindspore.ops.function.nn_func import cross_entropy_ext as cross_entropy
 from mindspore.ops.function.nn_func import nll_loss_ext as nll_loss
 
 
+def elu(input, alpha=1.0, inplace=False):
+    r"""
+    Exponential Linear Unit activation function
+
+    Applies the exponential linear unit function element-wise. The activation function is defined as:
+
+    .. math::
+        ELU_{i} =
+        \begin{cases}
+        x_i, &\text{if } x_i \geq 0; \cr
+        \alpha * (\exp(x_i) - 1), &\text{otherwise.}
+        \end{cases}
+
+    where :math:`x_i` represents the element of the input and :math:`\alpha` represents the `alpha` parameter, and
+    `alpha` represents the smoothness of the ELU.
+
+    ELU Activation Function Graph:
+
+    .. image:: ../images/ELU.png
+        :align: center
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The input of ELU is a Tensor of any dimension.
+        alpha (float, optional): The alpha value of ELU, the data type is float. Default: ``1.0``.
+        inplace (bool, optional): Whether to use inplace mode, the data type is bool. Default: ``False``.
+
+    Returns:
+        Tensor, with the same shape and type as the `input`.
+
+    Raises:
+        RuntimeError: If the dtype of `input` is not float16, float32 or bfloat16.
+        TypeError: If the dtype of `alpha` is not float.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> import numpy as np
+        >>> input = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float32)
+        >>> output = mint.nn.functional.elu(input)
+        >>> print(output)
+        [-0.63212055 -0.86466473 0. 2. 1.]
+    """
+    if inplace:
+        return inplace_elu(input, alpha)
+    return elu_ext(input, alpha)
+
+
+def elu_(input, alpha=1.0):
+    r"""
+    Exponential Linear Unit activation function
+
+    Applies the exponential linear unit function inplace element-wise. The activation function is defined as:
+
+    .. math::
+        ELU_{i} =
+        \begin{cases}
+        x_i, &\text{if } x_i \geq 0; \cr
+        \alpha * (\exp(x_i) - 1), &\text{otherwise.}
+        \end{cases}
+
+    where :math:`x_i` represents the element of the input and :math:`\alpha` represents the `alpha` parameter, and
+    `alpha` represents the smoothness of the ELU.
+
+    ELU Activation Function Graph:
+
+    .. image:: ../images/ELU.png
+        :align: center
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The input of ELU is a Tensor of any dimension.
+        alpha (float, optional): The alpha value of ELU, the data type is float and `alpha` should be
+            greater than 0. Default: ``1.0``.
+
+    Returns:
+        Tensor, with the same shape and type as the `input`.
+
+    Raises:
+        RuntimeError: If the dtype of `input` is not float16, float32 or bfloat16.
+        TypeError: If the dtype of `alpha` is not float.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> import numpy as np
+        >>> input = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float32)
+        >>> mint.nn.functional.elu_(input)
+        >>> print(input)
+        [-0.63212055 -0.86466473 0. 2. 1.]
+    """
+    return inplace_elu(input, alpha)
+
+
 def hardtanh(input, min_val=-1.0, max_val=1.0, inplace=False):
     r"""
-
+    Applies the hardtanh activation function element-wise. The activation function is defined as:
 
-
-
-
-
-
-
+    .. math::
+        \text{hardtanh}(input) = \begin{cases}
+            max\_val, & \text{ if } input > max\_val \\
+            min\_val, & \text{ if } input < min\_val \\
+            input, & \text{ otherwise. }
+        \end{cases}
 
-
+    Linear region range :math:`[min\_val, max\_val]` can be adjusted using `min_val` and `max_val`.
 
-
+    Hardtanh Activation Function Graph:
 
-
-
+    .. image:: ../images/Hardtanh.png
+        :align: center
 
     .. warning::
         This is an experimental optimizer API that is subject to change.
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    Args:
+        input (Tensor): Input Tensor.
+        min_val (Union[bool, int, float], optional): Minimum value of the linear region range. Default: ``-1.0`` .
+        max_val (Union[bool, int, float], optional): Maximum value of the linear region range. Default: ``1.0`` .
+        inplace (bool, optional): Whether to apply erasing inplace. Default: ``False``.
+
+    Returns:
+        Tensor, with the same dtype and shape as `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If dtype of `input` is not one of: int8, int16, int32, int64, uint8, float16, float32, bfloat16.
+        TypeError: If dtype of `min_val` is neither float nor int.
+        TypeError: If dtype of `max_val` is neither float nor int.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> x = Tensor([-1, -2, 0, 2, 1], mindspore.float16)
+        >>> output = mint.nn.functional.hardtanh(x, min_val=-1.0, max_val=1.0, inplace=False)
+        >>> print(output)
+        [-1. -1. 0. 1. 1.]
+    """
     if inplace:
         return hardtanh_(input, min_val, max_val)
     return hardtanh_op(input, min_val, max_val)
@@ -551,8 +662,8 @@ def one_hot(tensor, num_classes=-1):
 
     Args:
         tensor (Tensor): A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
-            Data type must be int32 or int64.
-        num_classes (int): A scalar defining the depth of the one-hot dimension, default: ``-1``.
+            Data type must be int32 or int64. Dimension cannot be greater than 7.
+        num_classes (int, optional): A scalar defining the depth of the one-hot dimension, default: ``-1``.
 
     Returns:
         Tensor, one-hot tensor.
@@ -685,8 +796,8 @@ def log_warning(msg):
 def dropout2d(input, p=0.5, training=True):
     r"""
     During training, randomly zeroes some channels of the input tensor with probability `p`
-    from a Bernoulli distribution(For a 4-dimensional tensor with a shape of :math:`
-    the channel feature map refers to a 2-dimensional feature map with the shape of :math:`
+    from a Bernoulli distribution (For a 4-dimensional tensor with a shape of :math:`(N, C, H, W)`,
+    the channel feature map refers to a 2-dimensional feature map with the shape of :math:`(H, W)`).
 
     For example, the :math:`j\_th` channel of the :math:`i\_th` sample in the batched input is a to-be-processed
     `2D` tensor input[i,j].
@@ -706,9 +817,9 @@ def dropout2d(input, p=0.5, training=True):
     Args:
         input (Tensor): A `4D` tensor with shape :math:`(N, C, H, W)`, where `N` is the batch size, `C` is the number
             of channels, `H` is the feature height, and `W` is the feature width.
-        p (float): The dropping probability of a channel, between 0 and 1, e.g. `p` = 0.8,
+        p (float, optional): The dropping probability of a channel, between 0 and 1, e.g. `p` = 0.8,
             which means dropping out 80% of channels. Default: ``0.5`` .
-        training(bool): If `training` is True, applying dropout, otherwise, not applying. Default: ``True`` .
+        training(bool, optional): If `training` is True, applying dropout, otherwise, not applying. Default: ``True`` .
 
     Returns:
         Tensor, output, with the same shape and data type as `input`.
@@ -831,27 +942,73 @@ def upsample(input, size=None, scale_factor=None, mode="nearest", align_corners=
     return interpolate(input, size, scale_factor, mode, align_corners)
 
 
+def threshold(input, threshold, value, inplace=False): # pylint: disable=W0621
+    r"""
+    Compute the Threshold activation function element-wise.
+
+    The Threshold is defined as:
+
+    .. math::
+        y =
+        \begin{cases}
+        x, &\text{ if } x > \text{threshold} \\
+        \text{value}, &\text{ otherwise }
+        \end{cases}
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The input Tensor.
+        threshold (Union[int, float]): The value of the threshold.
+        value (Union[int, float]): The value to replace with when element is less than threshold.
+        inplace (bool, optional): Whether to apply erasing inplace. Default: ``False``.
+
+    Returns:
+        Tensor, the same shape and data type as the input.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If `threshold` is not a float or an int.
+        TypeError: If `value` is not a float or an int.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> inputs = mindspore.Tensor([0.0, 2, 3], mindspore.float32)
+        >>> outputs = mint.nn.functional.threshold(inputs, 1, 100)
+        >>> print(outputs)
+        [100. 2. 3.]
+    """
+    if inplace is True:
+        return threshold_(input, threshold, value)
+    return threshold_op(input, threshold, value)
+
+
 def adaptive_avg_pool3d(input, output_size):
     r"""
     Performs 3D adaptive average pooling on a multi-plane input signal.
     That is, for any input size, the size of the specified output is :math:`(D, H, W)`.
     The number of output features is equal to the number of input planes.
 
-    Suppose the last 3 dimension size of x is :math:`(
-    :math:`(
+    Suppose the last 3 dimension size of x is :math:`(D_{in}, H_{in}, W_{in})`, the last 3 dimension size of output is
+    :math:`(D_{out}, H_{out}, W_{out})`.
 
     .. math::
         \begin{array}{ll} \\
-            \forall \quad od \in [0,
+            \forall \quad od \in [0, D_{out}-1], oh \in [0, H_{out}-1], ow \in [0, W_{out}-1] \\
             output[od,oh,ow] = \\
-                \qquad mean(x[
-            where
-                \qquad
-                \qquad
-                \qquad
-                \qquad
-                \qquad
-                \qquad
+                \qquad mean(x[D_{istart}:D_{iend}+1,H_{istart}:H_{iend}+1,W_{istart}:W_{iend}+1]) \\
+            where, \\
+                \qquad D_{istart}= \left\lceil \frac{od * D_{in}}{D_{out}} \right\rceil \\
+                \qquad D_{iend}=\left\lfloor \frac{(od+1)* D_{in}}{D_{out}} \right\rfloor \\
+                \qquad H_{istart}=\left\lceil \frac{oh * H_{in}}{H_{out}} \right\rceil \\
+                \qquad H_{iend}=\left\lfloor \frac{(oh+1) * H_{in}}{H_{out}} \right\rfloor \\
+                \qquad W_{istart}=\left\lceil \frac{ow * W_{in}}{W_{out}} \right\rceil \\
+                \qquad W_{iend}=\left\lfloor \frac{(ow+1) * W_{in}}{W_{out}} \right\rfloor
         \end{array}
 
     .. warning::
@@ -908,6 +1065,53 @@ def adaptive_avg_pool3d(input, output_size):
     return adaptive_avg_pool3d_ext(input, output_size)
 
 
+def adaptive_max_pool1d(input, output_size, return_indices=False):
+    r"""
+    Performs 1D adaptive max pooling on a multi-plane input signal.
+    That is, for any input size, the size of the specified output is L.
+    The number of output features is equal to the number of input features.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Args:
+        input (Tensor): The input of adaptive_max_pool1d, which is a 2D or 3D tensor,
+            with float16, float32 or float64 data type.
+        output_size (int): The target output feature size. `output_size` is an integer.
+        return_indices (bool, optional): Whether to return the index of the maximum value. Default: ``False`` .
+
+    Returns:
+        Union(Tensor, tuple(Tensor, Tensor)).
+
+        - If `return_indices` is False, output is a Tensor, with shape :math:`(N, C, L_{out})`. It has the same data
+          type as `input`.
+        - If `return_indices` is True, output is a Tuple of 2 Tensors, representing the result and where the max
+          values are generated.
+
+    Raises:
+        TypeError: If `input` is not a tensor.
+        TypeError: If dtype of `input` is not float16, float32 or float64.
+        TypeError: If `output_size` is not int or tuple.
+        TypeError: If `return_indices` is not a bool.
+        ValueError: If `output_size` is a tuple and the length of `output_size` is not 1.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, mint
+        >>> input = Tensor([[2,3],[3,4]],dtype=mindspore.float16)
+        >>> output = mint.nn.functional.adaptive_max_pool1d(input, 3)
+        >>> print(output)
+        [[2. 3. 3. ]
+         [3. 4. 4. ]]
+    """
+    if return_indices:
+        return ops.auto_generate.gen_ops_prim.adaptive_max_pool1d_op(input, output_size)
+    return ops.auto_generate.gen_ops_prim.adaptive_max_pool1d_op(input, output_size)[0]
+
+
 __all__ = [
     'conv_transpose2d',
     'max_pool2d',
@@ -943,6 +1147,7 @@ __all__ = [
     # 14
     'dropout',
     # 15
+    'conv1d',
     'conv2d',
     # 16
     'log_softmax',
@@ -1002,7 +1207,7 @@ __all__ = [
     # 43
 
     # 44
-
+    'soft_margin_loss',
     # 45
 
     # 46
@@ -1022,7 +1227,7 @@ __all__ = [
     # 53
 
     # 54
-
+    'pixel_shuffle',
     # 55
 
     # 56
@@ -1090,7 +1295,7 @@ __all__ = [
     # 87
 
     # 88
-
+    'avg_pool3d',
     # 89
     'avg_pool1d',
     # 90
@@ -1119,6 +1324,12 @@ __all__ = [
     'adaptive_avg_pool3d',
     # 254
     'max_unpool2d',
+    # 256
+    'threshold',
+    'threshold_',
+
+    # 288
+    'adaptive_max_pool2d',
 
     # 312
     'normalize',
@@ -1127,13 +1338,14 @@ __all__ = [
 
     # 324
     'elu',
+    'elu_',
     # 325
 
     #556
     'logsigmoid',
 
     # 257
-
+    'adaptive_max_pool1d',
     # 258
     'mse_loss',
     # 259
@@ -1141,6 +1353,10 @@ __all__ = [
     'adaptive_avg_pool1d',
 
     'adaptive_avg_pool2d',
+
+    # 350
+    'conv1d',
+
     # 393
     'dropout2d',
     # 421
@@ -1151,4 +1367,6 @@ __all__ = [
     'hardtanh',
     'hardtanh_',
     'relu6',
+    # 548
+    'kl_div',
 ]
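Taken together, the hunks above add conv1d, kl_div, avg_pool3d, soft_margin_loss, pixel_shuffle, adaptive_max_pool1d, adaptive_max_pool2d, and the in-place variants elu_ and threshold_ to mindspore.mint.nn.functional. The sketch below shows the out-of-place/in-place dispatch pattern these wrappers share; it assumes a machine running mindspore 2.6.0rc1 with an Ascend backend, and the sample values are illustrative rather than taken from the package.

import numpy as np
import mindspore
from mindspore import Tensor, mint

x = Tensor(np.array([-1.0, -2.0, 0.0, 2.0, 1.0]), mindspore.float32)

# Out-of-place: elu() returns a new tensor and leaves x untouched.
y = mint.nn.functional.elu(x, alpha=1.0)

# threshold() keeps elements greater than `threshold` and substitutes
# `value` for the rest, per the docstring added above.
z = mint.nn.functional.threshold(x, 0.5, 0.0)

# Trailing-underscore variants mutate their argument in place; elu_()
# routes to the same inplace_elu primitive as elu(..., inplace=True).
mint.nn.functional.elu_(x)
print(y, z, x)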
mindspore/mint/nn/__init__.py
CHANGED

@@ -30,7 +30,9 @@ from mindspore.mint.nn.layer.normalization import LayerNorm
 from mindspore.mint.nn.layer.normalization import SyncBatchNorm
 from mindspore.mint.nn.layer.activation import LogSigmoid
 from mindspore.mint.nn.layer.activation import SiLU
+from mindspore.mint.nn.layer.activation import Threshold
 from mindspore.mint.nn.layer.basic import Dropout2d
+from mindspore.mint.nn.layer.pooling import AdaptiveMaxPool1d
 from mindspore.mint.nn.layer.pooling import AdaptiveAvgPool1d
 from mindspore.mint.nn.layer.pooling import AdaptiveAvgPool2d
 from mindspore.mint.nn.layer.pooling import AdaptiveAvgPool3d
@@ -45,8 +47,10 @@ __all__ = [
     'LogSigmoid',
     'SiLU',
     'Dropout2d',
+    'AdaptiveMaxPool1d',
     'AdaptiveAvgPool1d',
     'AdaptiveAvgPool2d',
     'AdaptiveAvgPool3d',
     'SyncBatchNorm',
+    'Threshold',
 ]
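These re-exports make the new layers available as mint.nn.Threshold and mint.nn.AdaptiveMaxPool1d. A hedged usage sketch follows, again assuming mindspore 2.6.0rc1 on Ascend; the constructor signatures are assumed to mirror the functional forms shown earlier (threshold/value, output_size) and are not confirmed by this diff.

import numpy as np
import mindspore
from mindspore import Tensor, mint

# Assumed signatures: Threshold(threshold, value), AdaptiveMaxPool1d(output_size).
act = mint.nn.Threshold(0.1, 20.0)   # replace elements <= 0.1 with 20.0
pool = mint.nn.AdaptiveMaxPool1d(3)  # pool to a fixed output length of 3

x = Tensor(np.array([[0.05, 0.3, 0.9, 0.2]]), mindspore.float32)
print(act(x))
print(pool(x))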
mindspore/mint/nn/layer/_functions.py
CHANGED

@@ -1,14 +1,17 @@
 import mindspore
 from mindspore import Tensor
 from mindspore import context
+import mindspore.communication
+import mindspore.communication.comm_func
 from mindspore.nn.cell import Cell
 from mindspore.ops.auto_generate.gen_ops_prim import BatchNormReduceGrad
 from mindspore.ops.auto_generate.gen_ops_prim import BatchNormElemtGrad
 from mindspore.communication import GlobalComm
 from mindspore.ops import ReduceOp
-from mindspore._c_expression import
+from mindspore._c_expression import TensorPy as Tensor_
 from mindspore.communication._comm_helper import _get_size_helper, HCCL_WORLD_COMM_GROUP
 from mindspore.ops._primitive_cache import _get_cache_prim
+from mindspore.communication.comm_func import all_gather_into_tensor as all_gather_into_tensor_dy
 from mindspore.ops import operations as P
 from mindspore import ops, mint
 
@@ -111,7 +114,8 @@ def bprop_pynative(input_x, weight, bias, running_mean, running_var, eps, moment
     num_channels = sum_dy_shape[0]
     combined = mint.cat([sum_dy, sum_dy_xmu], dim=0)
 
-    new_combined = all_reduce(
+    new_combined, _ = mindspore.communication.comm_func.all_reduce(
+        combined, group=process_group)
 
     sum_dy, sum_dy_xmu = mint.split(new_combined, num_channels)
 
@@ -227,7 +231,7 @@ def construct_pynative(input, weight, bias, running_mean, running_var, eps, mome
     # batch_norm_gather_stats_with_counts calculates global mean & invstd based on
     # all gathered mean, invstd and count.
     # world_size * (2C + 1)
-    combined =
+    combined, _ = all_gather_into_tensor_dy(combined, process_group)
     combined = ops.reshape(combined, [world_size, -1])
     # world_size * (2C + 1) -> world_size * C, world_size * C, world_size * 1
     mean_val_all, invstd_val_all, count_val_all = mint.split(