mindspore-2.5.0-cp311-cp311-win_amd64.whl → mindspore-2.6.0rc1-cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +6 -4
- mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +3 -0
- mindspore/_checkparam.py +3 -33
- mindspore/_deprecated/__init__.py +17 -0
- mindspore/_deprecated/jit.py +198 -0
- mindspore/_extends/builtin_operations.py +1 -1
- mindspore/_extends/parse/__init__.py +6 -7
- mindspore/_extends/parse/compile_config.py +19 -0
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +22 -3
- mindspore/_extends/parse/jit_fallback_modules/__init__.py +0 -0
- mindspore/_extends/parse/jit_fallback_modules/check_utils.py +123 -0
- mindspore/_extends/parse/jit_fallback_modules/third_party_modules.py +50 -0
- mindspore/_extends/parse/parser.py +24 -193
- mindspore/_extends/parse/resources.py +1 -5
- mindspore/_extends/parse/standard_method.py +97 -74
- mindspore/_extends/pijit/__init__.py +2 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +16 -11
- mindspore/_extends/pijit/tensor_func_list.py +27 -0
- mindspore/_extends/utils.py +1 -1
- mindspore/amp.py +4 -4
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/__init__.py +2 -2
- mindspore/boost/base.py +3 -7
- mindspore/boost/boost_cell_wrapper.py +2 -2
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +4 -3
- mindspore/common/_grad_function.py +56 -0
- mindspore/common/_pijit_context.py +14 -5
- mindspore/common/_register_for_tensor.py +1 -1
- mindspore/common/_stub_tensor.py +5 -10
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +1915 -3287
- mindspore/common/api.py +341 -354
- mindspore/common/auto_dynamic_shape.py +41 -44
- mindspore/common/dtype.py +5 -2
- mindspore/common/dump.py +7 -5
- mindspore/common/file_system.py +3 -0
- mindspore/common/hook_handle.py +5 -3
- mindspore/common/initializer.py +10 -6
- mindspore/common/jit_begin_end.py +94 -0
- mindspore/common/jit_config.py +6 -1
- mindspore/common/jit_context.py +76 -0
- mindspore/common/jit_trace.py +378 -0
- mindspore/common/lazy_inline.py +2 -2
- mindspore/common/mutable.py +5 -4
- mindspore/common/parameter.py +106 -39
- mindspore/common/seed.py +2 -2
- mindspore/common/sparse_tensor.py +23 -17
- mindspore/common/tensor.py +297 -714
- mindspore/communication/__init__.py +7 -5
- mindspore/communication/_comm_helper.py +47 -2
- mindspore/communication/comm_func.py +70 -53
- mindspore/communication/management.py +83 -17
- mindspore/context.py +214 -560
- mindspore/dataset/__init__.py +44 -20
- mindspore/dataset/audio/__init__.py +2 -8
- mindspore/dataset/audio/transforms.py +3 -17
- mindspore/dataset/core/config.py +3 -3
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +102 -120
- mindspore/dataset/engine/datasets_audio.py +22 -22
- mindspore/dataset/engine/datasets_standard_format.py +43 -24
- mindspore/dataset/engine/datasets_text.py +78 -85
- mindspore/dataset/engine/datasets_user_defined.py +108 -76
- mindspore/dataset/engine/datasets_vision.py +111 -108
- mindspore/dataset/engine/iterators.py +5 -3
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +1 -1
- mindspore/dataset/engine/samplers.py +279 -57
- mindspore/dataset/engine/serializer_deserializer.py +2 -1
- mindspore/dataset/engine/validators.py +10 -0
- mindspore/dataset/text/__init__.py +7 -6
- mindspore/dataset/text/transforms.py +6 -5
- mindspore/dataset/text/utils.py +3 -3
- mindspore/dataset/transforms/__init__.py +0 -9
- mindspore/dataset/transforms/transforms.py +3 -3
- mindspore/dataset/utils/browse_dataset.py +1 -1
- mindspore/dataset/vision/__init__.py +2 -9
- mindspore/dataset/vision/transforms.py +202 -158
- mindspore/dataset/vision/utils.py +7 -5
- mindspore/device_context/ascend/op_debug.py +60 -1
- mindspore/device_context/ascend/op_tuning.py +0 -4
- mindspore/device_manager.py +39 -3
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/es/embedding_service.py +35 -27
- mindspore/experimental/map_parameter.py +4 -4
- mindspore/experimental/optim/adadelta.py +22 -26
- mindspore/experimental/optim/adagrad.py +4 -4
- mindspore/experimental/optim/adam.py +4 -0
- mindspore/experimental/optim/adamax.py +4 -4
- mindspore/experimental/optim/adamw.py +4 -0
- mindspore/experimental/optim/asgd.py +1 -1
- mindspore/experimental/optim/lr_scheduler.py +40 -22
- mindspore/experimental/optim/radam.py +5 -5
- mindspore/experimental/optim/rprop.py +1 -1
- mindspore/experimental/optim/sgd.py +1 -1
- mindspore/hal/contiguous_tensors_handle.py +6 -10
- mindspore/hal/device.py +55 -81
- mindspore/hal/event.py +38 -55
- mindspore/hal/memory.py +93 -144
- mindspore/hal/stream.py +81 -125
- mindspore/include/dataset/constants.h +7 -4
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +40 -2
- mindspore/mindrecord/__init__.py +20 -7
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_backend.dll → mindspore_ops_host.dll} +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +131 -700
- mindspore/mint/distributed/__init__.py +5 -1
- mindspore/mint/distributed/distributed.py +194 -109
- mindspore/mint/linalg/__init__.py +2 -0
- mindspore/mint/nn/__init__.py +280 -18
- mindspore/mint/nn/functional.py +282 -64
- mindspore/mint/nn/layer/__init__.py +4 -0
- mindspore/mint/nn/layer/_functions.py +7 -3
- mindspore/mint/nn/layer/activation.py +120 -13
- mindspore/mint/nn/layer/conv.py +218 -24
- mindspore/mint/nn/layer/normalization.py +15 -16
- mindspore/mint/nn/layer/padding.py +1 -1
- mindspore/mint/nn/layer/pooling.py +66 -1
- mindspore/mint/optim/__init__.py +2 -1
- mindspore/mint/optim/sgd.py +171 -0
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/__init__.py +4 -1
- mindspore/nn/cell.py +1250 -176
- mindspore/nn/layer/activation.py +23 -21
- mindspore/nn/layer/basic.py +22 -16
- mindspore/nn/layer/container.py +1 -1
- mindspore/nn/layer/conv.py +22 -17
- mindspore/nn/layer/embedding.py +9 -8
- mindspore/nn/layer/normalization.py +48 -42
- mindspore/nn/layer/pooling.py +75 -31
- mindspore/nn/layer/transformer.py +11 -10
- mindspore/nn/learning_rate_schedule.py +4 -2
- mindspore/nn/loss/loss.py +27 -19
- mindspore/nn/optim/ada_grad.py +6 -5
- mindspore/nn/optim/adadelta.py +9 -7
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +16 -12
- mindspore/nn/optim/adamax.py +8 -7
- mindspore/nn/optim/adasum.py +5 -5
- mindspore/nn/optim/asgd.py +1 -1
- mindspore/nn/optim/ftrl.py +11 -9
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/lazyadam.py +12 -10
- mindspore/nn/optim/momentum.py +7 -6
- mindspore/nn/optim/optimizer.py +2 -2
- mindspore/nn/optim/proximal_ada_grad.py +12 -10
- mindspore/nn/optim/rmsprop.py +13 -12
- mindspore/nn/optim/rprop.py +9 -7
- mindspore/nn/optim/sgd.py +9 -6
- mindspore/nn/optim/tft_wrapper.py +5 -2
- mindspore/nn/probability/bijector/bijector.py +17 -11
- mindspore/nn/probability/bijector/gumbel_cdf.py +5 -5
- mindspore/nn/probability/bijector/invert.py +2 -2
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +3 -2
- mindspore/nn/probability/distribution/beta.py +3 -3
- mindspore/nn/probability/distribution/categorical.py +1 -1
- mindspore/nn/probability/distribution/cauchy.py +4 -2
- mindspore/nn/probability/distribution/exponential.py +6 -7
- mindspore/nn/probability/distribution/gamma.py +2 -2
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/half_normal.py +5 -3
- mindspore/nn/probability/distribution/logistic.py +5 -3
- mindspore/nn/probability/distribution/poisson.py +1 -1
- mindspore/nn/probability/distribution/uniform.py +5 -3
- mindspore/nn/reinforcement/_tensors_queue.py +1 -1
- mindspore/nn/reinforcement/tensor_array.py +1 -1
- mindspore/nn/wrap/__init__.py +6 -6
- mindspore/nn/wrap/cell_wrapper.py +178 -117
- mindspore/nn/wrap/grad_reducer.py +45 -36
- mindspore/nn/wrap/loss_scale.py +3 -3
- mindspore/numpy/array_creations.py +3 -3
- mindspore/numpy/array_ops.py +1 -1
- mindspore/numpy/math_ops.py +4 -4
- mindspore/numpy/utils.py +1 -2
- mindspore/numpy/utils_const.py +1 -2
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +3 -2
- mindspore/ops/_grad_experimental/grad_comm_ops.py +18 -3
- mindspore/ops/_grad_experimental/grad_debug_ops.py +8 -1
- mindspore/ops/_grad_experimental/taylor_rule.py +29 -0
- mindspore/ops/_register_for_op.py +0 -11
- mindspore/{ops_generate → ops/_utils}/arg_dtype_cast.py +123 -4
- mindspore/{ops_generate → ops/_utils}/arg_handler.py +3 -4
- mindspore/ops/_vmap/vmap_array_ops.py +7 -6
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +2 -1
- mindspore/ops/_vmap/vmap_math_ops.py +4 -7
- mindspore/ops/_vmap/vmap_nn_ops.py +9 -8
- mindspore/ops/auto_generate/__init__.py +4 -3
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +102 -49
- mindspore/ops/auto_generate/gen_extend_func.py +281 -135
- mindspore/ops/auto_generate/gen_ops_def.py +2574 -2326
- mindspore/ops/auto_generate/gen_ops_prim.py +8566 -2755
- mindspore/ops/auto_generate/pyboost_inner_prim.py +106 -76
- mindspore/ops/composite/__init__.py +2 -1
- mindspore/ops/composite/base.py +19 -24
- mindspore/ops/composite/math_ops.py +6 -16
- mindspore/ops/composite/multitype_ops/__init__.py +5 -2
- mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -3
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -2
- mindspore/ops/composite/multitype_ops/add_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +6 -4
- mindspore/ops/composite/multitype_ops/equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/greater_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/invert_impl.py +50 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/less_impl.py +4 -3
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +3 -2
- mindspore/ops/composite/multitype_ops/negative_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +18 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/setitem_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/sub_impl.py +2 -1
- mindspore/ops/function/__init__.py +28 -2
- mindspore/ops/function/_add_attr_func.py +58 -0
- mindspore/ops/function/array_func.py +1629 -2345
- mindspore/ops/function/clip_func.py +38 -45
- mindspore/ops/function/debug_func.py +36 -44
- mindspore/ops/function/grad/__init__.py +1 -0
- mindspore/ops/function/grad/grad_func.py +104 -71
- mindspore/ops/function/image_func.py +1 -1
- mindspore/ops/function/linalg_func.py +46 -78
- mindspore/ops/function/math_func.py +3035 -3705
- mindspore/ops/function/nn_func.py +676 -241
- mindspore/ops/function/other_func.py +159 -1
- mindspore/ops/function/parameter_func.py +17 -30
- mindspore/ops/function/random_func.py +204 -361
- mindspore/ops/function/reshard_func.py +4 -70
- mindspore/ops/function/sparse_func.py +3 -3
- mindspore/ops/function/sparse_unary_func.py +5 -5
- mindspore/ops/function/spectral_func.py +25 -58
- mindspore/ops/function/vmap_func.py +24 -17
- mindspore/ops/functional.py +6 -4
- mindspore/ops/functional_overload.py +547 -4
- mindspore/ops/op_info_register.py +32 -244
- mindspore/ops/operations/__init__.py +10 -5
- mindspore/ops/operations/_custom_ops_utils.py +247 -0
- mindspore/ops/operations/_grad_ops.py +1 -10
- mindspore/ops/operations/_inner_ops.py +5 -76
- mindspore/ops/operations/_ms_kernel.py +4 -10
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +3 -2
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/array_ops.py +37 -22
- mindspore/ops/operations/comm_ops.py +150 -107
- mindspore/ops/operations/custom_ops.py +221 -23
- mindspore/ops/operations/debug_ops.py +115 -16
- mindspore/ops/operations/inner_ops.py +1 -1
- mindspore/ops/operations/linalg_ops.py +1 -58
- mindspore/ops/operations/manually_defined/_inner.py +1 -1
- mindspore/ops/operations/manually_defined/ops_def.py +746 -79
- mindspore/ops/operations/math_ops.py +21 -18
- mindspore/ops/operations/nn_ops.py +65 -191
- mindspore/ops/operations/other_ops.py +62 -9
- mindspore/ops/operations/random_ops.py +13 -7
- mindspore/ops/operations/reshard_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +2 -2
- mindspore/ops/primitive.py +43 -32
- mindspore/ops/tensor_method.py +232 -13
- mindspore/ops_generate/__init__.py +0 -5
- mindspore/ops_generate/aclnn/__init__.py +0 -0
- mindspore/ops_generate/{aclnn_kernel_register_auto_cc_generator.py → aclnn/aclnn_kernel_register_auto_cc_generator.py} +43 -18
- mindspore/ops_generate/{gen_aclnn_implement.py → aclnn/gen_aclnn_implement.py} +49 -51
- mindspore/ops_generate/api/__init__.py +0 -0
- mindspore/ops_generate/{add_tensor_docs_generator.py → api/add_tensor_docs_generator.py} +9 -7
- mindspore/ops_generate/{cpp_create_prim_instance_helper_generator.py → api/cpp_create_prim_instance_helper_generator.py} +6 -9
- mindspore/ops_generate/{functional_map_cpp_generator.py → api/functional_map_cpp_generator.py} +25 -12
- mindspore/ops_generate/{functional_overload_py_generator.py → api/functional_overload_py_generator.py} +8 -6
- mindspore/ops_generate/{functions_cc_generator.py → api/functions_cc_generator.py} +14 -10
- mindspore/ops_generate/api/gen_api.py +103 -0
- mindspore/ops_generate/{op_api_proto.py → api/op_api_proto.py} +98 -69
- mindspore/ops_generate/{tensor_func_reg_cpp_generator.py → api/tensor_func_reg_cpp_generator.py} +82 -43
- mindspore/ops_generate/common/__init__.py +0 -0
- mindspore/ops_generate/common/gen_constants.py +91 -0
- mindspore/ops_generate/{gen_utils.py → common/gen_utils.py} +72 -19
- mindspore/ops_generate/{op_proto.py → common/op_proto.py} +64 -1
- mindspore/ops_generate/{template.py → common/template.py} +96 -84
- mindspore/ops_generate/gen_ops.py +23 -325
- mindspore/ops_generate/op_def/__init__.py +0 -0
- mindspore/ops_generate/op_def/gen_op_def.py +90 -0
- mindspore/ops_generate/{lite_ops_cpp_generator.py → op_def/lite_ops_cpp_generator.py} +47 -11
- mindspore/ops_generate/{ops_def_cc_generator.py → op_def/ops_def_cc_generator.py} +18 -7
- mindspore/ops_generate/{ops_def_h_generator.py → op_def/ops_def_h_generator.py} +5 -5
- mindspore/ops_generate/{ops_name_h_generator.py → op_def/ops_name_h_generator.py} +30 -15
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +125 -0
- mindspore/ops_generate/op_def_py/__init__.py +0 -0
- mindspore/ops_generate/op_def_py/gen_op_def_py.py +47 -0
- mindspore/ops_generate/{op_def_py_generator.py → op_def_py/op_def_py_generator.py} +6 -5
- mindspore/ops_generate/{op_prim_py_generator.py → op_def_py/op_prim_py_generator.py} +24 -15
- mindspore/ops_generate/pyboost/__init__.py +0 -0
- mindspore/ops_generate/{auto_grad_impl_cc_generator.py → pyboost/auto_grad_impl_cc_generator.py} +11 -7
- mindspore/ops_generate/{auto_grad_reg_cc_generator.py → pyboost/auto_grad_reg_cc_generator.py} +7 -7
- mindspore/ops_generate/{gen_pyboost_func.py → pyboost/gen_pyboost_func.py} +40 -16
- mindspore/ops_generate/{op_template_parser.py → pyboost/op_template_parser.py} +105 -24
- mindspore/ops_generate/{pyboost_functions_cpp_generator.py → pyboost/pyboost_functions_cpp_generator.py} +55 -18
- mindspore/ops_generate/{pyboost_functions_h_generator.py → pyboost/pyboost_functions_h_generator.py} +42 -10
- mindspore/ops_generate/{pyboost_functions_py_generator.py → pyboost/pyboost_functions_py_generator.py} +6 -6
- mindspore/ops_generate/{pyboost_grad_function_cpp_generator.py → pyboost/pyboost_grad_function_cpp_generator.py} +11 -10
- mindspore/ops_generate/{pyboost_inner_prim_generator.py → pyboost/pyboost_inner_prim_generator.py} +8 -7
- mindspore/ops_generate/{pyboost_native_grad_functions_generator.py → pyboost/pyboost_native_grad_functions_generator.py} +14 -10
- mindspore/ops_generate/{pyboost_op_cpp_code_generator.py → pyboost/pyboost_op_cpp_code_generator.py} +140 -53
- mindspore/ops_generate/{pyboost_overload_functions_cpp_generator.py → pyboost/pyboost_overload_functions_cpp_generator.py} +28 -15
- mindspore/ops_generate/{pyboost_utils.py → pyboost/pyboost_utils.py} +88 -4
- mindspore/ops_generate/resources/__init__.py +0 -0
- mindspore/ops_generate/resources/resource_list.py +30 -0
- mindspore/ops_generate/resources/resource_loader.py +36 -0
- mindspore/ops_generate/resources/resource_manager.py +64 -0
- mindspore/ops_generate/resources/yaml_loader.py +88 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +122 -0
- mindspore/parallel/__init__.py +6 -2
- mindspore/parallel/_auto_parallel_context.py +133 -6
- mindspore/parallel/_cell_wrapper.py +130 -15
- mindspore/parallel/_parallel_serialization.py +95 -4
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +7 -2
- mindspore/parallel/_tensor.py +142 -18
- mindspore/parallel/_utils.py +198 -25
- mindspore/parallel/algo_parameter_config.py +3 -3
- mindspore/parallel/auto_parallel.py +732 -0
- mindspore/parallel/checkpoint_convert.py +159 -0
- mindspore/parallel/checkpoint_transform.py +656 -37
- mindspore/parallel/cluster/process_entity/_api.py +151 -19
- mindspore/parallel/cluster/run.py +1 -1
- mindspore/parallel/function/__init__.py +24 -0
- mindspore/parallel/function/reshard_func.py +259 -0
- mindspore/parallel/nn/__init__.py +25 -0
- mindspore/parallel/nn/parallel_cell_wrapper.py +263 -0
- mindspore/parallel/nn/parallel_grad_reducer.py +169 -0
- mindspore/parallel/parameter_broadcast.py +24 -13
- mindspore/parallel/shard.py +137 -61
- mindspore/parallel/transform_safetensors.py +287 -95
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +9 -5
- mindspore/profiler/analysis/parser/ascend_cann_parser.py +6 -2
- mindspore/profiler/analysis/parser/ms_framework_parser.py +4 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -4
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +22 -0
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +241 -86
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +41 -2
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +33 -35
- mindspore/profiler/analysis/viewer/ascend_memory_viewer.py +7 -0
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +8 -3
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +141 -30
- mindspore/profiler/analysis/viewer/ms_dataset_viewer.py +5 -6
- mindspore/profiler/common/ascend_msprof_exporter.py +5 -4
- mindspore/profiler/common/constant.py +12 -0
- mindspore/profiler/common/msprof_cmd_tool.py +42 -23
- mindspore/profiler/common/path_manager.py +24 -0
- mindspore/profiler/common/profiler_context.py +26 -2
- mindspore/profiler/common/profiler_meta_data.py +74 -0
- mindspore/profiler/common/profiler_parameters.py +59 -18
- mindspore/profiler/common/profiler_path_manager.py +66 -7
- mindspore/profiler/dynamic_profiler.py +112 -79
- mindspore/profiler/envprofiler.py +26 -1
- mindspore/profiler/experimental_config.py +197 -0
- mindspore/profiler/mstx.py +57 -14
- mindspore/profiler/platform/npu_profiler.py +33 -7
- mindspore/profiler/profiler.py +541 -45
- mindspore/profiler/profiler_action_controller.py +1 -1
- mindspore/profiler/profiler_interface.py +4 -0
- mindspore/profiler/schedule.py +57 -22
- mindspore/rewrite/api/node.py +15 -13
- mindspore/rewrite/api/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +25 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/runtime/__init__.py +2 -2
- mindspore/runtime/executor.py +40 -11
- mindspore/runtime/memory.py +25 -8
- mindspore/safeguard/rewrite_obfuscation.py +12 -9
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +8 -8
- mindspore/train/_utils.py +35 -7
- mindspore/train/amp.py +1 -1
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +2 -16
- mindspore/train/callback/_checkpoint.py +24 -40
- mindspore/train/callback/_cluster_monitor.py +14 -18
- mindspore/train/callback/_flops_collector.py +2 -3
- mindspore/train/callback/_history.py +7 -4
- mindspore/train/callback/_lambda_callback.py +2 -2
- mindspore/train/callback/_landscape.py +0 -3
- mindspore/train/callback/_loss_monitor.py +2 -1
- mindspore/train/callback/_on_request_exit.py +6 -5
- mindspore/train/callback/_reduce_lr_on_plateau.py +11 -6
- mindspore/train/callback/_summary_collector.py +8 -13
- mindspore/train/callback/_time_monitor.py +2 -1
- mindspore/train/callback/{_tft_register.py → _train_fault_tolerance.py} +179 -103
- mindspore/train/data_sink.py +25 -2
- mindspore/train/dataset_helper.py +4 -5
- mindspore/train/loss_scale_manager.py +8 -7
- mindspore/train/metrics/accuracy.py +3 -3
- mindspore/train/metrics/confusion_matrix.py +9 -9
- mindspore/train/metrics/error.py +3 -3
- mindspore/train/metrics/hausdorff_distance.py +4 -4
- mindspore/train/metrics/mean_surface_distance.py +3 -3
- mindspore/train/metrics/metric.py +0 -12
- mindspore/train/metrics/occlusion_sensitivity.py +4 -2
- mindspore/train/metrics/precision.py +8 -6
- mindspore/train/metrics/recall.py +9 -9
- mindspore/train/metrics/root_mean_square_surface_distance.py +2 -2
- mindspore/train/mind_ir_pb2.py +19 -12
- mindspore/train/model.py +176 -103
- mindspore/train/serialization.py +246 -988
- mindspore/train/summary/_summary_adapter.py +2 -2
- mindspore/train/summary/summary_record.py +1 -1
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +4 -2
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/utils.py +138 -4
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/METADATA +2 -1
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/RECORD +483 -438
- mindspore/_install_custom.py +0 -43
- mindspore/common/_register_for_adapter.py +0 -74
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +0 -252
- mindspore/ops/auto_generate/gen_arg_handler.py +0 -136
- mindspore/ops/operations/_opaque_predicate_registry.py +0 -41
- mindspore/ops_generate/gen_constants.py +0 -190
- mindspore/ops_generate/gen_ops_inner_prim.py +0 -131
- mindspore/ops_generate/ops_primitive_h_generator.py +0 -81
- /mindspore/ops_generate/{base_generator.py → common/base_generator.py} +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.5.0.dist-info → mindspore-2.6.0rc1.dist-info}/top_level.txt +0 -0

mindspore/ops/operations/math_ops.py

@@ -28,8 +28,8 @@ from mindspore.common import dtype as mstype
 from mindspore.common.tensor import Tensor
 from mindspore.ops._utils import get_broadcast_shape
 from mindspore.ops.primitive import Primitive, PrimitiveWithInfer, PrimitiveWithCheck, prim_attr_register, _run_op
-from mindspore._c_expression import
-from ..auto_generate import (Add, Addcdiv, Addcmul, ReduceMean, ReduceSum, ReduceAll, ReduceAny,
+from mindspore._c_expression import TensorPy as Tensor_
+from ..auto_generate import (Add, Addcdiv, AddcdivExt, Addcmul, AddcmulExt, ReduceMean, ReduceSum, ReduceAll, ReduceAny,
                              ReduceMax, ReduceMin, ReduceProd, Betainc, Neg, MatMul, BatchMatMul,
                              Mul, Square, Rsqrt, Sqrt, Reciprocal, Pow, Exp, Cdist,
                              Logit, ReduceStd, Expm1, Log, Log1p, Erf, Erfc,
@@ -819,6 +819,7 @@ class InplaceIndexAdd(Primitive):
         """Initialize InplaceIndexAdd"""
         self.init_prim_io_names(inputs=['var', 'indices', 'updates'], outputs=['var'])
         self.axis = axis
+        self.add_prim_attr('side_effect_mem', True)
         validator.check_value_type('axis', axis, [int], self.name)


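Note: the new `side_effect_mem` attribute marks the primitive as one that writes to its input memory in place, so the graph compiler cannot reorder or prune the update. A minimal usage sketch (standard `mindspore.ops` API, not taken from this diff; the printed values simply follow the documented in-place-add semantics):

    import numpy as np
    from mindspore import Tensor, Parameter, ops

    var = Parameter(Tensor(np.array([[1.0, 2.0], [3.0, 4.0]], np.float32)), name="var")
    indices = Tensor(np.array([0], np.int32))
    updates = Tensor(np.array([[10.0, 20.0]], np.float32))

    # Adds `updates` into `var` at `indices` along `axis`, mutating `var`.
    inplace_index_add = ops.InplaceIndexAdd(axis=0)
    output = inplace_index_add(var, indices, updates)
    print(output)  # expected: [[11. 22.], [3. 4.]]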
@@ -1128,7 +1129,7 @@ class Histogram(Primitive):
 class HistogramFixedWidth(PrimitiveWithInfer):
     """
     Returns a rank 1 histogram counting the number of entries in values that fall into every bin. The bins are equal
-    width and determined by the
+    width and determined by the input `range` and the argument `nbins`.

     Args:
         nbins (int): The number of histogram bins, the type is a positive integer.
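Note: the completed sentence refers to the op's second input (`range`) and its constructor argument (`nbins`). A sketch along the lines of the op's documented example (expected output assumes the documented edge-bin behavior, where out-of-range values land in the first or last bin):

    import mindspore
    from mindspore import Tensor, ops

    x = Tensor([-1.0, 0.0, 1.5, 2.0, 5.0, 15.0], mindspore.float16)
    value_range = Tensor([0.0, 5.0], mindspore.float16)  # the `range` input: [lo, hi]
    hist = ops.HistogramFixedWidth(5)                    # the `nbins` argument: 5 equal-width bins
    output = hist(x, value_range)
    print(output)  # expected: [2 1 1 0 2]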
@@ -1349,7 +1350,7 @@ class MulNoNan(_MathBinaryOp):
         int32, int64, float16, float32, float64, complex64, complex128 currently or scalar.

     Outputs:
-        Tensor, the shape is the same as the shape after broadcasting,
+        Tensor, the shape is the same as the shape of input Tensor after broadcasting,
         and the data type is the one with higher precision among the two inputs.

     Raises:
@@ -1735,7 +1736,8 @@ class ApproximateEqual(_LogicBinaryOp):
         the relatively highest precision data type.

     Args:
-        tolerance (float): The maximum deviation that two elements can be considered equal.
+        tolerance (float, optional): The maximum deviation that two elements can be considered equal.
+            Default: ``1e-05`` .

     Inputs:
         - **x** (Tensor) - A tensor. Must be one of the following types: float32, float16.
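Note: the newly documented default matters in practice, since the op computes elementwise |x1 - x2| < tolerance. A small sketch (expected outputs follow from that formula, not from this diff):

    import numpy as np
    from mindspore import Tensor, ops

    x1 = Tensor(np.array([1.0, 2.0, 3.0], np.float32))
    x2 = Tensor(np.array([1.000005, 2.1, 3.0], np.float32))

    approx_equal = ops.ApproximateEqual()   # tolerance defaults to 1e-05, as now documented
    print(approx_equal(x1, x2))             # expected: [ True False  True]

    approx_equal_loose = ops.ApproximateEqual(0.5)
    print(approx_equal_loose(x1, x2))       # expected: [ True  True  True]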
@@ -1917,14 +1919,14 @@ class NPUGetFloatStatus(Primitive):
     :class:`mindspore.ops.NPUGetFloatStatus` updates the flag which is
     the output tensor of :class:`mindspore.ops.NPUAllocFloatStatus` with the latest overflow status.

-
     Note:
         The flag is a tensor whose shape is :math:`(8,)` and data type is `mindspore.dtype.float32`.
         If the sum of the flag equals to 0, there is no overflow happened. If the sum of the
         flag is bigger than 0, there is overflow happened.
         In addition, there are strict sequencing requirements for use, i.e., before
         using the NPUGetFloatStatus operator, need to ensure that the NPUClearFlotStatus
-        and your compute has been executed. We use :class:`mindspore.ops.Depend`
+        and your compute has been executed. We use :class:`mindspore.ops.Depend`
+        to ensure the correct execution order.

     Inputs:
         - **x** (Tensor) - The output tensor of `NPUAllocFloatStatus`.

@@ -1932,7 +1934,7 @@ class NPUGetFloatStatus(Primitive):
          :math:`(N,*)` where :math:`*` means, any number of additional dimensions, its rank should be less than 8.

     Outputs:
-        Tensor, has the same shape as `x`.
+        Tensor, has the same shape as `x`.

     Raises:
         TypeError: If `x` is not a Tensor.
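Note: the ordering requirement the rewrapped text describes is usually met with `ops.depend`, roughly as in the op's documented usage. A sketch assuming graph mode on Ascend (the cell and variable names are illustrative):

    from mindspore import nn, ops

    class OverflowCheck(nn.Cell):
        def __init__(self):
            super().__init__()
            self.alloc_status = ops.NPUAllocFloatStatus()
            self.clear_status = ops.NPUClearFloatStatus()
            self.get_status = ops.NPUGetFloatStatus()

        def construct(self, x, y):
            status = self.alloc_status()
            clear = self.clear_status(status)
            x = ops.depend(x, clear)           # clear must run before the compute
            res = ops.sub(x, y)                # the computation being monitored
            status = ops.depend(status, res)   # ...and the compute before get
            get = self.get_status(status)
            res = ops.depend(res, get)
            flag_sum = status.sum()            # non-zero sum => overflow happened
            return res, flag_sum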
@@ -3386,9 +3388,8 @@ class Imag(Primitive):

     Examples:
         >>> import mindspore
-        >>>
-        >>>
-        >>> x = Tensor(np.asarray(np.complex(1.3+0.4j)), mindspore.complex64)
+        >>> from mindspore import ops
+        >>> x = mindspore.tensor(1.3+0.4j, mindspore.complex64)
         >>> imag = ops.Imag()
         >>> output = imag(x)
         >>> print(output)
@@ -3889,8 +3890,8 @@ class Digamma(Primitive):
         Tensor, has the same dtype as `x`.

     Raises:
-        TypeError: If x is not a Tensor.
-        TypeError: If dtype of input x is not float16 or float32 or float64.
+        TypeError: If `x` is not a Tensor.
+        TypeError: If dtype of input `x` is not float16 or float32 or float64.

     Supported Platforms:
         ``GPU`` ``CPU``
@@ -4042,7 +4043,10 @@ class Median(Primitive):
         axis (int, optional): The specified dimension to compute median. Default: ``0`` .
         keep_dims (bool, optional): Whether the output tensor need to retain `axis` dimension or not.
             Default: ``False`` .
-        ignore_nan (bool, optional): Whether to ignore the NaN values in input Tensor.
+        ignore_nan (bool, optional): Whether to ignore the ``NaN`` values in input Tensor. When ``False``, if the
+            input range (determined by `global_median`) contains a ``NaN`` value, the corresponding element of
+            `values` is ``NaN``. When ``True``, calculates the median of the remaining elements after excluding
+            ``NaN``. Default: ``False`` .

     Inputs:
         - **x** (Tensor) - A Tensor to calculate median with.
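Note: a sketch of the two behaviors the expanded `ignore_nan` text describes (keyword arguments as documented in this docstring; exact printed values depend on the median convention used):

    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.array([1.0, float("nan"), 3.0], np.float32))

    # With global_median=True the "input range" is the whole tensor.
    keep_nan = ops.Median(global_median=True, ignore_nan=False)
    skip_nan = ops.Median(global_median=True, ignore_nan=True)

    values, _ = keep_nan(x)   # NaN inside the range -> the returned value is NaN
    values2, _ = skip_nan(x)  # NaN excluded -> median of the remaining [1.0, 3.0]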
@@ -4433,8 +4437,7 @@ class CholeskySolve(Primitive):

 class TrilIndices(Primitive):
     r"""
-
-    and returns them as a 2-by-N Tensor.
+    Computes the indices of the lower triangular elements of a 2D matrix and returns them as a Tensor.

     .. warning::
         This is an experimental API that is subject to change or deletion.
@@ -4635,9 +4638,9 @@ class TriuIndices(Primitive):
         An optional data type of ``mstype.int32`` and ``mstype.int64`` . Default: ``mstype.int32`` .

     Outputs:
-        - **y** (Tensor) - indices of the elements in
+        - **y** (Tensor) - indices of the elements in upper triangular part of matrix. The type specified by `dtype`.
          The shape of output is :math:`(2, tril\_size)`, where :math:`tril\_size` is the number of elements in the
-
+          upper triangular matrix.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
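Note: to make the completed output description concrete, a small sketch (the index math follows directly from the definition: entries with col - row >= offset; the arguments below are illustrative):

    from mindspore import ops
    from mindspore import dtype as mstype

    # Upper-triangle indices of a 4 x 3 matrix with offset 1:
    # the elements (0,1), (0,2) and (1,2), returned as a (2, size) tensor.
    triu_indices = ops.TriuIndices(4, 3, 1, mstype.int64)
    y = triu_indices()
    print(y)        # expected: [[0 0 1]
                    #            [1 2 2]]
    print(y.shape)  # expected: (2, 3)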
mindspore/ops/operations/nn_ops.py

@@ -37,11 +37,11 @@ from ..auto_generate import (CeLU, Flatten, LogSoftmax, LogSoftmaxExt, GLU, ReLU
                              Elu, Sigmoid, Softmax, SoftplusExt, HSwish, HSigmoid, AvgPool, BiasAdd,
                              NLLLoss, OneHot, GeLU, FastGeLU, PReLU, RmsNorm, IncreFlashAttention, MSELossExt,
                              GridSampler3D, GridSampler2D, LayerNorm, LayerNormExt, HShrink, AdamWeightDecay, Dropout,
-                             ApplyRotaryPosEmb, PagedAttention, PagedAttentionMask, ReshapeAndCache,
+                             ApplyRotaryPosEmb, GroupTopk, PagedAttention, PagedAttentionMask, ReshapeAndCache,
                              FlashAttentionScore, PromptFlashAttention, Embedding, UpsampleNearest1D, UpsampleNearest2D,
                              UpsampleNearest3D, UpsampleTrilinear3D,
-                             UpsampleBilinear2D, UpsampleLinear1D,
-                             BinaryCrossEntropy, BCEWithLogitsLoss, SoftShrink,
+                             SoftMarginLoss, UpsampleBilinear2D, UpsampleLinear1D,
+                             BinaryCrossEntropy, BCEWithLogitsLoss, SoftShrink, AdaptiveMaxPool2D,
                              SmoothL1Loss)
 from .manually_defined import BatchNorm

@@ -249,78 +249,6 @@ class AdaptiveAvgPool2D(Primitive):
         self.add_prim_attr('output_size', self.output_size)


-class AdaptiveMaxPool2D(Primitive):
-    r"""
-    Performs 2D adaptive max pooling on a multi-plane input signal.
-
-    Refer to :func:`mindspore.ops.adaptive_max_pool2d` for more details.
-
-    Args:
-        output_size (Union[int, tuple]): The target output size. `output_size` can be a tuple :math:`(H, W)`,
-            or an int H for :math:`(H, H)`. :math:`H` and :math:`W` can be int or None.
-            If it is None, it means the output size is the same as the input size.
-
-    Inputs:
-        - **input_x** (Tensor) - The input of AdaptiveMaxPool2D, which is a 3D or 4D tensor,
-          with float16, float32 or float64 data type.
-
-    Outputs:
-        Tensor, with the same type as the `input_x`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> # case 1: output_size=(None, 2)
-        >>> input_x = Tensor(np.array([[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
-        ...                             [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]],
-        ...                             [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]]]), mindspore.float32)
-        >>> adaptive_max_pool_2d = ops.AdaptiveMaxPool2D((None, 2))
-        >>> output = adaptive_max_pool_2d(input_x)
-        >>> print(output[0])
-        [[[[2. 3.]
-           [5. 6.]
-           [8. 9.]]
-          [[2. 3.]
-           [5. 6.]
-           [8. 9.]]
-          [[2. 3.]
-           [5. 6.]
-           [8. 9.]]]]
-        >>> # case 2: output_size=2
-        >>> adaptive_max_pool_2d = ops.AdaptiveMaxPool2D(2)
-        >>> output = adaptive_max_pool_2d(input_x)
-        >>> print(output[0])
-        [[[[5. 6.]
-           [8. 9.]]
-          [[5. 6.]
-           [8. 9.]]
-          [[5. 6.]
-           [8. 9.]]]]
-        >>> # case 3: output_size=(1, 2)
-        >>> adaptive_max_pool_2d = ops.AdaptiveMaxPool2D((1, 2))
-        >>> output = adaptive_max_pool_2d(input_x)
-        >>> print(output[0])
-        [[[[8. 9.]]
-          [[8. 9.]]
-          [[8. 9.]]]]
-    """
-
-    @prim_attr_register
-    def __init__(self, output_size):
-        """Initialize AdaptiveMaxPool2D."""
-        validator.check_value_type("output_size", output_size, [int, tuple], self.name)
-        if isinstance(output_size, tuple):
-            validator.check_int(len(output_size), 2, validator.EQ,
-                                'length of output_size', self.name)
-        self.output_size = (output_size, output_size) if isinstance(self.output_size, int) else output_size
-        self.output_size = (-1 if self.output_size[0] is None else self.output_size[0],
-                            -1 if self.output_size[1] is None else self.output_size[1])
-        for size in self.output_size:
-            validator.check_number("output_size", size, -1, validator.GE, None)
-        self.add_prim_attr('output_size', self.output_size)
-
-
 class AdaptiveMaxPool3D(Primitive):
     r"""
     Performs 3D adaptive max pooling on a multi-plane input signal.
@@ -883,13 +811,13 @@ class Conv2D(Primitive):

     Inputs:
         - **x** (Tensor) - Input tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` or
-          :math:`(N, H_{in}, W_{in}, C_{in}
+          :math:`(N, H_{in}, W_{in}, C_{in})` depending on `data_format` .
         - **weight** (Tensor) - The convolutional kernel value, it should has shape
           :math:`(C_{out}, C_{in} / \text{group}, \text{kernel_size[0]}, \text{kernel_size[1]})` .

     Outputs:
         Tensor, the value that applied 2D convolution. The shape is :math:`(N, C_{out}, H_{out}, W_{out})`
-        or :math:`(N, H_{out}, W_{out}, C_{out}
+        or :math:`(N, H_{out}, W_{out}, C_{out})`.
         To see how different pad modes affect the output shape, please refer to
         :class:`mindspore.nn.Conv2d` for more details.

@@ -2055,17 +1983,18 @@ class Conv2DTranspose(Conv2DBackpropInput):
             If this mode is set, `pad` must be greater than or equal to 0.

         Please refer to :class:`mindspore.nn.Conv2dTranspose` for more specifications about `pad_mode`.
-        pad (Union[int, tuple[int]]): The pad value to be filled. Default: ``0`` .
-
-
-
-
-
-
-
+        pad (Union[int, tuple[int]], optional): The pad value to be filled. Default: ``0`` .
+            If `pad` is an integer, the paddings
+            of top, bottom, left and right are the same, equal to pad. If `pad` is a tuple of four integers,
+            the padding of top, bottom, left and right equal to pad[0], pad[1], pad[2], and pad[3]
+            correspondingly.
+        pad_list (Union[str, None], optional): The pad list like (top, bottom, left, right). Default: ``None`` .
+        mode (int, optional): Modes for different convolutions. The value is currently not used. Default: ``1`` .
+        stride (Union[int, tuple[int]], optional): The stride to be applied to the convolution filter. Default: ``1`` .
+        dilation (Union[int, tuple[int]], optional): Specifies the dilation rate to be used for the dilated convolution.
             Default: ``1`` .
-        group (int): Splits input into groups. Default: ``1`` .
-        data_format (str): The format of input and output data. It should be ``'NHWC'`` or ``'NCHW'`` .
+        group (int, optional): Splits input into groups. Default: ``1`` .
+        data_format (str, optional): The format of input and output data. It should be ``'NHWC'`` or ``'NCHW'`` .
             Default is ``'NCHW'`` .

     Inputs:
@@ -2133,7 +2062,7 @@ class SoftmaxCrossEntropyWithLogits(Primitive):
         - **labels** (Tensor) - Ground truth labels, with shape :math:`(N, C)`, has the same data type with `logits`.

     Outputs:
-        Tuple of 2 tensors(loss, dlogits), the `loss` shape is :math:`(N,)`,
+        Tuple of 2 tensors( `loss` , `dlogits` ), the `loss` shape is :math:`(N,)`,
         and the `dlogits` with the same shape as `logits`.

     Raises:
@@ -2167,7 +2096,7 @@ class SparseSoftmaxCrossEntropyWithLogits(Primitive):
     r"""
     Computes the softmax cross-entropy value between logits and sparse encoding labels.

-    Sets input logits as `X`, input label as `Y`, output as `loss`.
+    Sets input logits as `X`, input label as `Y`, output as `loss`. The formula is as follows:

     .. math::
         \begin{array}{ll} \\

@@ -2177,7 +2106,7 @@ class SparseSoftmaxCrossEntropyWithLogits(Primitive):
         \end{array}

     Args:
-        is_grad (bool): If ``True`` , this operation returns the computed gradient. Default: ``False`` .
+        is_grad (bool, optional): If ``True`` , this operation returns the computed gradient. Default: ``False`` .

     Inputs:
         - **logits** (Tensor) - Input logits, with shape :math:`(N, C)`. Data type must be float16 or float32.

@@ -2185,7 +2114,7 @@ class SparseSoftmaxCrossEntropyWithLogits(Primitive):
           Data type must be int32 or int64.

     Outputs:
-        Tensor, if `is_grad` is False
+        Tensor, if `is_grad` is ``False``, the output tensor is the value of loss;
         if `is_grad` is ``True`` , the output tensor is the gradient of input with the same shape as `logits`.

     Raises:
@@ -2284,10 +2213,10 @@ class ApplyMomentum(Primitive):
     Refer to :class:`mindspore.nn.Momentum` for more details about the formula and usage.

     Args:
-        use_locking (bool): Whether to enable a lock to protect the variable and accumulation tensors
+        use_locking (bool, optional): Whether to enable a lock to protect the variable and accumulation tensors
             from being updated. Default: ``False`` .
-        use_nesterov (bool): Enable Nesterov momentum. Default: ``False`` .
-        gradient_scale (float): The scale of the gradient. Default: ``1.0`` .
+        use_nesterov (bool, optional): Enable Nesterov momentum. Default: ``False`` .
+        gradient_scale (float, optional): The scale of the gradient. Default: ``1.0`` .

     Inputs:
         - **variable** (Union[Parameter, Tensor]) - Weights to be updated. Data type must be float64, int64, float,
@@ -2424,63 +2353,6 @@ class MultiMarginLoss(Primitive):
         return super().__call__(x, target, weight)


-class SoftMarginLoss(Primitive):
-    r"""
-    SoftMarginLoss operation.
-
-    Creates a criterion that optimizes a two-class classification
-    logistic loss between input tensor :math:`x` and target tensor :math:`y`
-    (containing 1 or -1).
-
-    .. math::
-        \text{loss}(x, y) = \sum_i \frac{\log(1 + \exp(-y[i]*x[i]))}{\text{x.nelement}()}
-
-    where :math:`x.nelement()` is the number of elements of x.
-
-    Args:
-        reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
-            ``'sum'`` . Default: ``'mean'`` .
-
-            - ``'none'``: no reduction will be applied.
-            - ``'mean'``: compute and return the mean of elements in the output.
-            - ``'sum'``: the output elements will be summed.
-
-    Inputs:
-        - **logits** (Tensor) - Predict data. Data type must be float16 or float32.
-        - **labels** (Tensor) - Ground truth data, with the same type and shape as `logits`.
-
-    Outputs:
-        Tensor or Scalar, if `reduction` is ``"none"``, its shape is the same as `logits`.
-        Otherwise, a scalar value will be returned.
-
-    Raises:
-        TypeError: If `logits` or `labels` is not a Tensor.
-        TypeError: If dtype of `logits` or `labels` is neither float16 nor float32.
-        ValueError: If shape of `logits` is not the same as `labels`.
-        ValueError: If `reduction` is not one of ``"none"`` , ``"mean"`` or ``"sum"`` .
-
-    Supported Platforms:
-        ``Ascend`` ``GPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> loss = ops.SoftMarginLoss()
-        >>> logits = Tensor(np.array([[0.3, 0.7], [0.5, 0.5]]), mindspore.float32)
-        >>> labels = Tensor(np.array([[-1, 1], [1, -1]]), mindspore.float32)
-        >>> output = loss(logits, labels)
-        >>> print(output)
-        0.6764238
-    """
-
-    @prim_attr_register
-    def __init__(self, reduction="mean"):
-        """Initialize SoftMarginLoss"""
-        self.init_prim_io_names(inputs=['predict', 'label'], outputs=['loss'])
-        self.reduction = validator.check_string(reduction, ['none', 'sum', 'mean'], 'reduction', self.name)
-
-
 class L2Loss(Primitive):
     r"""
     Calculates half of the L2 norm, but do not square the result.
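Note: the removed class is re-exported from `..auto_generate` (see the nn_ops import hunk above), so `ops.SoftMarginLoss` itself survives the move. The deleted docstring's example is easy to sanity-check against its own formula with plain NumPy:

    import numpy as np

    logits = np.array([[0.3, 0.7], [0.5, 0.5]], dtype=np.float32)
    labels = np.array([[-1, 1], [1, -1]], dtype=np.float32)

    # loss(x, y) = sum_i log(1 + exp(-y[i] * x[i])) / x.nelement()  ('mean' reduction)
    loss = np.log1p(np.exp(-labels * logits)).sum() / logits.size
    print(loss)  # ~0.6764238, matching the removed docstring example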
@@ -2744,12 +2616,12 @@ class ApplyRMSProp(PrimitiveWithInfer):
     :math:`\eta` represents `learning_rate`. :math:`\nabla Q_{i}(w)` represents `grad`.

     .. warning::
-        Note that in dense implementation of this algorithm,
-        but in this sparse implementation,
-        in iterations during which
+        Note that in dense implementation of this algorithm, `mean_square` and `moment` will update even if `grad` is 0,
+        but in this sparse implementation, `mean_square` and `moment` will not update
+        in iterations during which `grad` is 0.

     Args:
-        use_locking (bool): Whether to enable a lock to protect the variable and accumulation tensors
+        use_locking (bool, optional): Whether to enable a lock to protect the variable and accumulation tensors
             from being updated. Default: ``False`` .

     Inputs:
@@ -3407,7 +3279,7 @@ class ComputeAccidentalHits(Primitive):
         the weight is FLOAT_MAX. FLOAT_MAX indicates the max value in the type of Float

     Args:
-        num_true (int): The number of target classes per training example. Default: ``1`` .
+        num_true (int, optional): The number of target classes per training example. Default: ``1`` .

     Inputs:
         - **true_classes** (Tensor) - The target classes. With data type of int64
@@ -4212,7 +4084,7 @@ class KLDivLoss(Primitive):
         or ``'sum'``.

     Args:
-        reduction (str): Specifies the reduction to be applied to the output.
+        reduction (str, optional): Specifies the reduction to be applied to the output.
             Default: ``'mean'`` .

             - ``'none'``: no reduction will be applied.

@@ -4233,7 +4105,7 @@ class KLDivLoss(Primitive):
         TypeError: If neither `logits` nor `labels` is a Tensor.
         TypeError: If dtype of `logits` or `labels` is not currently supported.
         ValueError: If shape of `logits` is not the same as `labels`.
-        RuntimeError: If `logits` or `labels` is a scalar when `reduction` is 'batchmean'
+        RuntimeError: If `logits` or `labels` is a scalar when `reduction` is ``'batchmean'``.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -4710,9 +4582,10 @@ class SparseApplyAdagradV2(Primitive):
     Args:
         lr (float): Learning rate.
         epsilon (float): A small value added for numerical stability.
-        use_locking (bool): If ``True`` , the `var` and `accum` tensors will be protected from being updated.
+        use_locking (bool, optional): If ``True`` , the `var` and `accum` tensors will be protected from being updated.
             Default: ``False`` .
-        update_slots (bool): If ``True`` , the computation logic will be different to `False`.
+        update_slots (bool, optional): If ``True`` , the computation logic will be different to `False`.
+            Default: ``True`` .

     Inputs:
         - **var** (Union[Parameter, Tensor]) - Variable to be updated. The data type must be float16 or float32.
@@ -4801,8 +4674,8 @@ class ApplyProximalAdagrad(Primitive):
         the relatively highest priority data type.

     Args:
-        use_locking (bool): If ``True`` , the var and accumulation tensors will be protected
-            Default: ``False`` .
+        use_locking (bool, optional): If ``True`` , the var and accumulation tensors will be protected
+            from being updated. Default: ``False`` .

     Inputs:
         - **var** (Union[Parameter, Tensor]) - Variable to be updated. The data type must be float16 or float32.
@@ -5699,7 +5572,7 @@ class Dropout3D(PrimitiveWithInfer):
     Dropout3D can improve the independence between channel feature maps.

     Args:
-        keep_prob (float): The keep probability of a channel, between 0 and 1, e.g. `keep_prob` = 0.8,
+        keep_prob (float, optional): The keep probability of a channel, between 0 and 1, e.g. `keep_prob` = 0.8,
             means dropping out 20% of channels. Default: ``0.5`` .

     Inputs:
@@ -5751,12 +5624,14 @@ class CTCLoss(Primitive):
     such that the length of target series must be less than or equal to the length of input.

     Args:
-        preprocess_collapse_repeated (bool): If ``True`` , repeated labels will be collapsed prior to the CTC
+        preprocess_collapse_repeated (bool, optional): If ``True`` , repeated labels will be collapsed prior to the CTC
             calculation. Default: ``False`` .
-        ctc_merge_repeated (bool): If ``False`` , during CTC calculation,
+        ctc_merge_repeated (bool, optional): If ``False`` , during CTC calculation,
+            repeated non-blank labels will not be merged
             and these labels will be interpreted as individual ones. This is a simplified
             version of CTC. Default: ``True`` .
-        ignore_longer_outputs_than_inputs (bool): If ``True`` ,
+        ignore_longer_outputs_than_inputs (bool, optional): If ``True`` ,
+            sequences with longer outputs than inputs will be
             ignored. Default: ``False`` .

     Inputs:
@@ -6330,10 +6205,7 @@ class AvgPool3D(Primitive):

     Typically the input is of shape :math:`(N, C, D_{in}, H_{in}, W_{in})`, AvgPool3D outputs
     regional average in the :math:`(D_{in}, H_{in}, W_{in})`-dimension. Given kernel size
-    :math:`ks = (d_{ker}, h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1, s_2)`, the operation is as follows
-
-    .. warning::
-        "kernel_size" is in the range [1, 255]. "strides" is in the range [1, 63].
+    :math:`ks = (d_{ker}, h_{ker}, w_{ker})` and stride :math:`s = (s_0, s_1, s_2)`, the operation is as follows:

     .. math::
         \text{output}(N_i, C_j, d, h, w) =

@@ -6344,12 +6216,13 @@ class AvgPool3D(Primitive):
     This interface currently does not support Atlas A2 training series products.

     Args:
-        kernel_size (Union[int, tuple[int]]): The size of kernel used to take the average value,
+        kernel_size (Union[int, tuple[int]], optional): The size of kernel used to take the average value,
             is an int number that represents depth, height and width are both kernel_size, or a tuple
-            of three int numbers that represent depth, height and width respectively.
-
+            of three int numbers that represent depth, height and width respectively.
+            Default: ``1`` . The value range is: [1, 255].
+        strides (Union[int, tuple[int]], optional): The distance of kernel moving, an int number that represents
             the depth, height and width of movement are both strides, or a tuple of three int numbers that
-            represent depth, height and width of movement respectively. Default: ``1`` .
+            represent depth, height and width of movement respectively. Default: ``1`` . The value range is: [1, 63].
         pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
             ``"same"`` , ``"valid"`` or ``"pad"`` . Default: ``"valid"`` .

@@ -6366,16 +6239,18 @@ class AvgPool3D(Primitive):
             in the depth, height and width dimension is determined by the `pad` parameter.
             If this mode is set, `pad` must be greater than or equal to 0.

-        pad (Union(int, tuple[int], list[int])): The pad value to be filled. Default: ``0`` .
+        pad (Union(int, tuple[int], list[int]), optional): The pad value to be filled. Default: ``0`` .
+            If `pad` is an integer,
             the paddings of head, tail, top, bottom, left and right are the same, equal to pad.
             If `pad` is a tuple of six integers, the padding of head, tail, top, bottom, left and right equal to
             pad[0], pad[1], pad[2], pad[3], pad[4] and pad[5] correspondingly.
-        ceil_mode (bool): If ``True`` , ceil instead of floor to compute the output shape.
-
+        ceil_mode (bool, optional): If ``True`` , ceil instead of floor to compute the output shape.
+            Default: ``False`` .
+        count_include_pad (bool, optional): If ``True`` , averaging calculation will include the zero-padding.
             Default: ``True`` .
-        divisor_override (int): If specified, it will be used as divisor in the averaging calculation,
+        divisor_override (int, optional): If specified, it will be used as divisor in the averaging calculation,
             otherwise kernel_size will be used. Default: ``0`` .
-        data_format (str)
+        data_format (str, optional): The optional value for data format. Currently only support ``'NCDHW'`` .
             Default: ``'NCDHW'`` .

     Inputs:
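Note: the op itself is unchanged here; only the parameter text moved. As a quick check of the documented defaults, a shape-only sketch under ``"valid"`` padding, where each spatial output dimension is floor((in - kernel) / stride) + 1 (input sizes below are illustrative):

    import numpy as np
    from mindspore import Tensor, ops

    x = Tensor(np.arange(1 * 2 * 4 * 4 * 4, dtype=np.float32).reshape(1, 2, 4, 4, 4))
    avg_pool = ops.AvgPool3D(kernel_size=2, strides=2, pad_mode="valid")
    output = avg_pool(x)
    print(output.shape)  # expected: (1, 2, 2, 2, 2), since floor((4 - 2) / 2) + 1 = 2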
@@ -6980,9 +6855,9 @@ class CTCLossV2(Primitive):
         and its correlated gradient to zero. Default: ``False`` .

     Inputs:
-        - **log_probs** (Tensor) - A tensor of shape :math:`(T, N, C)`, where :math:`T` is input length, :math:`N` is
+        - **log_probs** (Tensor) - A 3D tensor of shape :math:`(T, N, C)`, where :math:`T` is input length, :math:`N` is
           batch size and :math:`C` is number of classes (including blank). Supported dtypes: float32, float64.
-        - **targets** (Tensor) - A tensor of shape :math:`(N, S)`, where :math:`S` is max target length,
+        - **targets** (Tensor) - A 2D tensor of shape :math:`(N, S)`, where :math:`S` is max target length,
           means the target sequences. Supported dtypes: int32, int64.
        - **input_lengths** (Union(Tuple, Tensor)) - A tuple or Tensor of shape :math:`(N)`.
          It means the lengths of the input. Supported dtypes: int32, int64.
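Note: the added 3D/2D qualifiers pin down the expected ranks. A shape-oriented sketch with illustrative sizes (random, unnormalized inputs; fine for checking shapes, not loss values):

    import numpy as np
    from mindspore import Tensor, ops

    T, N, C, S = 5, 2, 4, 3  # input length, batch, classes incl. blank, max target length
    log_probs = Tensor(np.random.randn(T, N, C).astype(np.float32))  # 3D (T, N, C)
    targets = Tensor(np.array([[1, 2, 2], [1, 3, 0]], np.int32))     # 2D (N, S)
    input_lengths = Tensor(np.array([T, T], np.int32))
    target_lengths = Tensor(np.array([3, 2], np.int32))

    ctc_loss = ops.CTCLossV2(blank=0, reduction="none", zero_infinity=False)
    neg_log_likelihood, log_alpha = ctc_loss(log_probs, targets, input_lengths, target_lengths)
    print(neg_log_likelihood.shape)  # expected: (N,) = (2,)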
@@ -7053,7 +6928,7 @@ class CTCLossV2Grad(Primitive):

     Args:
         blank (int): The blank label. Default: ``0`` .
-        reduction (
+        reduction (str): Apply specific reduction method to the output. Currently only support 'none'.
             Default: ``"none"`` .
         zero_infinity (bool): Whether to set infinite loss and correlation gradient to zero. Default: ``False`` .
@@ -7527,9 +7402,8 @@ class ApplyAdagradDA(Primitive):
         >>> global_step = Tensor(2, mstype.int32)
         >>> output = net(grad, lr, l1, l2, global_step)
         >>> print(output)
-
-
-        [-5.96988888e-04, -1.42478070e-03]]))
+        [[-0.00073906, -0.00136889],
+         [-0.00059699, -0.00142478]]
     """

     __mindspore_signature__ = (
@@ -8058,7 +7932,7 @@ class ApplyAdamWithAmsgradV2(Primitive):

     Args:
         use_locking (bool): If ``True`` , updating of the `var`, `m`, and `v` tensors will
-            be protected by a lock; Otherwise
+            be protected by a lock; Otherwise some contention may occur.
             Default: ``False`` .

     Inputs:
@@ -8606,13 +8480,13 @@ class TripletMarginLoss(Primitive):
         - **margin** (Tensor) - Make a margin between the positive pair and the negative pair.

     Outputs:
-        Union[Tensor, Scalar], if `reduction` is ``"none"``,
+        Union[Tensor, Scalar], if `reduction` is ``"none"``, a Tensor will be returned with a shape of :math:`(N)`.
         Otherwise, a scalar value will be returned.

     Raises:
-        TypeError: If `x
-        TypeError: If dtype of `x
-        TypeError: If
+        TypeError: If `x`, `positive`, `negative`, or `margin` is not a Tensor.
+        TypeError: If dtype of `x`, `positive`, or `negative` is not BasicType.
+        TypeError: If dtypes of `x`, `positive` and `negative` are not the same.
         TypeError: If `margin` is not float32.
         TypeError: If `p` is not an int.
         TypeError: If `eps` is not a float.

@@ -8622,7 +8496,7 @@ class TripletMarginLoss(Primitive):
         ValueError: If the dimension of input `x` or `positive` or `negative`
             is bigger than or equal to 8.
         ValueError: If length of shape of `margin` is not 0.
-        ValueError: If
+        ValueError: If shapes of `x`, `positive` and `negative` cannot broadcast.
         ValueError: If `reduction` is not one of ``'none'``, ``'mean'``, ``'sum'``.

     Supported Platforms: