mindspore-2.2.14-cp39-cp39-manylinux1_x86_64.whl → mindspore-2.3.0rc1-cp39-cp39-manylinux1_x86_64.whl
This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two package versions as they appear in their public registries.
Potentially problematic release.
This version of mindspore might be problematic.
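For readers who want to reproduce a listing like the one below, here is a minimal sketch, assuming both wheels have been downloaded locally (the filenames in the script are placeholders for your local paths). It unzips the two archives and reports per-file added/removed line counts with `difflib`, treating non-UTF-8 members (such as the `.so` binaries) as `+0 -0`. The exact tooling used to generate this page is not specified, so counts may differ in detail.

```python
# Hedged sketch: approximate the per-file "+added -removed" counts for two
# locally downloaded wheels. Wheel paths below are assumptions; adjust them.
import difflib
import zipfile

OLD = "mindspore-2.2.14-cp39-cp39-manylinux1_x86_64.whl"    # assumed local path
NEW = "mindspore-2.3.0rc1-cp39-cp39-manylinux1_x86_64.whl"  # assumed local path

def read_lines(zf, name):
    """Return decoded text lines for a member, or None if it looks binary."""
    data = zf.read(name)
    try:
        return data.decode("utf-8").splitlines()
    except UnicodeDecodeError:
        return None

with zipfile.ZipFile(OLD) as old_zf, zipfile.ZipFile(NEW) as new_zf:
    old_names, new_names = set(old_zf.namelist()), set(new_zf.namelist())
    for name in sorted(old_names | new_names):
        old_lines = read_lines(old_zf, name) if name in old_names else []
        new_lines = read_lines(new_zf, name) if name in new_names else []
        if old_lines is None or new_lines is None:
            print(f"{name} +0 -0 (binary)")
            continue
        added = removed = 0
        for line in difflib.unified_diff(old_lines, new_lines, lineterm=""):
            # Count changed lines, skipping the "---"/"+++" file headers.
            if line.startswith("+") and not line.startswith("+++"):
                added += 1
            elif line.startswith("-") and not line.startswith("---"):
                removed += 1
        if added or removed:
            print(f"{name} +{added} -{removed}")
```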
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +4 -4
- mindspore/_akg/akg/composite/build_module.py +155 -11
- mindspore/_akg/akg/config/repository.json +38 -0
- mindspore/_akg/akg/ms/info_version_adapt.py +29 -0
- mindspore/_akg/akg/tvm/contrib/nvcc.py +4 -1
- mindspore/_akg/akg/utils/ascend_profilier/path_manager.py +2 -1
- mindspore/_akg/akg/utils/composite_op_helper.py +4 -2
- mindspore/_akg/akg/utils/dump_ascend_meta.py +2 -2
- mindspore/_akg/akg/utils/gen_random.py +14 -8
- mindspore/_akg/akg/utils/op_dsl.py +11 -0
- mindspore/_akg/akg/utils/tbe_codegen_utils.py +5 -5
- mindspore/_c_dataengine.cpython-39-x86_64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-39-x86_64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-39-x86_64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +58 -0
- mindspore/_extends/builtin_operations.py +2 -1
- mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
- mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
- mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
- mindspore/_extends/parse/__init__.py +18 -14
- mindspore/_extends/parse/compile_config.py +229 -0
- mindspore/_extends/parse/parser.py +155 -59
- mindspore/_extends/parse/resources.py +40 -7
- mindspore/_extends/parse/standard_method.py +124 -204
- mindspore/_extends/remote/kernel_build_server.py +2 -0
- mindspore/_mindspore_offline_debug.cpython-39-x86_64-linux-gnu.so +0 -0
- mindspore/_profiler.py +30 -0
- mindspore/amp.py +24 -18
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/boost_cell_wrapper.py +1 -1
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +3 -1
- mindspore/common/_jit_fallback_utils.py +2 -3
- mindspore/common/_register_for_adapter.py +7 -0
- mindspore/common/_stub_tensor.py +6 -1
- mindspore/common/_utils.py +5 -17
- mindspore/common/api.py +91 -48
- mindspore/common/auto_dynamic_shape.py +27 -14
- mindspore/common/dtype.py +5 -4
- mindspore/common/dump.py +5 -4
- mindspore/common/initializer.py +1 -1
- mindspore/common/jit_config.py +20 -11
- mindspore/common/lazy_inline.py +58 -17
- mindspore/common/mindir_util.py +12 -2
- mindspore/common/mutable.py +79 -14
- mindspore/common/parameter.py +19 -4
- mindspore/common/seed.py +9 -9
- mindspore/common/sparse_tensor.py +251 -18
- mindspore/common/symbol.py +122 -0
- mindspore/common/tensor.py +321 -433
- mindspore/communication/__init__.py +3 -3
- mindspore/communication/_comm_helper.py +5 -0
- mindspore/communication/management.py +53 -38
- mindspore/config/op_info.config +22 -54
- mindspore/context.py +167 -59
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +6 -6
- mindspore/dataset/audio/transforms.py +711 -158
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/engine/cache_client.py +2 -2
- mindspore/dataset/engine/datasets.py +72 -38
- mindspore/dataset/engine/datasets_audio.py +14 -14
- mindspore/dataset/engine/datasets_standard_format.py +33 -3
- mindspore/dataset/engine/datasets_text.py +38 -38
- mindspore/dataset/engine/datasets_user_defined.py +7 -7
- mindspore/dataset/engine/datasets_vision.py +75 -71
- mindspore/dataset/engine/offload.py +5 -7
- mindspore/dataset/text/__init__.py +3 -3
- mindspore/dataset/text/transforms.py +408 -121
- mindspore/dataset/text/utils.py +9 -9
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/transforms.py +261 -76
- mindspore/dataset/utils/browse_dataset.py +9 -9
- mindspore/dataset/vision/__init__.py +3 -3
- mindspore/dataset/vision/c_transforms.py +5 -5
- mindspore/dataset/vision/transforms.py +2264 -514
- mindspore/dataset/vision/utils.py +40 -9
- mindspore/dataset/vision/validators.py +7 -1
- mindspore/experimental/optim/__init__.py +12 -2
- mindspore/experimental/optim/adadelta.py +161 -0
- mindspore/experimental/optim/adagrad.py +168 -0
- mindspore/experimental/optim/adam.py +35 -34
- mindspore/experimental/optim/adamax.py +170 -0
- mindspore/experimental/optim/adamw.py +40 -16
- mindspore/experimental/optim/asgd.py +153 -0
- mindspore/experimental/optim/lr_scheduler.py +60 -119
- mindspore/experimental/optim/nadam.py +157 -0
- mindspore/experimental/optim/optimizer.py +15 -8
- mindspore/experimental/optim/radam.py +194 -0
- mindspore/experimental/optim/rmsprop.py +154 -0
- mindspore/experimental/optim/rprop.py +164 -0
- mindspore/experimental/optim/sgd.py +28 -19
- mindspore/hal/__init__.py +34 -0
- mindspore/hal/_ascend.py +57 -0
- mindspore/hal/_base.py +57 -0
- mindspore/hal/_cpu.py +56 -0
- mindspore/hal/_gpu.py +57 -0
- mindspore/hal/device.py +356 -0
- mindspore/hal/event.py +179 -0
- mindspore/hal/stream.py +337 -0
- mindspore/include/api/data_type.h +2 -2
- mindspore/include/api/dual_abi_helper.h +16 -3
- mindspore/include/api/model.h +1 -3
- mindspore/include/api/status.h +14 -0
- mindspore/include/c_api/model_c.h +173 -0
- mindspore/include/c_api/ms/base/types.h +1 -0
- mindspore/include/c_api/types_c.h +19 -0
- mindspore/include/dataset/execute.h +1 -3
- mindspore/include/mindapi/base/format.h +125 -23
- mindspore/include/mindapi/base/types.h +7 -0
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +2044 -154
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +2044 -33
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/build_tbe_kernel.py +529 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/compiler.py +56 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/custom.py +1109 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/get_file_path.py +36 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/tbe_topi.py +556 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6325 -1767
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_add_custom.h +49 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +59 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +59 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +52 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +232 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +232 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.cpp +192 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.cpp +274 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +39 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/x86_64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/libakg.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/cpu/libakg.so +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
- mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu10.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.6/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/{libmindspore_ascend.so.1 → libmindspore_ascend.so.2} +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
- mindspore/mindrecord/__init__.py +5 -1
- mindspore/mindrecord/config.py +809 -0
- mindspore/mindrecord/filereader.py +25 -0
- mindspore/mindrecord/filewriter.py +74 -56
- mindspore/mindrecord/mindpage.py +40 -6
- mindspore/mindrecord/shardutils.py +3 -2
- mindspore/mindrecord/shardwriter.py +7 -0
- mindspore/mindrecord/tools/cifar100_to_mr.py +8 -13
- mindspore/mindrecord/tools/cifar10_to_mr.py +9 -15
- mindspore/mindrecord/tools/csv_to_mr.py +4 -9
- mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
- mindspore/mindrecord/tools/mnist_to_mr.py +7 -12
- mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -6
- mindspore/multiprocessing/__init__.py +68 -0
- mindspore/nn/cell.py +86 -133
- mindspore/nn/dynamic_lr.py +2 -2
- mindspore/nn/layer/activation.py +79 -90
- mindspore/nn/layer/basic.py +4 -80
- mindspore/nn/layer/channel_shuffle.py +3 -16
- mindspore/nn/layer/container.py +3 -3
- mindspore/nn/layer/conv.py +71 -71
- mindspore/nn/layer/embedding.py +105 -44
- mindspore/nn/layer/image.py +4 -7
- mindspore/nn/layer/normalization.py +46 -38
- mindspore/nn/layer/padding.py +26 -39
- mindspore/nn/layer/pooling.py +13 -9
- mindspore/nn/layer/rnn_cells.py +5 -15
- mindspore/nn/layer/rnns.py +6 -5
- mindspore/nn/layer/thor_layer.py +1 -2
- mindspore/nn/layer/timedistributed.py +1 -1
- mindspore/nn/layer/transformer.py +52 -50
- mindspore/nn/learning_rate_schedule.py +6 -5
- mindspore/nn/loss/loss.py +43 -64
- mindspore/nn/optim/ada_grad.py +4 -2
- mindspore/nn/optim/adadelta.py +3 -1
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +102 -181
- mindspore/nn/optim/adamax.py +4 -2
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +4 -2
- mindspore/nn/optim/ftrl.py +31 -61
- mindspore/nn/optim/lamb.py +5 -3
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +6 -4
- mindspore/nn/optim/momentum.py +13 -25
- mindspore/nn/optim/optimizer.py +6 -3
- mindspore/nn/optim/proximal_ada_grad.py +4 -2
- mindspore/nn/optim/rmsprop.py +9 -3
- mindspore/nn/optim/rprop.py +4 -2
- mindspore/nn/optim/sgd.py +6 -5
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
- mindspore/nn/probability/distribution/beta.py +2 -2
- mindspore/nn/probability/distribution/categorical.py +4 -6
- mindspore/nn/probability/distribution/cauchy.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/poisson.py +2 -2
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +13 -1
- mindspore/nn/wrap/__init__.py +2 -1
- mindspore/nn/wrap/cell_wrapper.py +33 -12
- mindspore/nn/wrap/grad_reducer.py +148 -8
- mindspore/nn/wrap/loss_scale.py +7 -7
- mindspore/numpy/__init__.py +2 -0
- mindspore/numpy/array_creations.py +2 -0
- mindspore/numpy/array_ops.py +1 -5
- mindspore/numpy/fft.py +431 -0
- mindspore/numpy/math_ops.py +54 -60
- mindspore/numpy/utils.py +3 -0
- mindspore/ops/__init__.py +5 -4
- mindspore/ops/_grad_experimental/grad_array_ops.py +4 -129
- mindspore/ops/_grad_experimental/grad_comm_ops.py +16 -22
- mindspore/ops/_grad_experimental/grad_math_ops.py +68 -283
- mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
- mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
- mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
- mindspore/ops/_op_impl/__init__.py +0 -1
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
- mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +1 -1
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
- mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -3
- mindspore/ops/_op_impl/cpu/adam.py +2 -2
- mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
- mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
- mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
- mindspore/ops/_vmap/vmap_array_ops.py +137 -101
- mindspore/ops/_vmap/vmap_base.py +8 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +102 -56
- mindspore/ops/_vmap/vmap_image_ops.py +70 -13
- mindspore/ops/_vmap/vmap_math_ops.py +74 -49
- mindspore/ops/_vmap/vmap_nn_ops.py +164 -89
- mindspore/ops/_vmap/vmap_other_ops.py +1 -1
- mindspore/ops/auto_generate/__init__.py +31 -0
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +133 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +248 -0
- mindspore/ops/auto_generate/gen_arg_handler.py +147 -0
- mindspore/ops/auto_generate/gen_extend_func.py +130 -0
- mindspore/ops/auto_generate/gen_ops_def.py +4786 -0
- mindspore/ops/auto_generate/gen_ops_prim.py +8335 -0
- mindspore/ops/auto_generate/pyboost_inner_prim.py +77 -0
- mindspore/ops/composite/__init__.py +5 -2
- mindspore/ops/composite/base.py +118 -17
- mindspore/ops/composite/math_ops.py +9 -48
- mindspore/ops/composite/multitype_ops/_compile_utils.py +166 -601
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +15 -133
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
- mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
- mindspore/ops/deprecated.py +14 -3
- mindspore/ops/extend/__init__.py +46 -0
- mindspore/ops/extend/array_func.py +152 -0
- mindspore/ops/extend/math_func.py +76 -0
- mindspore/ops/{_op_impl/tbe/atomic_addr_clean.py → extend/nn_func.py} +5 -15
- mindspore/ops/function/__init__.py +19 -11
- mindspore/ops/function/array_func.py +251 -1440
- mindspore/ops/function/clip_func.py +12 -13
- mindspore/ops/function/debug_func.py +1 -4
- mindspore/ops/function/fft_func.py +31 -0
- mindspore/ops/function/grad/grad_func.py +24 -17
- mindspore/ops/function/image_func.py +27 -21
- mindspore/ops/function/linalg_func.py +35 -68
- mindspore/ops/function/math_func.py +451 -2360
- mindspore/ops/function/nn_func.py +459 -780
- mindspore/ops/function/other_func.py +4 -5
- mindspore/ops/function/parameter_func.py +5 -93
- mindspore/ops/function/random_func.py +24 -80
- mindspore/ops/function/sparse_unary_func.py +9 -16
- mindspore/ops/function/spectral_func.py +1 -1
- mindspore/ops/function/vmap_func.py +14 -14
- mindspore/ops/functional.py +56 -62
- mindspore/ops/op_info_register.py +22 -19
- mindspore/ops/operations/__init__.py +19 -19
- mindspore/ops/operations/_grad_ops.py +20 -723
- mindspore/ops/operations/_inner_ops.py +178 -286
- mindspore/ops/operations/_scalar_ops.py +5 -480
- mindspore/ops/operations/_sequence_ops.py +4 -34
- mindspore/ops/operations/array_ops.py +99 -2491
- mindspore/ops/operations/comm_ops.py +38 -46
- mindspore/ops/operations/custom_ops.py +8 -8
- mindspore/ops/operations/debug_ops.py +100 -31
- mindspore/ops/operations/image_ops.py +1 -217
- mindspore/ops/operations/inner_ops.py +3 -38
- mindspore/ops/operations/linalg_ops.py +1 -49
- mindspore/{rewrite/ast_transformers → ops/operations/manually_defined}/__init__.py +11 -4
- mindspore/ops/operations/manually_defined/_inner.py +61 -0
- mindspore/ops/operations/manually_defined/ops_def.py +1391 -0
- mindspore/ops/operations/math_ops.py +703 -4601
- mindspore/ops/operations/nn_ops.py +374 -1748
- mindspore/ops/operations/other_ops.py +50 -42
- mindspore/ops/operations/random_ops.py +3 -52
- mindspore/ops/primitive.py +196 -96
- mindspore/ops_generate/__init__.py +27 -0
- mindspore/ops_generate/arg_dtype_cast.py +248 -0
- mindspore/ops_generate/arg_handler.py +147 -0
- mindspore/ops_generate/gen_aclnn_implement.py +266 -0
- mindspore/ops_generate/gen_ops.py +1062 -0
- mindspore/ops_generate/gen_ops_inner_prim.py +129 -0
- mindspore/ops_generate/gen_pyboost_func.py +932 -0
- mindspore/ops_generate/gen_utils.py +188 -0
- mindspore/ops_generate/op_proto.py +138 -0
- mindspore/ops_generate/pyboost_utils.py +364 -0
- mindspore/ops_generate/template.py +238 -0
- mindspore/parallel/__init__.py +5 -4
- mindspore/parallel/_auto_parallel_context.py +21 -76
- mindspore/parallel/_cell_wrapper.py +16 -9
- mindspore/parallel/_cost_model_context.py +1 -1
- mindspore/parallel/_dp_allreduce_fusion.py +159 -159
- mindspore/parallel/_parallel_serialization.py +30 -46
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +19 -7
- mindspore/parallel/_transformer/__init__.py +1 -1
- mindspore/parallel/_transformer/layers.py +1 -1
- mindspore/parallel/_transformer/loss.py +1 -1
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/op_parallel_config.py +1 -1
- mindspore/parallel/_transformer/transformer.py +1 -1
- mindspore/parallel/_utils.py +131 -6
- mindspore/parallel/algo_parameter_config.py +6 -6
- mindspore/parallel/checkpoint_transform.py +180 -196
- mindspore/parallel/cluster/__init__.py +15 -0
- mindspore/parallel/cluster/process_entity/__init__.py +18 -0
- mindspore/parallel/cluster/process_entity/_api.py +345 -0
- mindspore/parallel/cluster/process_entity/_utils.py +116 -0
- mindspore/parallel/cluster/run.py +139 -0
- mindspore/parallel/mpi/__init__.py +1 -1
- mindspore/parallel/mpi/_mpi_config.py +1 -1
- mindspore/parallel/parameter_broadcast.py +152 -0
- mindspore/parallel/shard.py +99 -2
- mindspore/profiler/common/util.py +20 -0
- mindspore/profiler/envprofiling.py +1 -1
- mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +66 -0
- mindspore/profiler/parser/ascend_analysis/file_manager.py +77 -0
- mindspore/profiler/parser/ascend_analysis/function_event.py +146 -0
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +108 -0
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +80 -0
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +52 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +104 -0
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +59 -0
- mindspore/profiler/parser/ascend_cluster_generator.py +14 -9
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -1
- mindspore/profiler/parser/ascend_flops_generator.py +20 -4
- mindspore/profiler/parser/ascend_hccl_generator.py +25 -277
- mindspore/profiler/parser/ascend_msprof_exporter.py +112 -132
- mindspore/profiler/parser/ascend_msprof_generator.py +68 -285
- mindspore/profiler/parser/ascend_op_generator.py +75 -42
- mindspore/profiler/parser/ascend_timeline_generator.py +293 -135
- mindspore/profiler/parser/base_timeline_generator.py +6 -0
- mindspore/profiler/parser/framework_parser.py +3 -2
- mindspore/profiler/parser/integrator.py +3 -1
- mindspore/profiler/parser/msadvisor_analyzer.py +1 -1
- mindspore/profiler/parser/msadvisor_parser.py +1 -1
- mindspore/profiler/parser/profiler_info.py +5 -0
- mindspore/profiler/profiling.py +296 -166
- mindspore/rewrite/__init__.py +2 -13
- mindspore/rewrite/api/node.py +121 -35
- mindspore/rewrite/api/pattern_engine.py +2 -3
- mindspore/rewrite/api/scoped_value.py +16 -15
- mindspore/rewrite/api/symbol_tree.py +45 -29
- mindspore/rewrite/ast_helpers/__init__.py +3 -6
- mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
- mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
- mindspore/rewrite/common/__init__.py +1 -2
- mindspore/rewrite/common/config.py +24 -0
- mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
- mindspore/rewrite/{namer.py → common/namer.py} +63 -18
- mindspore/rewrite/common/namespace.py +118 -0
- mindspore/rewrite/node/__init__.py +5 -5
- mindspore/rewrite/node/call_function.py +23 -7
- mindspore/rewrite/node/cell_container.py +7 -3
- mindspore/rewrite/node/control_flow.py +53 -28
- mindspore/rewrite/node/node.py +212 -196
- mindspore/rewrite/node/node_manager.py +51 -22
- mindspore/rewrite/node/node_topological_manager.py +3 -23
- mindspore/rewrite/parsers/__init__.py +12 -0
- mindspore/rewrite/parsers/arguments_parser.py +8 -9
- mindspore/rewrite/parsers/assign_parser.py +635 -413
- mindspore/rewrite/parsers/attribute_parser.py +3 -4
- mindspore/rewrite/parsers/class_def_parser.py +107 -144
- mindspore/rewrite/parsers/constant_parser.py +5 -5
- mindspore/rewrite/parsers/container_parser.py +4 -6
- mindspore/rewrite/parsers/expr_parser.py +55 -0
- mindspore/rewrite/parsers/for_parser.py +31 -98
- mindspore/rewrite/parsers/function_def_parser.py +13 -5
- mindspore/rewrite/parsers/if_parser.py +28 -10
- mindspore/rewrite/parsers/module_parser.py +8 -182
- mindspore/rewrite/parsers/parser.py +1 -5
- mindspore/rewrite/parsers/parser_register.py +1 -1
- mindspore/rewrite/parsers/return_parser.py +5 -10
- mindspore/rewrite/parsers/while_parser.py +59 -0
- mindspore/rewrite/sparsify/utils.py +1 -1
- mindspore/rewrite/symbol_tree/__init__.py +20 -0
- mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +704 -185
- mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
- mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
- mindspore/run_check/_check_version.py +6 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/safeguard/rewrite_obfuscation.py +9 -19
- mindspore/scipy/__init__.py +2 -1
- mindspore/scipy/fft.py +133 -0
- mindspore/scipy/linalg.py +140 -55
- mindspore/scipy/ops.py +15 -71
- mindspore/scipy/ops_grad.py +5 -34
- mindspore/scipy/optimize/line_search.py +2 -2
- mindspore/scipy/optimize/minimize.py +1 -1
- mindspore/train/__init__.py +3 -2
- mindspore/train/_utils.py +178 -4
- mindspore/train/amp.py +167 -245
- mindspore/train/callback/_backup_and_restore.py +4 -4
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +39 -13
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_landscape.py +14 -8
- mindspore/train/callback/_loss_monitor.py +2 -2
- mindspore/train/callback/_on_request_exit.py +2 -2
- mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
- mindspore/train/callback/_summary_collector.py +7 -7
- mindspore/train/callback/_time_monitor.py +2 -2
- mindspore/train/data_sink.py +1 -1
- mindspore/train/dataset_helper.py +13 -4
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/accuracy.py +7 -7
- mindspore/train/metrics/confusion_matrix.py +8 -6
- mindspore/train/metrics/cosine_similarity.py +6 -4
- mindspore/train/metrics/error.py +2 -2
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/metrics/perplexity.py +2 -1
- mindspore/train/metrics/topk.py +2 -2
- mindspore/train/mind_ir_pb2.py +75 -6
- mindspore/train/model.py +24 -22
- mindspore/train/serialization.py +256 -132
- mindspore/train/summary/summary_record.py +51 -28
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/version.py +1 -1
- {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/METADATA +2 -2
- {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/RECORD +515 -1061
- {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/entry_points.txt +1 -0
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
- mindspore/config/super_bar_config.json +0 -544
- mindspore/gen_ops.py +0 -273
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
- mindspore/nn/layer/flash_attention.py +0 -189
- mindspore/ops/_op_impl/cpu/concat.py +0 -39
- mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
- mindspore/ops/_op_impl/tbe/__init__.py +0 -47
- mindspore/ops/_op_impl/tbe/abs.py +0 -38
- mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/acos.py +0 -37
- mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/acosh.py +0 -37
- mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
- mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
- mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
- mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
- mindspore/ops/_op_impl/tbe/add.py +0 -42
- mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/add_n.py +0 -39
- mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
- mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
- mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
- mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
- mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
- mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
- mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
- mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
- mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
- mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/asin.py +0 -37
- mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/asinh.py +0 -37
- mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/assign.py +0 -79
- mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
- mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
- mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/atan.py +0 -37
- mindspore/ops/_op_impl/tbe/atan2.py +0 -38
- mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/atanh.py +0 -37
- mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
- mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
- mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
- mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
- mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
- mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
- mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
- mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
- mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
- mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
- mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
- mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
- mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
- mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
- mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cast.py +0 -55
- mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/cdist.py +0 -38
- mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/ceil.py +0 -37
- mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/celu.py +0 -39
- mindspore/ops/_op_impl/tbe/centralization.py +0 -39
- mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
- mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/concat.py +0 -40
- mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
- mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
- mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
- mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
- mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
- mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/cos.py +0 -37
- mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/cosh.py +0 -37
- mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
- mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
- mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cummin.py +0 -41
- mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
- mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
- mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
- mindspore/ops/_op_impl/tbe/diag.py +0 -38
- mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
- mindspore/ops/_op_impl/tbe/dilation.py +0 -40
- mindspore/ops/_op_impl/tbe/div.py +0 -41
- mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
- mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
- mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
- mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
- mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
- mindspore/ops/_op_impl/tbe/elu.py +0 -38
- mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/equal.py +0 -42
- mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/erf.py +0 -37
- mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfc.py +0 -37
- mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
- mindspore/ops/_op_impl/tbe/exp.py +0 -40
- mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
- mindspore/ops/_op_impl/tbe/expm1.py +0 -37
- mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
- mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/fill.py +0 -56
- mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/flatten.py +0 -48
- mindspore/ops/_op_impl/tbe/floor.py +0 -37
- mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
- mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
- mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
- mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
- mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
- mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
- mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
- mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/ger.py +0 -43
- mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/greater.py +0 -43
- mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
- mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
- mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
- mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
- mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
- mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
- mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
- mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/im2col.py +0 -42
- mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
- mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
- mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/inv.py +0 -38
- mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/invert.py +0 -37
- mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/iou.py +0 -38
- mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/is_close.py +0 -40
- mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
- mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
- mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
- mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
- mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
- mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
- mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/lerp.py +0 -38
- mindspore/ops/_op_impl/tbe/less.py +0 -41
- mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/log.py +0 -40
- mindspore/ops/_op_impl/tbe/log1p.py +0 -37
- mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
- mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
- mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
- mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
- mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/matmul.py +0 -53
- mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
- mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
- mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
- mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum.py +0 -39
- mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
- mindspore/ops/_op_impl/tbe/minimum.py +0 -40
- mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mish.py +0 -37
- mindspore/ops/_op_impl/tbe/mod.py +0 -41
- mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/mul.py +0 -37
- mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
- mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
- mindspore/ops/_op_impl/tbe/neg.py +0 -39
- mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
- mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
- mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
- mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
- mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
- mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
- mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/pack.py +0 -58
- mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
- mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
- mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/pdist.py +0 -36
- mindspore/ops/_op_impl/tbe/pooling.py +0 -46
- mindspore/ops/_op_impl/tbe/population_count.py +0 -38
- mindspore/ops/_op_impl/tbe/pow.py +0 -41
- mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/prelu.py +0 -37
- mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/range.py +0 -39
- mindspore/ops/_op_impl/tbe/real_div.py +0 -38
- mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
- mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
- mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
- mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
- mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6.py +0 -38
- mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/renorm.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
- mindspore/ops/_op_impl/tbe/rint.py +0 -37
- mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roll.py +0 -42
- mindspore/ops/_op_impl/tbe/round.py +0 -38
- mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
- mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
- mindspore/ops/_op_impl/tbe/select.py +0 -38
- mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/selu.py +0 -39
- mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sgd.py +0 -62
- mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sign.py +0 -38
- mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/sin.py +0 -37
- mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sinh.py +0 -37
- mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/slice.py +0 -58
- mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
- mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax.py +0 -37
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
- mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/softplus.py +0 -37
- mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softsign.py +0 -37
- mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sort.py +0 -38
- mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/split_d.py +0 -38
- mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/split_v.py +0 -39
- mindspore/ops/_op_impl/tbe/splitv.py +0 -39
- mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/square.py +0 -38
- mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
- mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
- mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
- mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
- mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
- mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
- mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
- mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
- mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
- mindspore/ops/_op_impl/tbe/sub.py +0 -39
- mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tan.py +0 -38
- mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh.py +0 -37
- mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
- mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
- mindspore/ops/_op_impl/tbe/tile.py +0 -37
- mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
- mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
- mindspore/ops/_op_impl/tbe/transpose.py +0 -60
- mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
- mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
- mindspore/ops/_op_impl/tbe/trunc.py +0 -39
- mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/unpack.py +0 -38
- mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
- mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
- mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
- mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
- mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
- mindspore/ops/_tracefunc.py +0 -241
- mindspore/ops/arg_dtype_cast.py +0 -54
- mindspore/rewrite/api/tree_node_helper.py +0 -60
- mindspore/rewrite/ast_creator_register.py +0 -37
- mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
- mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
- mindspore/rewrite/namespace.py +0 -53
- {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.2.14.dist-info → mindspore-2.3.0rc1.dist-info}/top_level.txt +0 -0
@@ -30,6 +30,12 @@ from mindspore.ops.primitive import Primitive
 from mindspore.ops.primitive import PrimitiveWithInfer
 from mindspore.ops.primitive import PrimitiveWithCheck
 from mindspore.ops.primitive import prim_attr_register
+from ..auto_generate import (CeLU, Flatten, LogSoftmax, ReLU, ReLU6,
+                             Elu, Sigmoid, Softmax, HSwish, HSigmoid, AvgPool, BiasAdd,
+                             NLLLoss, OneHot, GeLU, FastGeLU, PReLU,
+                             GridSampler3D, GridSampler2D, LayerNorm, HShrink, AdamWeightDecay, Dropout,
+                             ApplyRotaryPosEmb, PagedAttention, PagedAttentionMask, ReshapeAndCache)
+from .manually_defined import BatchNorm


 def _check_positive_int_or_tuple(arg_name, arg_value, prim_name, allow_four=False,
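The hunk above only relocates where these primitives are defined; assuming the public `mindspore.ops` namespace keeps re-exporting them (which this hunk does not show directly), a minimal doctest-style sketch of user code that should be unaffected is:

>>> import numpy as np
>>> import mindspore
>>> from mindspore import Tensor, ops
>>> relu = ops.ReLU()          # definition now imported from the auto_generate module per the hunk above
>>> softmax = ops.Softmax(axis=-1)
>>> x = Tensor(np.array([-1.0, 0.0, 2.0]), mindspore.float32)
>>> print(relu(x))
[0. 0. 2.]
>>> print(softmax(x).shape)
(3,)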
@@ -95,83 +101,6 @@ def _update_attr_by_format(arg_value, arg_format):
     return ret


-class CeLU(Primitive):
-    r"""
-    Computes CeLU (Continuously differentiable exponential linear units) of input tensors element-wise.
-
-    Refer to :func:`mindspore.ops.celu` for more details.
-
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Args:
-        alpha (float, optional): The :math:`\alpha` value for the Celu formulation. Default: ``1.0`` .
-
-    Inputs:
-        - **input_x** (Tensor) - The input tensor with a dtype of float16 or float32.
-
-    Outputs:
-        Tensor, with the same type and shape as the `input_x`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.array([-2.0, -1.0, 1.0, 2.0]), mindspore.float32)
-        >>> celu = ops.CeLU(alpha=1.0)
-        >>> output = celu(input_x)
-        >>> print(output)
-        [-0.86466473 -0.63212055 1. 2. ]
-        >>> input_x = Tensor(2.1, mindspore.float32)
-        >>> output = celu(input_x)
-        >>> print(output)
-        2.1
-    """
-
-    @prim_attr_register
-    def __init__(self, alpha=1.0):
-        """Initialize CeLU"""
-        validator.check_value_type("alpha", alpha, [float], self.name)
-        validator.check_float(alpha, 0.0, validator.NE, "alpha", self.name)
-        self.alpha = alpha
-        self.add_prim_attr('alpha', self.alpha)
-
-
-class Flatten(Primitive):
-    r"""
-    Flattens a tensor without changing its batch size on the 0-th axis.
-
-    Refer to :func:`mindspore.ops.flatten` for more details.
-
-    Inputs:
-        - **input_x** (Tensor) - Tensor of shape :math:`(N, \ldots)` to be flattened, where :math:`N` is batch size.
-
-    Outputs:
-        Tensor, the shape of the output tensor is :math:`(N, X)`, where :math:`X` is
-        the product of the remaining dimension.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.ones(shape=[1, 2, 3, 4]), mindspore.float32)
-        >>> flatten = ops.Flatten()
-        >>> output = flatten(input_x)
-        >>> print(output.shape)
-        (1, 24)
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        pass
-
-
 class AdaptiveAvgPool3D(Primitive):
     r"""
     AdaptiveAvgPool3D operation.
@@ -427,86 +356,6 @@ class AdaptiveMaxPool3D(Primitive):
         self.init_prim_io_names(inputs=['x', 'output_size'], outputs=['y', 'argmax'])


-class Softmax(Primitive):
-    r"""
-    Applies the Softmax operation to the input tensor on the specified axis.
-
-    Refer to :func:`mindspore.ops.softmax` for more details.
-
-    Args:
-        axis (Union[int, tuple]): The axis to perform the Softmax operation. Default: ``-1`` .
-
-    Inputs:
-        - **logits** (Tensor) - Tensor of shape :math:`(N, *)`, where :math:`*` means, any number of
-          additional dimensions. Supported dtypes:
-
-          - Ascend: float16, float32.
-          - GPU/CPU: float16, float32, float64.
-
-    Outputs:
-        Tensor, with the same type and shape as the logits.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> logits = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
-        >>> softmax = ops.Softmax()
-        >>> output = softmax(logits)
-        >>> print(output)
-        [0.01165623 0.03168492 0.08612854 0.23412167 0.6364086 ]
-    """
-
-    @prim_attr_register
-    def __init__(self, axis=-1):
-        """Initialize Softmax."""
-        self.init_prim_io_names(inputs=['x'], outputs=['output'])
-        validator.check_value_type("axis", axis, [int, tuple], self.name)
-        if isinstance(axis, int):
-            self.add_prim_attr('axis', (axis,))
-        for item in self.axis:
-            validator.check_value_type("item of axis", item, [int], self.name)
-
-
-class LogSoftmax(Primitive):
-    r"""
-    Log Softmax activation function.
-
-    Refer to :func:`mindspore.ops.log_softmax` for more details.
-
-    Args:
-        axis (int, optional): The axis to perform the Log softmax operation. Default: ``-1`` .
-
-    Inputs:
-        - **logits** (Tensor) - Tensor of shape :math:`(N, *)`, where :math:`*` means, any number of
-          additional dimensions, with float16 or float32 data type.
-
-    Outputs:
-        Tensor, with the same type and shape as the `logits`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> logits = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
-        >>> log_softmax = ops.LogSoftmax()
-        >>> output = log_softmax(logits)
-        >>> print(output)
-        [-4.4519143 -3.4519143 -2.4519143 -1.4519144 -0.4519144]
-    """
-
-    @prim_attr_register
-    def __init__(self, axis=-1):
-        """Initialize LogSoftmax."""
-        validator.check_value_type("axis", axis, [int], self.name)
-
-
 class Softplus(Primitive):
     r"""
     Softplus activation function.
@@ -586,39 +435,6 @@ class Softsign(Primitive):
         self.init_prim_io_names(inputs=['x'], outputs=['output'])


-class ReLU(Primitive):
-    r"""
-    Computes ReLU (Rectified Linear Unit activation function) of input tensors element-wise.
-
-    Refer to :func:`mindspore.ops.relu` for more details.
-
-    Inputs:
-        - **input_x** (Tensor) - Input Tensor of numeric types.
-
-    Outputs:
-        Tensor, has the same dtype and shape as `input_x`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
-        >>> relu = ops.ReLU()
-        >>> output = relu(input_x)
-        >>> print(output)
-        [[0. 4. 0.]
-        [2. 0. 9.]]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize ReLU"""
-        self.init_prim_io_names(inputs=['x'], outputs=['output'])
-
-
 class ReLUV3(Primitive):
     r"""
     Computes ReLUV3 (Rectified Linear Unit activation function) of input tensors element-wise.
@@ -633,7 +449,7 @@ class ReLUV3(Primitive):
     Inputs:
         - **input_x** (Tensor) - Tensor of shape :math:`(N, *)`, where :math:`*` means, any number of
           additional dimensions, data type is
-          `number <https://www.mindspore.cn/docs/en/r2.
+          `number <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.html#mindspore.dtype>`_.

     Outputs:
         Tensor of shape :math:`(N, *)`, with the same type and shape as the `input_x`.
@@ -749,243 +565,6 @@ class SeLU(Primitive):
         self.init_prim_io_names(inputs=['input_x'], outputs=['output'])


-class ReLU6(PrimitiveWithCheck):
-    r"""
-    Computes ReLU (Rectified Linear Unit) upper bounded by 6 of input tensors element-wise.
-
-    Refer to :func:`mindspore.ops.relu6` for more details.
-
-    Inputs:
-        - **input_x** (Tensor) - Tensor of shape :math:`(N, *)`,
-          where :math:`*` means any number of additional dimensions.
-          Data type must be float16, float32.
-
-    Outputs:
-        Tensor, with the same type and shape as the `input_x`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
-        >>> relu6 = ops.ReLU6()
-        >>> result = relu6(input_x)
-        >>> print(result)
-        [[0. 4. 0.]
-        [2. 0. 6.]]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize ReLU6"""
-        self.init_prim_io_names(inputs=['x'], outputs=['output'])
-
-    def check_shape(self, input_x):
-        pass
-
-    def check_dtype(self, input_x):
-        validator.check_tensor_dtype_valid('input_x', input_x, (mstype.float16, mstype.float32), self.name)
-
-
-class ReLUV2(Primitive):
-    r"""
-    The ReLUV2 interface is deprecated, please use the :class:`mindspore.ops.ReLU` instead.
-
-    Rectified Linear Unit activation function.
-
-    It returns element-wise :math:`\max(0, x)`, specially, the neurons with the negative output
-    will be suppressed and the active neurons will stay the same.
-
-    .. math::
-
-        \text{ReLU}(x) = (x)^+ = \max(0, x)
-
-    Inputs:
-        - **input_x** (Tensor) - The input tensor must be a 4-D tensor.
-
-    Outputs:
-        - **output** (Tensor) - Has the same type and shape as the `input_x`.
-        - **mask** (Tensor) - A tensor, but it is meaningless.
-
-    Raises:
-        TypeError: If `input_x` is not a Tensor.
-        ValueError: If shape of `input_x` is not 4-D.
-
-    Supported Platforms:
-        deprecated
-
-    Examples:
-        >>> input_x = Tensor(np.array([[[[1, -2], [-3, 4]], [[-5, 6], [7, -8]]]]), mindspore.float32)
-        >>> relu_v2 = ops.ReLUV2()
-        >>> output, _= relu_v2(input_x)
-        >>> print(output)
-        [[[[1. 0.]
-        [0. 4.]]
-        [[0. 6.]
-        [7. 0.]]]]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize ReLUV2"""
-        self.init_prim_io_names(inputs=['x'], outputs=['output', 'mask'])
-
-
-class Elu(Primitive):
-    r"""
-    Exponential Linear Uint activation function.
-
-    Applies the exponential linear unit function element-wise.
-    The activation function is defined as:
-
-    .. math::
-
-        \text{ELU}(x)= \left\{
-        \begin{array}{align}
-        \alpha(e^{x} - 1) & \text{if } x \le 0\\
-        x & \text{if } x \gt 0\\
-        \end{array}\right.
-
-    The picture about ELU looks like this `ELU <https://en.wikipedia.org/wiki/
-    Activation_function#/media/File:Activation_elu.svg>`_ .
-
-    Args:
-        alpha (float): The alpha value of ELU, the data type is float. Only support '1.0' currently. Default: ``1.0`` .
-
-    Inputs:
-        - **input_x** (Tensor) - The input of ELU is a Tensor of any dimension with data type of
-          float16, float32 or float64.
-
-    Outputs:
-        Tensor, has the same shape and data type as `input_x`.
-
-    Raises:
-        TypeError: If `alpha` is not a float.
-        TypeError: If dtype of `input_x` is neither float16, float32 nor float64.
-        ValueError: If `alpha` is not equal to 1.0.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
-        >>> elu = ops.Elu()
-        >>> output = elu(input_x)
-        >>> print(output)
-        [[-0.63212055 4. -0.99966455]
-        [ 2. -0.99326205 9. ]]
-    """
-
-    @prim_attr_register
-    def __init__(self, alpha=1.0):
-        """Initialize Elu"""
-        validator.check_value_type("alpha", alpha, [float], self.name)
-        validator.check_number("alpha", alpha, 1.0, validator.EQ, self.name)
-        self.init_prim_io_names(inputs=['x'], outputs=['output', 'mask'])
-
-
-class HSwish(Primitive):
-    r"""
-    Hard swish activation function.
-
-    Refer to :func:`mindspore.ops.hardswish` for more details.
-
-    Inputs:
-        - **input_x** (Tensor) - The input Tensor.
-
-    Outputs:
-        Tensor, with the same type and shape as the `input_x`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> hswish = ops.HSwish()
-        >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
-        >>> result = hswish(input_x)
-        >>> print(result)
-        [-0.3333 -0.3333 0 1.666 0.6665]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize HSwish."""
-        self.init_prim_io_names(inputs=['x'], outputs=['output'])
-
-
-class Sigmoid(Primitive):
-    r"""
-    Sigmoid activation function.
-
-    Refer to :func:`mindspore.ops.sigmoid` for more details.
-
-    Inputs:
-        - **input_x** (Tensor) - Tensor of any dimension.
-
-    Outputs:
-        Tensor, with the same type and shape as the input_x.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.array([1, 2, 3, 4, 5]), mindspore.float32)
-        >>> sigmoid = ops.Sigmoid()
-        >>> output = sigmoid(input_x)
-        >>> print(output)
-        [0.7310586 0.880797 0.95257413 0.98201376 0.9933072 ]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize Sigmoid."""
-        self.init_prim_io_names(inputs=['x'], outputs=['output'])
-
-
-class HSigmoid(Primitive):
-    r"""
-    Hard sigmoid activation function.
-
-    Refer to :func:`mindspore.ops.hardsigmoid` for more details.
-
-    Inputs:
-        - **input_x** (Tensor) - The input Tensor.
-
-    Outputs:
-        Tensor, with the same type and shape as the `input_x`.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> hsigmoid = ops.HSigmoid()
-        >>> input_x = Tensor(np.array([-1, -2, 0, 2, 1]), mindspore.float16)
-        >>> result = hsigmoid(input_x)
-        >>> print(result)
-        [0.3333 0.1666 0.5 0.8335 0.6665]
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize HSigmoid."""
-        self.init_prim_io_names(inputs=['input_x'], outputs=['output'])
-
-
 class Tanh(Primitive):
     r"""
     Computes hyperbolic tangent of input element-wise.
@@ -1189,175 +768,44 @@ class InstanceNormV2(Primitive):
         ValueError: If :math:`H * W <= 1` in input `x`.
         ValueError: If the shape of either item in the inputs is neither 4D nor 5D.
         ValueError: If `epsilon` is not in the range of [0, 1).
-        ValueError: If `momentum` is not in the range of [0, 1].
-
-    Examples:
-        >>> x = Tensor(input_data=np.random.randn(128, 48, 32, 64, 12), dtype=mindspore.float32)
-        >>> gamma = Tensor(input_data=np.random.randn(128, 48, 1, 1, 12), dtype=mstype.float32)
-        >>> beta = Tensor(input_data=np.random.randn(128, 48, 1, 1, 12), dtype=mstype.float32)
-        >>> mean = Tensor(input_data=np.random.randn(128, 48, 1, 1, 12), dtype=mstype.float32)
-        >>> var = Tensor(input_data=np.random.randn(128, 48, 1, 1, 12), dtype=mstype.float32)
-        >>> ops = P.InstanceNormV2()
-        >>> output = ops(x, gamma, beta, mean, var)
-        >>> y_shape = output[0].shape
-        >>> print(y_shape)
-        (128, 48, 32, 64, 12)
-        >>> batch_mean_shape = output[1].shape
-        >>> print(batch_mean_shape)
-        (128, 48, 1, 1, 12)
-        >>> batch_var_shape = output[2].shape
-        >>> print(batch_var_shape)
-        (128, 48, 1, 1, 12)
-    """
-    __mindspore_signature__ = (
-        sig.make_sig('x', dtype=sig.sig_dtype.T1),
-        sig.make_sig('gamma', dtype=sig.sig_dtype.T),
-        sig.make_sig('beta', dtype=sig.sig_dtype.T),
-        sig.make_sig('mean', dtype=sig.sig_dtype.T),
-        sig.make_sig('variance', dtype=sig.sig_dtype.T),
-    )
-
-    @prim_attr_register
-    def __init__(self, is_training=True, momentum=0.1, epsilon=1e-5):
-        """Initialize InstanceNormV2."""
-        self.init_prim_io_names(inputs=['x', 'gamma', 'beta', 'mean', 'variance'],
-            outputs=['y', 'batch_mean', 'batch_variance'])
-        validator.check_is_float(epsilon, 'epsilon', self.name)
-        validator.check_is_float(momentum, 'momentum', self.name)
-        validator.check_float_range(epsilon, 0, 1, validator.INC_RIGHT, 'epsilon', self.name)
-        validator.check_float_range(momentum, 0, 1, validator.INC_BOTH, 'momentum', self.name)
-        validator.check_bool(is_training, "is_training", self.name)
-
-
-class BatchNorm(PrimitiveWithInfer):
-    r"""
-    Batch Normalization for input data and updated parameters.
-
-    Batch Normalization is widely used in convolutional neural networks. This operation
-    applies Batch Normalization over inputs to avoid internal covariate shift as described
-    in the paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal
-    Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the
-    features using a mini-batch of data and the learned parameters can be described
-    in the following formula,
-
-    .. math::
-
-        y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
-
-    where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon,
-    :math:`mean` is the mean of :math:`x`,
-    :math:`variance` is the variance of :math:`x`.
-
-    .. warning::
-        - If the operation is used for inference, and outputs "reserve_space_1" and "reserve_space_2" are available,
-          then "reserve_space_1" has the same value as "mean" and "reserve_space_2" has the same value as "variance".
-        - For Ascend 310, the result accuracy fails to reach 1‰ due to the square root instruction.
-
-    Args:
-        is_training (bool): If `is_training` is ``True`` , `mean` and `variance` are computed during training.
-            If `is_training` is ``False`` , they're loaded from checkpoint during inference. Default: ``False`` .
-        epsilon (float): A small value added for numerical stability. Default: ``1e-5``, value must be (0, 1] .
-        momentum (float): The hyper parameter to compute moving average for running_mean and running_var
-            (e.g. :math:`new\_running\_mean = (1 - momentum) * running\_mean + momentum * current\_mean`).
-            Momentum value must be [0, 1]. Default: ``0.1`` .
-        data_format (str): The optional value for data format, is ``'NHWC'`` or ``'NCHW'``, and the ``'NHWC'`` format
-            is only supported in GPU target. Default: ``"NCHW"`` .
-
-    Inputs:
-        If `is_training` is ``False`` , inputs are Tensors.
-
-        - **input_x** (Tensor) - Tensor of shape :math:`(N, C)`, with float16 or float32 data type.
-        - **scale** (Tensor) - Tensor of shape :math:`(C,)`, with float16 or float32 data type.
-        - **bias** (Tensor) - Tensor of shape :math:`(C,)`, has the same data type with `scale`.
-        - **mean** (Tensor) - Tensor of shape :math:`(C,)`, has the same data type with `scale`.
-        - **variance** (Tensor) - Tensor of shape :math:`(C,)`, has the same data type with `scale`.
-
-        If `is_training` is ``True`` , `scale`, `bias`, `mean` and `variance` are Parameters.
-
-        - **input_x** (Tensor) - Tensor of shape :math:`(N, C)`, with float16 or float32 data type.
-        - **scale** (Parameter) - Parameter of shape :math:`(C,)`, with float16 or float32 data type.
-        - **bias** (Parameter) - Parameter of shape :math:`(C,)`, has the same data type with `scale`.
-        - **mean** (Parameter) - Parameter of shape :math:`(C,)`, has the same data type with `scale`.
-        - **variance** (Parameter) - Parameter of shape :math:`(C,)`, has the same data type with `scale`.
-
-    Outputs:
-        Tuple of 5 Tensors, the normalized inputs and the updated parameters.
-
-        - **output_x** (Tensor) - The same type and shape as the input_x. The shape is :math:`(N, C)`.
-        - **batch_mean** (Tensor) - Tensor of shape :math:`(C,)`.
-        - **batch_variance** (Tensor) - Tensor of shape :math:`(C,)`.
-        - **reserve_space_1** (Tensor) - Tensor of shape :math:`(C,)`.
-        - **reserve_space_2** (Tensor) - Tensor of shape :math:`(C,)`.
-
-    Raises:
-        TypeError: If `is_training` is not a bool.
-        TypeError: If dtype of `epsilon` or `momentum` is not float.
-        TypeError: If `data_format` is not a str.
-        TypeError: If `input_x`, `scale`, `bias`, `mean` or `variance` is not a Tensor.
-        TypeError: If dtype of `input_x`, `scale` is neither float16 nor float32.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
+        ValueError: If `momentum` is not in the range of [0, 1].

     Examples:
-        >>>
-        >>>
-        >>>
-        >>>
-        >>>
-        >>>
-        >>>
-        >>>
-        >>>
-
-        >>>
-
-
+        >>> x = Tensor(input_data=np.random.randn(128, 48, 32, 64, 12), dtype=mindspore.float32)
+        >>> gamma = Tensor(input_data=np.random.randn(128, 48, 1, 1, 12), dtype=mstype.float32)
+        >>> beta = Tensor(input_data=np.random.randn(128, 48, 1, 1, 12), dtype=mstype.float32)
+        >>> mean = Tensor(input_data=np.random.randn(128, 48, 1, 1, 12), dtype=mstype.float32)
+        >>> var = Tensor(input_data=np.random.randn(128, 48, 1, 1, 12), dtype=mstype.float32)
+        >>> ops = P.InstanceNormV2()
+        >>> output = ops(x, gamma, beta, mean, var)
+        >>> y_shape = output[0].shape
+        >>> print(y_shape)
+        (128, 48, 32, 64, 12)
+        >>> batch_mean_shape = output[1].shape
+        >>> print(batch_mean_shape)
+        (128, 48, 1, 1, 12)
+        >>> batch_var_shape = output[2].shape
+        >>> print(batch_var_shape)
+        (128, 48, 1, 1, 12)
     """
-
     __mindspore_signature__ = (
-        sig.make_sig('
-        sig.make_sig('
-        sig.make_sig('
-        sig.make_sig('mean',
-        sig.make_sig('variance',
+        sig.make_sig('x', dtype=sig.sig_dtype.T1),
+        sig.make_sig('gamma', dtype=sig.sig_dtype.T),
+        sig.make_sig('beta', dtype=sig.sig_dtype.T),
+        sig.make_sig('mean', dtype=sig.sig_dtype.T),
+        sig.make_sig('variance', dtype=sig.sig_dtype.T),
    )

    @prim_attr_register
-    def __init__(self, is_training=
-        """Initialize
-
-
-
-
-        validator.check_value_type('is_training', is_training, (bool,), self.name)
+    def __init__(self, is_training=True, momentum=0.1, epsilon=1e-5):
+        """Initialize InstanceNormV2."""
+        self.init_prim_io_names(inputs=['x', 'gamma', 'beta', 'mean', 'variance'],
+            outputs=['y', 'batch_mean', 'batch_variance'])
+        validator.check_is_float(epsilon, 'epsilon', self.name)
+        validator.check_is_float(momentum, 'momentum', self.name)
         validator.check_float_range(epsilon, 0, 1, validator.INC_RIGHT, 'epsilon', self.name)
         validator.check_float_range(momentum, 0, 1, validator.INC_BOTH, 'momentum', self.name)
-
-        if context.get_context("device_target") != "GPU" and self.format == "NHWC":
-            raise ValueError(f"For '{self.name}', the 'NHWC' format is only supported in GPU target, "
-                f"but got the 'data_format' is {self.format} and "
-                f"the platform is {context.get_context('device_target')}.")
-        self.add_prim_attr('data_format', self.format)
-        self.init_prim_io_names(inputs=['x', 'scale', 'offset', 'mean', 'variance'],
-            outputs=['y', 'batch_mean', 'batch_variance', 'reserve_space_1', 'reserve_space_2'])
-
-    def infer_shape(self, input_x, scale, bias, mean, variance):
-        input_x_channel = input_x[-1] if self.format == "NHWC" else input_x[1]
-        validator.check_equal_int(len(scale), 1, "scale rank", self.name)
-        validator.check("scale shape", scale, "bias shape", bias, validator.EQ, self.name)
-        validator.check("scale shape[0]", scale[0], "input_x channel", input_x_channel, validator.EQ, self.name)
-        if not self.is_training:
-            validator.check_equal_int(len(mean), 1, "mean rank", self.name)
-            validator.check("mean shape", mean, "variance shape", variance, validator.EQ, self.name)
-            validator.check("mean shape", mean, "scale shape", scale, validator.EQ, self.name)
-        return input_x, scale, scale, scale, scale
-
-    def infer_dtype(self, input_x, scale, bias, mean, variance):
-        validator.check_tensor_dtype_valid("input_x", input_x, [mstype.float16, mstype.float32], self.name)
-        args = {"scale": scale, "bias": bias, "mean": mean, "variance": variance}
-        validator.check_tensors_dtypes_same_and_valid(args, [mstype.float16, mstype.float32], self.name)
-        return input_x, mstype.float32, mstype.float32, mstype.float32, mstype.float32
+        validator.check_bool(is_training, "is_training", self.name)


 class Conv2D(Primitive):
@@ -1379,21 +827,26 @@ class Conv2D(Primitive):
     , :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.

     Here are the indices' meanings:
-    - :math:`i` corresponds to the batch number, ranging from 0 to N-1, where N is the batch size of the input.

-    - :math:`
+    - :math:`i` corresponds to the batch number, the range is :math:`[0, N-1]`,
+      where :math:`N` is the batch size of the input.
+
+    - :math:`j` corresponds to the output channel, the range is :math:`[0, C_{out}-1]`,
+      where :math:`C_{out}` is the number of
       output channels, which is also equal to the number of kernels.

-    - :math:`k` corresponds to the input channel,
+    - :math:`k` corresponds to the input channel, the range is :math:`[0, C_{in}-1]`,
+      where :math:`C_{in}` is the number of
       input channels, which is also equal to the number of channels in the convolutional kernels.

-    Therefore, in the above formula, :math:`{bias}(C_{
-    output channel, :math:`{weight}(C_{
+    Therefore, in the above formula, :math:`{bias}(C_{\text{out}_j})` represents the bias of the :math:`j`-th
+    output channel, :math:`{weight}(C_{\text{out}_j}, k)` represents the slice of the :math:`j`-th convolutional
     kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
     channel in the :math:`i`-th batch of the input feature map.

-    The shape of the convolutional kernel is given by :math:`(
-    where :math
+    The shape of the convolutional kernel is given by :math:`(\text{kernel_size[0]},\text{kernel_size[1]})`,
+    where :math:`\text{kernel_size[0]}`
+    and :math:`\text{kernel_size[1]}` are the height and width of the kernel, respectively.
     If we consider the input and output channels as well as the `group` parameter, the complete kernel shape
     will be :math:`(C_{out}, C_{in} / \text{group}, \text{kernel_size[0]}, \text{kernel_size[1]})`,
     where `group` is the number of groups dividing `x`'s input channel when applying group convolution.
@@ -1450,7 +903,7 @@ class Conv2D(Primitive):
         group (int, optional): Specifies the number of groups dividing `x`'s input channel when applying
             group convolution. Default: ``1`` .
         data_format (str, optional): The optional value for data format, is ``'NHWC'`` or ``'NCHW'`` .
-            Default: ``"NCHW"
+            Default: ``"NCHW"``. (NHWC is only supported in GPU now.)

     Inputs:
         - **x** (Tensor) - Input tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` or
@@ -1844,13 +1297,13 @@ class MaxPool(_Pool):
            not only the height of movement but also the width of movement, or a tuple of two int numbers that
            represent height and width of movement respectively. Default: ``1`` .
        pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
-            ``
+            ``'same'`` or ``'valid'`` . Default: ``'valid'`` .

-            - ``
+            - ``'same'``: Pad the input around its edges so that the shape of input and output
              are the same when `stride` is set to ``1``.
              The amount of padding to is calculated by the operator internally, If the amount is even, it is
              uniformly distributed around the input, if it is odd, the excess amount goes to the right/bottom side.
-            - ``
+            - ``'valid'``: No padding is applied to the input, and the output returns the maximum
              possible height and width. Extra pixels that could not complete a full stride will
              be discarded.

@@ -1869,8 +1322,8 @@ class MaxPool(_Pool):

    Raises:
        TypeError: If `kernel_size` or `strides` is neither int nor tuple.
-        ValueError: If `pad_mode` is neither 'valid' nor 'same' with not case sensitive.
-        ValueError: If `data_format` is neither 'NCHW' nor 'NHWC'
+        ValueError: If `pad_mode` is neither ``'valid'`` nor ``'same'`` with not case sensitive.
+        ValueError: If `data_format` is neither ``'NCHW'`` nor ``'NHWC'``.
        ValueError: If `kernel_size` or `strides` is less than 1.
        ValueError: If length of shape of `input` is not equal to 4.

@@ -2033,10 +1486,10 @@ class MaxPool3D(Primitive):
            pad[3], pad[4] and pad[5] correspondingly.
        ceil_mode (Union[bool, None]): Whether to use ceil instead of floor to calculate output shape.
            Only effective in "pad" mode.
-            When
+            When `pad_mode` is ``"pad"`` and "ceil_mode" is ``None`` , `ceil_mode` will be set as ``False``.
            Default: ``None`` .
-        data_format (str) : The optional value for data format. Currently only support ``
-            Default: ``
+        data_format (str) : The optional value for data format. Currently only support ``"NCDHW"`` .
+            Default: ``"NCDHW"`` .

    Inputs:
        - **x** (Tensor) - Tensor of shape :math:`(N, C, D_{in}, H_{in}, W_{in})`.
@@ -2049,10 +1502,10 @@ class MaxPool3D(Primitive):
        TypeError: If `kernel_size` or `strides` is neither an int nor a tuple.
        TypeError: If `pad_mode` or `data_format` is not a string.
        ValueError: If numbers in `kernel_size` or `strides` are not positive.
-        ValueError: If `pad_mode` is not one of
-        ValueError: If `pad_mode` is
+        ValueError: If `pad_mode` is not one of ``"SAME"``, ``"VALID"`` or ``"PAD"``.
+        ValueError: If `pad_mode` is ``"SAME"`` or ``"VALID"``, `ceil_mode` is not ``None``.
        ValueError: If `kernel_size` or `strides` is a tuple whose length is not equal to 3.
-        ValueError: If `data_format` is not
+        ValueError: If `data_format` is not ``"NCDHW"``.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
@@ -2278,7 +1731,7 @@ class MaxUnpool3D(Primitive):
        ValueError: If numbers in `strides` or `ksize` is negative.
        ValueError: If numbers in `pads` is negative.
        ValueError: If `ksize`, `strides` or `pads` is a tuple whose length is not equal to 3.
-        ValueError: If `data_format` is not a str or is neither
+        ValueError: If `data_format` is not a str or is neither ``'NCDHW'`` nor ``'NDHWC'``.
        ValueError: If `output_shape` whose length is neither 0 or 5.
        ValueError: If `output_shape` is not close to output size range
            computed by attr `ksize, strides, pads`.
@@ -2321,98 +1774,6 @@ class MaxUnpool3D(Primitive):
        self.output_shape = output_shape


-class AvgPool(Primitive):
-    r"""
-    Average pooling operation.
-
-    Refer to :func:`mindspore.ops.avg_pool2d` for more details.
-
-    Args:
-        kernel_size (Union[int, tuple[int]]): The size of kernel used to take the average value,
-            is an int number that represents height and width of the kernel, or a tuple
-            of two int numbers that represent height and width respectively. Default: ``1`` .
-        strides (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
-            the height and width of movement are both strides, or a tuple of two int numbers that
-            represent height and width of movement respectively. Default: ``1`` .
-        pad_mode (str, optional): Specifies the padding mode with a padding value of 0. It can be set to:
-            ``"same"`` or ``"valid"`` . Default: ``"valid"`` .
-
-            - ``"same"``: Pad the input around its edges so that the shape of input and output
-              are the same when `stride` is set to ``1``.
-              The amount of padding to is calculated by the operator internally, If the amount is even, it is
-              uniformly distributed around the input, if it is odd, the excess amount goes to the right/bottom side.
-            - ``"valid"``: No padding is applied to the input, and the output returns the maximum
-              possible height and width. Extra pixels that could not complete a full stride will
-              be discarded.
-
-        data_format (str, optional): The format of input and output data. It should be ``'NHWC'`` or ``'NCHW'`` .
-            Default: ``'NCHW'`` .
-
-    Inputs:
-        - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
-          Supported dtypes: float16, float32, float64.
-
-    Outputs:
-        Tensor, with shape :math:`(N, C_{out}, H_{out}, W_{out})`.
-
-    Raises:
-        TypeError: If `kernel_size` or `strides` is neither int nor tuple.
-        TypeError: If dtype of `x` is not float16, float32 or float64.
-        ValueError: If `kernel_size` or `strides` is less than 1.
-        ValueError: If `pad_mode` is neither 'valid' nor 'same' with not case sensitive.
-        ValueError: If `data_format` is neither 'NCHW' nor 'NHWC'.
-        ValueError: If length of shape of `x` is not equal to 4.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops, nn
-        >>> class Net(nn.Cell):
-        ...     def __init__(self):
-        ...         super(Net, self).__init__()
-        ...         self.avgpool_op = ops.AvgPool(pad_mode="VALID", kernel_size=2, strides=1)
-        ...
-        ...     def construct(self, x):
-        ...         result = self.avgpool_op(x)
-        ...         return result
-        ...
-        >>> x = Tensor(np.arange(1 * 3 * 3 * 4).reshape(1, 3, 3, 4), mindspore.float32)
-        >>> net = Net()
-        >>> output = net(x)
-        >>> print(output)
-        [[[[ 2.5 3.5 4.5]
-        [ 6.5 7.5 8.5]]
-        [[14.5 15.5 16.5]
-        [18.5 19.5 20.5]]
-        [[26.5 27.5 28.5]
-        [30.5 31.5 32.5]]]]
-    """
-
-    @prim_attr_register
-    def __init__(self, kernel_size=1, strides=1, pad_mode="valid", data_format="NCHW"):
-        """Initialize AvgPool."""
-        self.init_prim_io_names(inputs=['x'], outputs=['output'])
-        validator.check_value_type('kernel_size', kernel_size, [int, tuple], self.name)
-        validator.check_value_type('strides', strides, [int, tuple], self.name)
-        validator.check_value_type('pad_mode', pad_mode, [str], self.name)
-        self.pad_mode = validator.check_string(pad_mode.upper(), ['VALID', 'SAME'], 'pad_mode', self.name)
-        self.add_prim_attr("pad_mode", self.pad_mode)
-        self.format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.name)
-        if context.get_context("device_target") != "GPU" and self.format == "NHWC":
-            raise ValueError(f"For '{self.name}', the 'NHWC' format is only supported in GPU target, "
-                f"but got the 'data_format' is {self.format} and "
-                f"the platform is {context.get_context('device_target')}.")
-        self.add_prim_attr('data_format', self.format)
-        self.kernel_size = _check_positive_int_or_tuple(
-            "kernel_size", kernel_size, self.name, allow_four=False, ret_four=True)
-        self.add_prim_attr("kernel_size", self.kernel_size)
-        self.strides = _check_positive_int_or_tuple("strides", strides, self.name, allow_four=False, ret_four=True)
-        self.add_prim_attr("strides", self.strides)
-
-
 class AvgPoolV1(Primitive):
     r"""
     Average-pooling operation.
@@ -2641,7 +2002,7 @@ class MaxPool3DWithArgmax(Primitive):
        TypeError: If `ksize` , `strides` , `pads` or `dilation` is not int or tuple.
        ValueError: If `ksize` or `strides` is less than 1.
        ValueError: If `pads` is less than 0.
-        ValueError: If `data_format` is not 'NCDHW'
+        ValueError: If `data_format` is not ``'NCDHW'``.
        ValueError: If `argmax_type` is not mindspore.int64 or mindspore.int32.

    Supported Platforms:
@@ -2743,10 +2104,10 @@ class Conv2DTranspose(Conv2DBackpropInput):
        TypeError: If `kernel_size`, `stride`, `pad` or `dilation` is neither an int nor a tuple.
        TypeError: If `out_channel` or `group` is not an int.
        ValueError: If `kernel_size`, `stride` or `dilation` is less than 1.
-        ValueError: If `pad_mode` is not one of 'same'
+        ValueError: If `pad_mode` is not one of ``'same'``, ``'valid'`` or ``'pad'``.
        ValueError: If `padding` is a tuple whose length is not equal to 4.
-        ValueError: If `pad_mode` it not equal to 'pad' and `pad` is not equal to (0, 0, 0, 0).
-        ValueError: If `data_format` is neither 'NCHW' nor 'NHWC'
+        ValueError: If `pad_mode` it not equal to ``'pad'`` and `pad` is not equal to (0, 0, 0, 0).
+        ValueError: If `data_format` is neither ``'NCHW'`` nor ``'NHWC'``.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``
@@ -2772,146 +2133,6 @@ class Conv2DTranspose(Conv2DBackpropInput):
            pad_list, mode, stride, dilation, group, data_format)


-class BiasAdd(Primitive):
-    r"""
-    Returns the sum of the input Tensor and the bias Tensor. Before adding, the bias Tensor will be broadcasted to be
-    consistent with the shape of the input Tensor.
-
-    Args:
-        data_format (str, optional): The format of input and output data.
-            It should be ``"NHWC"`` , ``"NCHW"`` or ``"NCDHW"`` .
-            Default is ``"NCHW"`` .
-
-    Inputs:
-        - **input_x** (Tensor) - The input tensor. The shape can be 2-5 dimensions. Supported dtypes:
-
-          - Ascend/CPU: all Number type.
-          - GPU: float16, float32, int8.
-
-        - **bias** (Tensor) - The bias tensor, with shape :math:`(C)`. C must be the same as channel dimension C of
-          `input_x`. It has the same type as `input_x`.
-
-    Outputs:
-        Tensor, with the same shape and data type as `input_x`.
-
-    Raises:
-        TypeError: If `data_format` is not a str.
-        ValueError: If value of `data_format` is not in the range of ['NHWC','NCHW','NCDHW'].
-        TypeError: If `input_x` or `bias` is not a Tensor.
-        TypeError: If dtype of `input_x` and `bias` is inconsistent.
-        TypeError: If dimension of `input_x` is not in the range [2, 5].
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import mindspore
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> input_x = Tensor(np.arange(6).reshape((2, 3)), mindspore.float32)
-        >>> bias = Tensor(np.random.random(3).reshape((3,)), mindspore.float32)
-        >>> bias_add = ops.BiasAdd()
-        >>> output = bias_add(input_x, bias)
-        >>> print(output.shape)
-        (2, 3)
-    """
-
-    @prim_attr_register
-    def __init__(self, data_format="NCHW"):
-        """Initialize BiasAdd."""
-        self.init_prim_io_names(inputs=['x', 'b'], outputs=['output'])
-        self.format = validator.check_string(data_format, ['NCHW', 'NHWC', 'NCDHW'], 'format', self.name)
-        self.add_prim_attr('data_format', self.format)
-
-
-class NLLLoss(Primitive):
-    r"""
-    Gets the negative log likelihood loss between logits and labels.
-
-    The nll loss with :math:`reduction = none` can be described as:
-
-    .. math::
-
-        \ell(x, t)=L=\left\{l_{1}, \ldots, l_{N}\right\}^{\top},
-        \quad l_{n}=-w_{t_{n}} x_{n, t_{n}},
-        \quad w_{c}=\text { weight }[c] \cdot 1
-
-    where :math:`x` is the logits, :math:`t` is the labels, :math:`w` is the weight,
-    N is the batch size, :math:`c` belonging to [0, C-1] is class index, where :math:`C` is the number of classes.
-
-    If :math:`reduction \neq none` (default ``'mean'`` ), then
-
-    .. math::
-
-        \ell(x, t)=\left\{\begin{array}{ll}
-        \sum_{n=1}^{N} \frac{1}{\sum_{n=1}^{N} w_{t n}} l_{n}, & \text { if reduction }=\text { 'mean'; } \\
-        \sum_{n=1}^{N} l_{n}, & \text { if reduction }=\text { 'sum' }
-        \end{array}\right.
-
-    Args:
-        reduction (str, optional): Apply specific reduction method to the output: ``'none'`` , ``'mean'`` ,
-            ``'sum'`` . Default: ``'mean'`` .
-
-            - ``'none'``: no reduction will be applied.
-            - ``'mean'``: compute and return the weighted mean of elements in the output.
-            - ``'sum'``: the output elements will be summed.
-
-        ignore_index (int): Specifies a target value that is ignored
-            and does not contribute to the input gradient. Default: ``-100`` .
-
-    Inputs:
-        - **logits** (Tensor) - Input logits, with shape :math:`(N, C)`. Data type only supports float32 or float16.
-        - **labels** (Tensor) - Ground truth labels, with shape :math:`(N,)`, where each value belong to
-          :math:`[0, C-1]`. Data type only supports int32 or int64.
-        - **weight** (Tensor) - The rescaling weight to each class, with shape :math:`(C,)` and data type only
-          supports float32 or float16.
-
-    Outputs:
-        Tuple of 2 tensors composed with `loss` and `total_weight`.
-
-        - **loss** (Tensor) - When `reduction` is ``'none'`` and `logits` is a 2D tensor,
-          the `loss` shape is :math:`(N,)`. Otherwise, the `loss` is a scalar.
-          The data type is the same with `input's`.
-        - **total_weight** (Tensor) - The `total_weight` is a scalar. The data type is the same with `weight's`.
-
-    Raises:
-        TypeError: If dtype of `logits` or `weight` is neither float16 nor float32.
-        TypeError: If dtype of `labels` is neither int32 nor int64.
-        ValueError: If `logits` is not a one or two dimension tensor, `labels` and `weight` are not
-            one dimension tensors.
-            When `logits` is a two dimension tensor, the first dimension of `logits` is not equal to `labels`,
-            and second dimension of `logits` is not equal to `weight`.
-            When `logits` is a one dimension tensor, the dimensions of `logits`, `labels`
-            and `weight` should be equal to each other.
-        ValueError: If the value of `labels` exceed :math:`[0, C-1]`, where :math:`C` is the number of classes.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> from mindspore import Tensor, ops
-        >>> logits = Tensor(np.array([[0.5488135, 0.71518934],
-        ...                           [0.60276335, 0.5448832],
-        ...                           [0.4236548, 0.6458941]]).astype(np.float32))
-        >>> labels = Tensor(np.array([0, 0, 0]).astype(np.int32))
-        >>> weight = Tensor(np.array([0.3834415, 0.79172504]).astype(np.float32))
-        >>> nll_loss = ops.NLLLoss(reduction="mean")
-        >>> loss, weight = nll_loss(logits, labels, weight)
-        >>> print(loss)
-        -0.52507716
-        >>> print(weight)
-        1.1503246
-    """
-
-    @prim_attr_register
-    def __init__(self, reduction="mean", ignore_index=-100):
-        """Initialize NLLLoss"""
-        self.init_prim_io_names(inputs=['x', 'target', "weight"], outputs=['loss', 'total_weight'])
-        self.reduction = validator.check_string(reduction, ['none', 'sum', 'mean'], 'reduction', self.name)
-        validator.check_value_type('ignore_index', ignore_index, [int], self.name)
-
-
 class SoftmaxCrossEntropyWithLogits(Primitive):
     r"""
     Gets the softmax cross-entropy value between logits and labels with one-hot encoding.
@@ -3232,7 +2453,7 @@ class MultiMarginLoss(Primitive):
          or float64.
        - **target** (Tensor) - Ground truth labels, with shape :math:`(N,)`. Data type only support int64. The
          value of target should be non-negative, less than C.
-        - **weight** (Tensor) - The rescaling weight to each class with shape :math:`(C,)`. Data type only
+        - **weight** (Tensor, optional) - The rescaling weight to each class with shape :math:`(C,)`. Data type only
          support float16, float32 or float64.

    Outputs:
@@ -3254,6 +2475,11 @@ class MultiMarginLoss(Primitive):
|
|
|
3254
2475
|
>>> print(output)
|
|
3255
2476
|
0.6666667
|
|
3256
2477
|
"""
|
|
2478
|
+
__mindspore_signature__ = (
|
|
2479
|
+
sig.make_sig('x'),
|
|
2480
|
+
sig.make_sig('target'),
|
|
2481
|
+
sig.make_sig('weight', default=None)
|
|
2482
|
+
)
|
|
3257
2483
|
|
|
3258
2484
|
@prim_attr_register
|
|
3259
2485
|
def __init__(self, p=1, margin=1.0, reduction="mean"):
|
|
@@ -3264,6 +2490,9 @@ class MultiMarginLoss(Primitive):
|
|
|
3264
2490
|
self.reduction = validator.check_string(reduction, ['none', 'sum', 'mean'], 'reduction', self.name)
|
|
3265
2491
|
self.init_prim_io_names(inputs=['x', 'target', 'weight'], outputs=['y'])
|
|
3266
2492
|
|
|
2493
|
+
def __call__(self, x, target, weight=None):
|
|
2494
|
+
return super().__call__(x, target, weight)
|
|
2495
|
+
|
|
3267
2496
|
|
|
3268
2497
|
class SoftMarginLoss(Primitive):
|
|
3269
2498
|
r"""
|
|
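The MultiMarginLoss hunks above make the `weight` input optional (a ``None`` default in both the signature table and `__call__`). A minimal sketch of what that enables at call time, using hypothetical shapes consistent with the docstring (x of shape (N, C), target of shape (N,) with int64 values below C); this is an illustration, not code taken from the package:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    # Hypothetical inputs: N=2 samples, C=3 classes.
    x = Tensor(np.array([[0.3, 0.7, 0.2], [0.5, 0.1, 0.9]]), ms.float32)
    target = Tensor(np.array([1, 2]), ms.int64)

    loss_fn = ops.MultiMarginLoss(p=1, margin=1.0, reduction="mean")

    # With the new default, the per-class weight can simply be omitted...
    loss_unweighted = loss_fn(x, target)

    # ...or passed explicitly as a (C,) rescaling vector, as before.
    weight = Tensor(np.array([1.0, 0.5, 2.0]), ms.float32)
    loss_weighted = loss_fn(x, target, weight)
    print(loss_unweighted, loss_weighted)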
@@ -3412,7 +2641,9 @@ class RNNTLoss(PrimitiveWithInfer):
  blank_label (int): blank label. Default: ``0`` .

  Inputs:
- - **acts** (Tensor) - Tensor of shape :math:`(B, T, U, V)
+ - **acts** (Tensor) - Tensor of shape :math:`(B, T, U, V)`, where :math:`B` is batch,
+ :math:`T` is sequence length, :math:`U` is label length and :math:`V` is output dim.
+ Data type must be float16 or float32.
  - **labels** (Tensor) - Tensor of shape :math:`(B, U-1)`. Data type is int32.
  - **input_lengths** (Tensor) - Tensor of shape :math:`(B,)`. Data type is int32.
  - **label_lengths** (Tensor) - Tensor of shape :math:`(B,)`. Data type is int32.
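The rewritten `acts` entry spells out the :math:`(B, T, U, V)` layout. Below is a small sketch of tensors that satisfy the shapes and dtypes listed above; sizes are hypothetical, and the actual call is left commented out because the primitive targets Ascend:

    import numpy as np
    from mindspore import Tensor, ops

    B, T, U, V = 2, 10, 6, 5  # batch, sequence length, label length + 1, output dim (hypothetical)

    acts = Tensor(np.random.randn(B, T, U, V).astype(np.float32))   # (B, T, U, V), float16/float32
    labels = Tensor(np.ones((B, U - 1), dtype=np.int32))            # (B, U-1), int32
    input_lengths = Tensor(np.full((B,), T, dtype=np.int32))        # (B,), int32
    label_lengths = Tensor(np.full((B,), U - 1, dtype=np.int32))    # (B,), int32

    rnnt_loss = ops.RNNTLoss(blank_label=0)
    # costs, grads = rnnt_loss(acts, labels, input_lengths, label_lengths)  # runs on Ascend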
@@ -3714,107 +2945,34 @@ class ApplyCenteredRMSProp(Primitive):
|
|
|
3714
2945
|
|
|
3715
2946
|
Examples:
|
|
3716
2947
|
>>> import numpy as np
|
|
3717
|
-
>>> from mindspore import Tensor, nn, ops, Parameter
|
|
3718
|
-
>>> class Net(nn.Cell):
|
|
3719
|
-
... def __init__(self):
|
|
3720
|
-
... super(Net, self).__init__()
|
|
3721
|
-
... self.apply_centerd_rms_prop = ops.ApplyCenteredRMSProp()
|
|
3722
|
-
... self.var = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="var")
|
|
3723
|
-
...
|
|
3724
|
-
... def construct(self, mean_grad, mean_square, moment, grad, decay, momentum, epsilon, lr):
|
|
3725
|
-
... out = self.apply_centerd_rms_prop(self.var, mean_grad, mean_square, moment, grad,
|
|
3726
|
-
... lr, decay, momentum, epsilon)
|
|
3727
|
-
... return out
|
|
3728
|
-
...
|
|
3729
|
-
>>> net = Net()
|
|
3730
|
-
>>> mean_grad = Tensor(np.ones([2, 2]).astype(np.float32))
|
|
3731
|
-
>>> mean_square = Tensor(np.ones([2, 2]).astype(np.float32))
|
|
3732
|
-
>>> moment = Tensor(np.ones([2, 2]).astype(np.float32))
|
|
3733
|
-
>>> grad = Tensor(np.ones([2, 2]).astype(np.float32))
|
|
3734
|
-
>>> output = net(mean_grad, mean_square, moment, grad, 0.0, 1e-10, 0.001, 0.01)
|
|
3735
|
-
>>> print(net.var.asnumpy())
|
|
3736
|
-
[[0.68377227 0.68377227]
|
|
3737
|
-
[0.68377227 0.68377227]]
|
|
3738
|
-
"""
|
|
3739
|
-
|
|
3740
|
-
@prim_attr_register
|
|
3741
|
-
def __init__(self, use_locking=False):
|
|
3742
|
-
"""Initialize ApplyCenteredRMSProp."""
|
|
3743
|
-
self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
|
|
3744
|
-
self.add_prim_attr('side_effect_mem', True)
|
|
3745
|
-
|
|
3746
|
-
|
|
3747
|
-
class LayerNorm(Primitive):
|
|
3748
|
-
r"""
|
|
3749
|
-
Applies the Layer Normalization to the input tensor.
|
|
3750
|
-
|
|
3751
|
-
This operator will normalize the input tensor on given axis. LayerNorm is described in the paper
|
|
3752
|
-
`Layer Normalization <https://arxiv.org/abs/1607.06450>`_.
|
|
3753
|
-
|
|
3754
|
-
.. math::
|
|
3755
|
-
y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
|
|
3756
|
-
|
|
3757
|
-
where :math:`\gamma` is scale, :math:`\beta` is bias, :math:`\epsilon` is epsilon.
|
|
3758
|
-
|
|
3759
|
-
Args:
|
|
3760
|
-
begin_norm_axis (int): The begin axis of the `input_x` to apply LayerNorm,
|
|
3761
|
-
the value must be in [-1, rank(input_x)). Default: ``1`` .
|
|
3762
|
-
begin_params_axis (int): The begin axis of the parameter input (`gamma`, `beta`) to
|
|
3763
|
-
apply LayerNorm, the value must be in [-1, rank(input_x)). Default: ``1`` .
|
|
3764
|
-
epsilon (float): A value added to the denominator for numerical stability(:math:`\epsilon`). Default: ``1e-7`` .
|
|
3765
|
-
|
|
3766
|
-
Inputs:
|
|
3767
|
-
- **input_x** (Tensor) - Tensor of shape :math:`(N, \ldots)`.
|
|
3768
|
-
The input of LayerNorm. Supported dtypes: float16, float32, float64.
|
|
3769
|
-
- **gamma** (Tensor) - Tensor of shape :math:`(P_\text{begin_params_axis}, \ldots, P_\text{rank(input_x)-1})`.
|
|
3770
|
-
The learnable parameter :math:`\gamma` as the scale on norm. Supported dtypes: float16, float32, float64.
|
|
3771
|
-
- **beta** (Tensor) - Tensor of shape :math:`(P_\text{begin_params_axis}, \ldots, P_\text{rank(input_x)-1})`.
|
|
3772
|
-
The learnable parameter :math:`\beta` as the scale on norm. Supported dtypes: float16, float32, float64.
|
|
3773
|
-
|
|
3774
|
-
Outputs:
|
|
3775
|
-
tuple[Tensor], tuple of 3 tensors, the normalized input and the updated parameters.
|
|
3776
|
-
|
|
3777
|
-
- **output_x** (Tensor) - The normalized input, has the same type and shape as the `input_x`.
|
|
3778
|
-
- **mean** (Tensor) - The first `begin_norm_axis` dimensions of `mean` shape is the same as `input_x`,
|
|
3779
|
-
and the remaining dimensions are 1. Suppose the shape of the `input_x` is :math:`(x_1, x_2, \ldots, x_R)`,
|
|
3780
|
-
the shape of the `mean` is :math:`(x_1, \ldots, x_{begin\_params\_axis}, 1, \ldots, 1)`
|
|
3781
|
-
(when `begin_params_axis=0`, the shape of `mean` is :math:`(1, \ldots, 1)` ).
|
|
3782
|
-
- **variance** (Tensor) - Shape is the same as `mean` .
|
|
3783
|
-
|
|
3784
|
-
Raises:
|
|
3785
|
-
TypeError: If `begin_norm_axis` or `begin_params_axis` is not an int.
|
|
3786
|
-
TypeError: If `epsilon` is not a float.
|
|
3787
|
-
TypeError: If `input_x`, `gamma` or `beta` is not a Tensor.
|
|
3788
|
-
|
|
3789
|
-
Supported Platforms:
|
|
3790
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
3791
|
-
|
|
3792
|
-
Examples:
|
|
3793
|
-
>>> import mindspore
|
|
3794
|
-
>>> import numpy as np
|
|
3795
|
-
>>> from mindspore import Tensor, ops
|
|
3796
|
-
>>> input_x = Tensor(np.array([[1, 2, 3], [1, 2, 3]]), mindspore.float32)
|
|
3797
|
-
>>> gamma = Tensor(np.ones([3]), mindspore.float32)
|
|
3798
|
-
>>> beta = Tensor(np.ones([3]), mindspore.float32)
|
|
3799
|
-
>>> layer_norm = ops.LayerNorm()
|
|
3800
|
-
>>> output, mean, variance = layer_norm(input_x, gamma, beta)
|
|
3801
|
-
>>> print(output)
|
|
3802
|
-
[[-0.2247448 1. 2.2247448]
|
|
3803
|
-
[-0.2247448 1. 2.2247448]]
|
|
3804
|
-
>>> print(mean)
|
|
3805
|
-
[[2.]
|
|
3806
|
-
[2.]]
|
|
3807
|
-
>>> print(variance)
|
|
3808
|
-
[[0.6666667]
|
|
3809
|
-
[0.6666667]]
|
|
2948
|
+
>>> from mindspore import Tensor, nn, ops, Parameter
|
|
2949
|
+
>>> class Net(nn.Cell):
|
|
2950
|
+
... def __init__(self):
|
|
2951
|
+
... super(Net, self).__init__()
|
|
2952
|
+
... self.apply_centerd_rms_prop = ops.ApplyCenteredRMSProp()
|
|
2953
|
+
... self.var = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="var")
|
|
2954
|
+
...
|
|
2955
|
+
... def construct(self, mean_grad, mean_square, moment, grad, decay, momentum, epsilon, lr):
|
|
2956
|
+
... out = self.apply_centerd_rms_prop(self.var, mean_grad, mean_square, moment, grad,
|
|
2957
|
+
... lr, decay, momentum, epsilon)
|
|
2958
|
+
... return out
|
|
2959
|
+
...
|
|
2960
|
+
>>> net = Net()
|
|
2961
|
+
>>> mean_grad = Tensor(np.ones([2, 2]).astype(np.float32))
|
|
2962
|
+
>>> mean_square = Tensor(np.ones([2, 2]).astype(np.float32))
|
|
2963
|
+
>>> moment = Tensor(np.ones([2, 2]).astype(np.float32))
|
|
2964
|
+
>>> grad = Tensor(np.ones([2, 2]).astype(np.float32))
|
|
2965
|
+
>>> output = net(mean_grad, mean_square, moment, grad, 0.0, 1e-10, 0.001, 0.01)
|
|
2966
|
+
>>> print(net.var.asnumpy())
|
|
2967
|
+
[[0.68377227 0.68377227]
|
|
2968
|
+
[0.68377227 0.68377227]]
|
|
3810
2969
|
"""
|
|
3811
2970
|
|
|
3812
2971
|
@prim_attr_register
|
|
3813
|
-
def __init__(self,
|
|
3814
|
-
"""Initialize
|
|
3815
|
-
validator.check_value_type(
|
|
3816
|
-
|
|
3817
|
-
validator.check_value_type('epsilon', epsilon, [float], self.name)
|
|
2972
|
+
def __init__(self, use_locking=False):
|
|
2973
|
+
"""Initialize ApplyCenteredRMSProp."""
|
|
2974
|
+
self.use_locking = validator.check_value_type("use_locking", use_locking, [bool], self.name)
|
|
2975
|
+
self.add_prim_attr('side_effect_mem', True)
|
|
3818
2976
|
|
|
3819
2977
|
|
|
3820
2978
|
class L2Normalize(Primitive):
|
|
@@ -3833,8 +2991,9 @@ class L2Normalize(Primitive):
  On Ascend, input data type of float64 is currently not supported.

  Args:
- axis (Union[list(int), tuple(int), int]): Specify the axis for calculating the L2 norm.
-
+ axis (Union[list(int), tuple(int), int], optional): Specify the axis for calculating the L2 norm.
+ Default: ``0`` .
+ epsilon (float, optional): A small value added for numerical stability. Default: ``1e-4`` .

  Inputs:
  - **x** (Tensor) - Input to compute the normalization. Tensor of shape :math:`(N, *)`,
@@ -3879,49 +3038,6 @@ class L2Normalize(Primitive):
|
|
|
3879
3038
|
self.axis = axis
|
|
3880
3039
|
|
|
3881
3040
|
|
|
3882
|
-
class ResizeBilinear(PrimitiveWithInfer):
|
|
3883
|
-
r"""
|
|
3884
|
-
This API is deprecated, please use the :class:`mindspore.ops.ResizeBilinearV2` instead.
|
|
3885
|
-
For general resizing with other interpolation methods, refer to :func:`mindspore.ops.interpolate` for more details.
|
|
3886
|
-
|
|
3887
|
-
Note:
|
|
3888
|
-
Dynamic shape feature is not supported for now.
|
|
3889
|
-
|
|
3890
|
-
Supported Platforms:
|
|
3891
|
-
Deprecated
|
|
3892
|
-
"""
|
|
3893
|
-
|
|
3894
|
-
@prim_attr_register
|
|
3895
|
-
def __init__(self, size, align_corners=False, half_pixel_centers=False):
|
|
3896
|
-
"""Initialize ResizeBilinear."""
|
|
3897
|
-
validator.check_value_type("size", size, [tuple, list], self.name)
|
|
3898
|
-
validator.check_equal_int(len(size), 2, "size len", self.name)
|
|
3899
|
-
for item in size:
|
|
3900
|
-
validator.check_positive_int(item, 'size item', self.name)
|
|
3901
|
-
validator.check_value_type("size item", item, int, self.name)
|
|
3902
|
-
self.align_corners = validator.check_value_type("align_corners", align_corners, [bool], self.name)
|
|
3903
|
-
self.half_pixel_centers = validator.check_value_type("half_pixel_centers",
|
|
3904
|
-
half_pixel_centers, [bool], self.name)
|
|
3905
|
-
if half_pixel_centers and align_corners:
|
|
3906
|
-
raise ValueError(f"If half_pixel_centers is True, align_corners must be False, but got {align_corners}")
|
|
3907
|
-
for i, value in enumerate(size):
|
|
3908
|
-
validator.check_positive_int(value, f'{i}th value of size', self.name)
|
|
3909
|
-
|
|
3910
|
-
def infer_shape(self, input_shape):
|
|
3911
|
-
validator.check("dimension of input", len(input_shape), "", 4, validator.EQ, self.name)
|
|
3912
|
-
input_shape = list(input_shape)
|
|
3913
|
-
batch, channel, _, _ = input_shape
|
|
3914
|
-
out_shape = [batch, channel]
|
|
3915
|
-
for i in self.size:
|
|
3916
|
-
out_shape.append(int(i))
|
|
3917
|
-
return out_shape
|
|
3918
|
-
|
|
3919
|
-
def infer_dtype(self, input_dtype):
|
|
3920
|
-
validator.check_tensor_dtype_valid('input_dtype', input_dtype, [mstype.float16, mstype.float32],
|
|
3921
|
-
self.name)
|
|
3922
|
-
return input_dtype
|
|
3923
|
-
|
|
3924
|
-
|
|
3925
3041
|
class UpsampleTrilinear3D(Primitive):
|
|
3926
3042
|
r"""
|
|
3927
3043
|
Performs upsampling with trilinear interpolation across 3dims for 5dim input Tensor.
|
|
@@ -4000,145 +3116,6 @@ class UpsampleTrilinear3D(Primitive):
|
|
|
4000
3116
|
self.add_prim_attr('align_corners', self.align_corners)
|
|
4001
3117
|
|
|
4002
3118
|
|
|
4003
|
-
class OneHot(Primitive):
|
|
4004
|
-
r"""
|
|
4005
|
-
Computes a one-hot tensor.
|
|
4006
|
-
|
|
4007
|
-
The locations represented by indices in `indices` take value `on_value`, while all
|
|
4008
|
-
other locations take value `off_value`.
|
|
4009
|
-
|
|
4010
|
-
Note:
|
|
4011
|
-
If the input indices is rank `N`, the output will have rank `N+1`. The new axis is created at dimension `axis`.
|
|
4012
|
-
On Ascend, if `on_value` is Int64 dtype, `indices` must be Int64 dtype.
|
|
4013
|
-
|
|
4014
|
-
Args:
|
|
4015
|
-
axis (int): Position to insert the value. e.g. If shape of `indices` is :math:`(N, C)`, and `axis` is -1,
|
|
4016
|
-
the output shape will be :math:`(N, C, D)`, If `axis` is 0, the output shape will be :math:`(D, N, C)`.
|
|
4017
|
-
Default: ``-1`` .
|
|
4018
|
-
|
|
4019
|
-
Inputs:
|
|
4020
|
-
- **indices** (Tensor) - A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
|
|
4021
|
-
Data type must be int32 or int64.
|
|
4022
|
-
- **depth** (int) - A scalar defining the depth of the one-hot dimension.
|
|
4023
|
-
- **on_value** (Tensor) - A value to fill in output when `indices[j] = i`. Data type must be int32, int64,
|
|
4024
|
-
float16 or float32.
|
|
4025
|
-
- **off_value** (Tensor) - A value to fill in output when `indices[j] != i`.
|
|
4026
|
-
It has the same data type as `on_value`.
|
|
4027
|
-
|
|
4028
|
-
Outputs:
|
|
4029
|
-
Tensor, one-hot tensor. Tensor of shape :math:`(X_0, \ldots, X_{axis}, \text{depth} ,X_{axis+1}, \ldots, X_n)`,
|
|
4030
|
-
and it has the same data type as `on_value`.
|
|
4031
|
-
|
|
4032
|
-
Raises:
|
|
4033
|
-
TypeError: If `axis` or `depth` is not an int.
|
|
4034
|
-
TypeError: If dtype of `indices` is not int32 or int64.
|
|
4035
|
-
TypeError: If `indices`, `on_value` or `off_value` is not a Tensor.
|
|
4036
|
-
ValueError: If `axis` is not in range [-1, len(indices_shape)].
|
|
4037
|
-
ValueError: If `depth` is less than 0.
|
|
4038
|
-
|
|
4039
|
-
Supported Platforms:
|
|
4040
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
4041
|
-
|
|
4042
|
-
Examples:
|
|
4043
|
-
>>> import mindspore
|
|
4044
|
-
>>> import numpy as np
|
|
4045
|
-
>>> from mindspore import Tensor, ops
|
|
4046
|
-
>>> indices = Tensor(np.array([0, 1, 2]), mindspore.int32)
|
|
4047
|
-
>>> depth, on_value, off_value = 3, Tensor(1.0, mindspore.float32), Tensor(0.0, mindspore.float32)
|
|
4048
|
-
>>> onehot = ops.OneHot()
|
|
4049
|
-
>>> output = onehot(indices, depth, on_value, off_value)
|
|
4050
|
-
>>> print(output)
|
|
4051
|
-
[[1. 0. 0.]
|
|
4052
|
-
[0. 1. 0.]
|
|
4053
|
-
[0. 0. 1.]]
|
|
4054
|
-
"""
|
|
4055
|
-
|
|
4056
|
-
@prim_attr_register
|
|
4057
|
-
def __init__(self, axis=-1):
|
|
4058
|
-
"""Initialize OneHot."""
|
|
4059
|
-
self.init_prim_io_names(inputs=['indices', 'depth', 'on_value', 'off_value'], outputs=['output'])
|
|
4060
|
-
validator.check_value_type("axis", axis, [int], self.name)
|
|
4061
|
-
|
|
4062
|
-
|
|
4063
|
-
class GeLU(Primitive):
|
|
4064
|
-
r"""
|
|
4065
|
-
Gaussian Error Linear Units activation function.
|
|
4066
|
-
|
|
4067
|
-
GeLU is described in the paper `Gaussian Error Linear Units (GELUs) <https://arxiv.org/abs/1606.08415>`_.
|
|
4068
|
-
And also please refer to `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding
|
|
4069
|
-
<https://arxiv.org/abs/1810.04805>`_.
|
|
4070
|
-
|
|
4071
|
-
GeLU is defined as follows:
|
|
4072
|
-
|
|
4073
|
-
.. math::
|
|
4074
|
-
GELU(x_i) = x_i*P(X < x_i)
|
|
4075
|
-
|
|
4076
|
-
where :math:`P` is the cumulative distribution function of the standard Gaussian distribution,
|
|
4077
|
-
:math:`x_i` is the input element.
|
|
4078
|
-
|
|
4079
|
-
Inputs:
|
|
4080
|
-
- **x** (Tensor) - The input of the activation function GeLU, the data type is float16, float32 or float64.
|
|
4081
|
-
|
|
4082
|
-
Outputs:
|
|
4083
|
-
Tensor, with the same type and shape as `x`.
|
|
4084
|
-
|
|
4085
|
-
Raises:
|
|
4086
|
-
TypeError: If `x` is not a Tensor.
|
|
4087
|
-
TypeError: If dtype of `x` is not float16, float32 or float64.
|
|
4088
|
-
|
|
4089
|
-
Supported Platforms:
|
|
4090
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
4091
|
-
|
|
4092
|
-
Examples:
|
|
4093
|
-
>>> import mindspore
|
|
4094
|
-
>>> import numpy as np
|
|
4095
|
-
>>> from mindspore import Tensor, ops
|
|
4096
|
-
>>> x = Tensor(np.array([1.0, 2.0, 3.0]), mindspore.float32)
|
|
4097
|
-
>>> gelu = ops.GeLU()
|
|
4098
|
-
>>> result = gelu(x)
|
|
4099
|
-
>>> print(result)
|
|
4100
|
-
[0.841192 1.9545976 2.9963627]
|
|
4101
|
-
"""
|
|
4102
|
-
|
|
4103
|
-
@prim_attr_register
|
|
4104
|
-
def __init__(self):
|
|
4105
|
-
"""Initialize GeLU"""
|
|
4106
|
-
self.init_prim_io_names(inputs=['x'], outputs=['output'])
|
|
4107
|
-
|
|
4108
|
-
|
|
4109
|
-
class FastGeLU(Primitive):
|
|
4110
|
-
r"""
|
|
4111
|
-
Fast Gaussian Error Linear Units activation function.
|
|
4112
|
-
|
|
4113
|
-
Refer to :func:`mindspore.ops.fast_gelu` for more details.
|
|
4114
|
-
|
|
4115
|
-
Inputs:
|
|
4116
|
-
- **x** (Tensor) - Input to compute the FastGeLU with data type of float16 or float32.
|
|
4117
|
-
|
|
4118
|
-
Outputs:
|
|
4119
|
-
Tensor, with the same type and shape as `x`.
|
|
4120
|
-
|
|
4121
|
-
Supported Platforms:
|
|
4122
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
4123
|
-
|
|
4124
|
-
Examples:
|
|
4125
|
-
>>> import mindspore
|
|
4126
|
-
>>> import numpy as np
|
|
4127
|
-
>>> from mindspore import Tensor, ops
|
|
4128
|
-
>>> x = Tensor(np.array([[-1.0, 4.0, -8.0], [2.0, -5.0, 9.0]]), mindspore.float32)
|
|
4129
|
-
>>> fast_gelu = ops.FastGeLU()
|
|
4130
|
-
>>> output = fast_gelu(x)
|
|
4131
|
-
>>> print(output)
|
|
4132
|
-
[[-1.5418735e-01 3.9921875e+00 -9.7473649e-06]
|
|
4133
|
-
[ 1.9375000e+00 -1.0052517e-03 8.9824219e+00]]
|
|
4134
|
-
"""
|
|
4135
|
-
|
|
4136
|
-
@prim_attr_register
|
|
4137
|
-
def __init__(self):
|
|
4138
|
-
"""Initialize FastGeLU."""
|
|
4139
|
-
self.init_prim_io_names(inputs=['x'], outputs=['output'])
|
|
4140
|
-
|
|
4141
|
-
|
|
4142
3119
|
class GetNext(Primitive):
|
|
4143
3120
|
"""
|
|
4144
3121
|
Returns the next element in the dataset queue.
|
|
@@ -4193,55 +3170,6 @@ class GetNext(Primitive):
|
|
|
4193
3170
|
validator.check_value_type("output_num", output_num, [int], self.name)
|
|
4194
3171
|
|
|
4195
3172
|
|
|
4196
|
-
class PReLU(PrimitiveWithInfer):
|
|
4197
|
-
r"""
|
|
4198
|
-
Parametric Rectified Linear Unit activation function.
|
|
4199
|
-
|
|
4200
|
-
Refer to :func:`mindspore.ops.prelu` for more details.
|
|
4201
|
-
|
|
4202
|
-
Inputs:
|
|
4203
|
-
- **x** (Tensor) - The input Tensor of the activation function. The data type is float16 or float32.
|
|
4204
|
-
The shape is :math:`(N, C, *)` where :math:`*` means, any number of additional dimensions.
|
|
4205
|
-
- **weight** (Tensor) - Weight Tensor. The data type is float16 or float32.
|
|
4206
|
-
The weight can only be a vector, and the length is the same as the number of channels C of the `input_x`.
|
|
4207
|
-
On GPU devices, when the input is a scalar, the shape is 1.
|
|
4208
|
-
|
|
4209
|
-
Outputs:
|
|
4210
|
-
Tensor, with the same type as `x`.
|
|
4211
|
-
|
|
4212
|
-
Supported Platforms:
|
|
4213
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
4214
|
-
|
|
4215
|
-
Examples:
|
|
4216
|
-
>>> import mindspore
|
|
4217
|
-
>>> import numpy as np
|
|
4218
|
-
>>> from mindspore import Tensor, nn, ops
|
|
4219
|
-
>>> class Net(nn.Cell):
|
|
4220
|
-
... def __init__(self):
|
|
4221
|
-
... super(Net, self).__init__()
|
|
4222
|
-
... self.prelu = ops.PReLU()
|
|
4223
|
-
... def construct(self, x, weight):
|
|
4224
|
-
... result = self.prelu(x, weight)
|
|
4225
|
-
... return result
|
|
4226
|
-
...
|
|
4227
|
-
>>> x = Tensor(np.arange(-6, 6).reshape((2, 3, 2)), mindspore.float32)
|
|
4228
|
-
>>> weight = Tensor(np.array([0.1, 0.6, -0.3]), mindspore.float32)
|
|
4229
|
-
>>> net = Net()
|
|
4230
|
-
>>> output = net(x, weight)
|
|
4231
|
-
>>> print(output)
|
|
4232
|
-
[[[-0.60 -0.50]
|
|
4233
|
-
[-2.40 -1.80]
|
|
4234
|
-
[ 0.60 0.30]]
|
|
4235
|
-
[[ 0.00 1.00]
|
|
4236
|
-
[ 2.00 3.00]
|
|
4237
|
-
[ 4.0 5.00]]]
|
|
4238
|
-
"""
|
|
4239
|
-
|
|
4240
|
-
@prim_attr_register
|
|
4241
|
-
def __init__(self):
|
|
4242
|
-
self.init_prim_io_names(inputs=['x', 'weight'], outputs=['output'])
|
|
4243
|
-
|
|
4244
|
-
|
|
4245
3173
|
class LSTM(Primitive):
|
|
4246
3174
|
r"""
|
|
4247
3175
|
Performs the Long Short-Term Memory (LSTM) on the input.
|
|
@@ -4729,18 +3657,18 @@ class ComputeAccidentalHits(Primitive):
  num_true (int): The number of target classes per training example. Default: ``1`` .

  Inputs:
- - **true_classes** (Tensor) - The target classes. With data type of
+ - **true_classes** (Tensor) - The target classes. With data type of int64
  and shape :math:`(batch\_size, num\_true)`.
  - **sampled_candidates** (Tensor) - The Candidate sampling results of operators, types of training samples,
- with data type of
+ with data type of int64 and shape :math:`(num\_sampled, )`.

  Outputs:
  Tuple of 3 Tensors.

  - **indices** (Tensor) - A Tensor with shape :math:`(num\_accidental\_hits, )`,
- with
+ with data type of int32.
  - **ids** (Tensor) - A Tensor with shape :math:`(num\_accidental\_hits, )`,
- with
+ with data type of int64.
  - **weights** (Tensor) - A Tensor with shape :math:`(num\_accidental\_hits, )`, with the type float32.

  Raises:
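The completed dtype notes above pin down the interface: both inputs are int64, and the three outputs come back as int32, int64 and float32. A rough sketch of that contract (values and sizes are hypothetical; platform support is as listed in the full docstring, so the call itself is left commented out):

    import numpy as np
    from mindspore import Tensor, ops

    batch_size, num_true, num_sampled = 4, 1, 3  # hypothetical sizes

    true_classes = Tensor(np.array([[1], [5], [2], [7]], dtype=np.int64))  # (batch_size, num_true), int64
    sampled_candidates = Tensor(np.array([2, 5, 9], dtype=np.int64))       # (num_sampled,), int64

    compute_hits = ops.ComputeAccidentalHits(num_true=num_true)
    # indices, ids, weights = compute_hits(true_classes, sampled_candidates)
    # Expected dtypes per the docstring: indices int32, ids int64, weights float32.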
@@ -4866,210 +3794,96 @@ class Adam(Primitive):
|
|
|
4866
3794
|
:math:`t` represents updating step while :math:`beta_1^t(\beta_1^{t})` and :math:`beta_2^t(\beta_2^{t})`
|
|
4867
3795
|
represent `beta1_power` and `beta2_power`, :math:`\alpha` represents `learning_rate`, :math:`w` represents `var`,
|
|
4868
3796
|
:math:`\epsilon` represents
|
|
4869
|
-
`epsilon`.
|
|
4870
|
-
|
|
4871
|
-
Inputs of `var`, `m`, `v` and `gradient`
|
|
4872
|
-
comply with the implicit type conversion rules to make the data types consistent.
|
|
4873
|
-
If they have different data types, the lower priority data type will be converted to
|
|
4874
|
-
the relatively highest priority data type.
|
|
4875
|
-
|
|
4876
|
-
Args:
|
|
4877
|
-
use_locking (bool): Whether to enable a lock to protect variable tensors from being updated.
|
|
4878
|
-
If ``True`` , updates of the var, m, and v tensors will be protected by a lock.
|
|
4879
|
-
If ``False`` , the result is unpredictable. Default: ``False`` .
|
|
4880
|
-
use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.
|
|
4881
|
-
If ``True`` , update the gradients using NAG.
|
|
4882
|
-
If ``False`` , update the gradients without using NAG. Default: ``False`` .
|
|
4883
|
-
|
|
4884
|
-
Inputs:
|
|
4885
|
-
- **var** (Parameter) - Weights to be updated. The shape is :math:`(N, *)` where :math:`*` means,
|
|
4886
|
-
any number of additional dimensions. The data type can be float16 or float32.
|
|
4887
|
-
- **m** (Parameter) - The 1st moment vector in the updating formula,
|
|
4888
|
-
the shape should be the same as `var`.
|
|
4889
|
-
- **v** (Parameter) - the 2nd moment vector in the updating formula,
|
|
4890
|
-
the shape should be the same as `var`.
|
|
4891
|
-
- **beta1_power** (float) - :math:`beta_1^t(\beta_1^{t})` in the updating formula.
|
|
4892
|
-
- **beta2_power** (float) - :math:`beta_2^t(\beta_2^{t})` in the updating formula.
|
|
4893
|
-
- **lr** (float) - :math:`l` in the updating formula. The paper suggested value is :math:`10^{-8}`.
|
|
4894
|
-
- **beta1** (float) - The exponential decay rate for the 1st moment estimations.
|
|
4895
|
-
The paper suggested value is :math:`0.9`.
|
|
4896
|
-
- **beta2** (float) - The exponential decay rate for the 2nd moment estimations.
|
|
4897
|
-
The paper suggested value is :math:`0.999`.
|
|
4898
|
-
- **epsilon** (float) - Term added to the denominator to improve numerical stability.
|
|
4899
|
-
- **gradient** (Tensor) - Gradient, has the same shape and data type as `var`.
|
|
4900
|
-
|
|
4901
|
-
Outputs:
|
|
4902
|
-
Tuple of 3 Tensor, the updated parameters.
|
|
4903
|
-
|
|
4904
|
-
- **var** (Tensor) - The same shape and data type as Inputs `var`.
|
|
4905
|
-
- **m** (Tensor) - The same shape and data type as Inputs `m`.
|
|
4906
|
-
- **v** (Tensor) - The same shape and data type as Inputs `v`.
|
|
4907
|
-
|
|
4908
|
-
Raises:
|
|
4909
|
-
TypeError: If neither `use_locking` nor `use_nesterov` is a bool.
|
|
4910
|
-
TypeError: If `var`, `m` or `v` is not a Parameter.
|
|
4911
|
-
TypeError: If `beta1_power`, `beta2_power1`, `lr`, `beta1`, `beta2`, `epsilon` or `gradient` is not a Tensor.
|
|
4912
|
-
|
|
4913
|
-
Supported Platforms:
|
|
4914
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
4915
|
-
|
|
4916
|
-
Examples:
|
|
4917
|
-
>>> import mindspore
|
|
4918
|
-
>>> import numpy as np
|
|
4919
|
-
>>> from mindspore import Tensor, nn, ops
|
|
4920
|
-
>>> from mindspore import Parameter
|
|
4921
|
-
>>> class Net(nn.Cell):
|
|
4922
|
-
... def __init__(self):
|
|
4923
|
-
... super(Net, self).__init__()
|
|
4924
|
-
... self.apply_adam = ops.Adam()
|
|
4925
|
-
... self.var = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="var")
|
|
4926
|
-
... self.m = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="m")
|
|
4927
|
-
... self.v = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="v")
|
|
4928
|
-
... def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad):
|
|
4929
|
-
... out = self.apply_adam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, beta2,
|
|
4930
|
-
... epsilon, grad)
|
|
4931
|
-
... return out
|
|
4932
|
-
...
|
|
4933
|
-
>>> net = Net()
|
|
4934
|
-
>>> gradient = Tensor(np.ones([2, 2]).astype(np.float32))
|
|
4935
|
-
>>> output = net(0.9, 0.999, 0.001, 0.9, 0.999, 1e-8, gradient)
|
|
4936
|
-
>>> print(net.var.asnumpy())
|
|
4937
|
-
[[0.9996838 0.9996838]
|
|
4938
|
-
[0.9996838 0.9996838]]
|
|
4939
|
-
"""
|
|
4940
|
-
__mindspore_signature__ = (
|
|
4941
|
-
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
|
|
4942
|
-
sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
|
|
4943
|
-
sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T2),
|
|
4944
|
-
sig.make_sig('beta1_power', dtype=sig.sig_dtype.T3),
|
|
4945
|
-
sig.make_sig('beta2_power', dtype=sig.sig_dtype.T4),
|
|
4946
|
-
sig.make_sig('lr', dtype=sig.sig_dtype.T5),
|
|
4947
|
-
sig.make_sig('beta1', dtype=sig.sig_dtype.T6),
|
|
4948
|
-
sig.make_sig('beta2', dtype=sig.sig_dtype.T7),
|
|
4949
|
-
sig.make_sig('epsilon', dtype=sig.sig_dtype.T8),
|
|
4950
|
-
sig.make_sig('gradient', dtype=sig.sig_dtype.T)
|
|
4951
|
-
)
|
|
4952
|
-
|
|
4953
|
-
@prim_attr_register
|
|
4954
|
-
def __init__(self, use_locking=False, use_nesterov=False):
|
|
4955
|
-
"""Initialize Adam."""
|
|
4956
|
-
validator.check_value_type("use_locking", use_locking, [bool], self.name)
|
|
4957
|
-
validator.check_value_type("use_nesterov", use_nesterov, [bool], self.name)
|
|
4958
|
-
self.add_prim_attr('side_effect_mem', True)
|
|
4959
|
-
|
|
4960
|
-
|
|
4961
|
-
class AdamWeightDecay(Primitive):
|
|
4962
|
-
r"""
|
|
4963
|
-
Updates gradients by the Adaptive Moment Estimation algorithm with weight decay (AdamWeightDecay).
|
|
4964
|
-
|
|
4965
|
-
The Adam algorithm is proposed in `Adam: A Method for Stochastic Optimization <https://arxiv.org/abs/1412.6980>`_.
|
|
4966
|
-
The AdamWeightDecay variant was proposed in `Decoupled Weight Decay Regularization
|
|
4967
|
-
<https://arxiv.org/abs/1711.05101>`_.
|
|
4968
|
-
|
|
4969
|
-
The updating formulas are as follows,
|
|
4970
|
-
|
|
4971
|
-
.. math::
|
|
4972
|
-
\begin{array}{ll} \\
|
|
4973
|
-
m = \beta_1 * m + (1 - \beta_1) * g \\
|
|
4974
|
-
v = \beta_2 * v + (1 - \beta_2) * g * g \\
|
|
4975
|
-
update = \frac{m}{\sqrt{v} + \epsilon} \\
|
|
4976
|
-
update =
|
|
4977
|
-
\begin{cases}
|
|
4978
|
-
update + weight\_decay * w
|
|
4979
|
-
& \text{ if } weight\_decay > 0 \\
|
|
4980
|
-
update
|
|
4981
|
-
& \text{ otherwise }
|
|
4982
|
-
\end{cases} \\
|
|
4983
|
-
w = w - lr * update
|
|
4984
|
-
\end{array}
|
|
3797
|
+
`epsilon`.
|
|
4985
3798
|
|
|
4986
|
-
|
|
4987
|
-
|
|
4988
|
-
|
|
4989
|
-
|
|
3799
|
+
Inputs of `var`, `m`, `v` and `gradient`
|
|
3800
|
+
comply with the implicit type conversion rules to make the data types consistent.
|
|
3801
|
+
If they have different data types, the lower priority data type will be converted to
|
|
3802
|
+
the relatively highest priority data type.
|
|
4990
3803
|
|
|
4991
3804
|
Args:
|
|
4992
3805
|
use_locking (bool): Whether to enable a lock to protect variable tensors from being updated.
|
|
4993
3806
|
If ``True`` , updates of the var, m, and v tensors will be protected by a lock.
|
|
4994
3807
|
If ``False`` , the result is unpredictable. Default: ``False`` .
|
|
3808
|
+
use_nesterov (bool): Whether to use Nesterov Accelerated Gradient (NAG) algorithm to update the gradients.
|
|
3809
|
+
If ``True`` , update the gradients using NAG.
|
|
3810
|
+
If ``False`` , update the gradients without using NAG. Default: ``False`` .
|
|
4995
3811
|
|
|
4996
3812
|
Inputs:
|
|
4997
3813
|
- **var** (Parameter) - Weights to be updated. The shape is :math:`(N, *)` where :math:`*` means,
|
|
4998
3814
|
any number of additional dimensions. The data type can be float16 or float32.
|
|
4999
3815
|
- **m** (Parameter) - The 1st moment vector in the updating formula,
|
|
5000
|
-
|
|
5001
|
-
- **v** (Parameter) -
|
|
5002
|
-
|
|
5003
|
-
- **
|
|
5004
|
-
|
|
5005
|
-
- **
|
|
5006
|
-
|
|
5007
|
-
|
|
5008
|
-
|
|
5009
|
-
|
|
5010
|
-
|
|
5011
|
-
- **
|
|
5012
|
-
Default: ``0.0`` .
|
|
5013
|
-
- **gradient** (Tensor) - Gradient, has the same shape as `var`.
|
|
3816
|
+
the shape should be the same as `var`.
|
|
3817
|
+
- **v** (Parameter) - the 2nd moment vector in the updating formula,
|
|
3818
|
+
the shape should be the same as `var`.
|
|
3819
|
+
- **beta1_power** (float) - :math:`beta_1^t(\beta_1^{t})` in the updating formula.
|
|
3820
|
+
- **beta2_power** (float) - :math:`beta_2^t(\beta_2^{t})` in the updating formula.
|
|
3821
|
+
- **lr** (float) - :math:`l` in the updating formula. The paper suggested value is :math:`10^{-8}`.
|
|
3822
|
+
- **beta1** (float) - The exponential decay rate for the 1st moment estimations.
|
|
3823
|
+
The paper suggested value is :math:`0.9`.
|
|
3824
|
+
- **beta2** (float) - The exponential decay rate for the 2nd moment estimations.
|
|
3825
|
+
The paper suggested value is :math:`0.999`.
|
|
3826
|
+
- **epsilon** (float) - Term added to the denominator to improve numerical stability.
|
|
3827
|
+
- **gradient** (Tensor) - Gradient, has the same shape and data type as `var`.
|
|
5014
3828
|
|
|
5015
3829
|
Outputs:
|
|
5016
3830
|
Tuple of 3 Tensor, the updated parameters.
|
|
5017
3831
|
|
|
5018
|
-
- **var** (Tensor) - The same shape and data type as `var`.
|
|
5019
|
-
- **m** (Tensor) - The same shape and data type as `m`.
|
|
5020
|
-
- **v** (Tensor) - The same shape and data type as `v`.
|
|
3832
|
+
- **var** (Tensor) - The same shape and data type as Inputs `var`.
|
|
3833
|
+
- **m** (Tensor) - The same shape and data type as Inputs `m`.
|
|
3834
|
+
- **v** (Tensor) - The same shape and data type as Inputs `v`.
|
|
5021
3835
|
|
|
5022
3836
|
Raises:
|
|
5023
|
-
TypeError: If `use_locking` is
|
|
5024
|
-
TypeError: If `
|
|
5025
|
-
TypeError: If `
|
|
5026
|
-
TypeError: If `gradient` is not a Tensor.
|
|
5027
|
-
ValueError: If `eps` <= 0.
|
|
5028
|
-
ValueError: If `beta1`, `beta2` is not in range (0.0,1.0).
|
|
5029
|
-
ValueError: If `decay` < 0.
|
|
3837
|
+
TypeError: If neither `use_locking` nor `use_nesterov` is a bool.
|
|
3838
|
+
TypeError: If `var`, `m` or `v` is not a Parameter.
|
|
3839
|
+
TypeError: If `beta1_power`, `beta2_power1`, `lr`, `beta1`, `beta2`, `epsilon` or `gradient` is not a Tensor.
|
|
5030
3840
|
|
|
5031
3841
|
Supported Platforms:
|
|
5032
3842
|
``Ascend`` ``GPU`` ``CPU``
|
|
5033
3843
|
|
|
5034
3844
|
Examples:
|
|
3845
|
+
>>> import mindspore
|
|
5035
3846
|
>>> import numpy as np
|
|
5036
|
-
>>>
|
|
5037
|
-
>>> from mindspore import
|
|
3847
|
+
>>> from mindspore import Tensor, nn, ops
|
|
3848
|
+
>>> from mindspore import Parameter
|
|
5038
3849
|
>>> class Net(nn.Cell):
|
|
5039
3850
|
... def __init__(self):
|
|
5040
3851
|
... super(Net, self).__init__()
|
|
5041
|
-
... self.
|
|
3852
|
+
... self.apply_adam = ops.Adam()
|
|
5042
3853
|
... self.var = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="var")
|
|
5043
3854
|
... self.m = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="m")
|
|
5044
3855
|
... self.v = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="v")
|
|
5045
|
-
... def construct(self, lr, beta1, beta2, epsilon,
|
|
5046
|
-
... out = self.
|
|
5047
|
-
... epsilon,
|
|
3856
|
+
... def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad):
|
|
3857
|
+
... out = self.apply_adam(self.var, self.m, self.v, beta1_power, beta2_power, lr, beta1, beta2,
|
|
3858
|
+
... epsilon, grad)
|
|
5048
3859
|
... return out
|
|
3860
|
+
...
|
|
5049
3861
|
>>> net = Net()
|
|
5050
3862
|
>>> gradient = Tensor(np.ones([2, 2]).astype(np.float32))
|
|
5051
|
-
>>> output = net(0.001, 0.9, 0.999, 1e-8,
|
|
3863
|
+
>>> output = net(0.9, 0.999, 0.001, 0.9, 0.999, 1e-8, gradient)
|
|
5052
3864
|
>>> print(net.var.asnumpy())
|
|
5053
|
-
[[0.
|
|
5054
|
-
[0.
|
|
3865
|
+
[[0.9996838 0.9996838]
|
|
3866
|
+
[0.9996838 0.9996838]]
|
|
5055
3867
|
"""
|
|
5056
3868
|
__mindspore_signature__ = (
|
|
5057
3869
|
sig.make_sig('var', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
|
|
5058
|
-
sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.
|
|
3870
|
+
sig.make_sig('m', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T1),
|
|
5059
3871
|
sig.make_sig('v', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T2),
|
|
5060
|
-
sig.make_sig('
|
|
5061
|
-
sig.make_sig('
|
|
5062
|
-
sig.make_sig('
|
|
5063
|
-
sig.make_sig('
|
|
5064
|
-
sig.make_sig('
|
|
3872
|
+
sig.make_sig('beta1_power', dtype=sig.sig_dtype.T3),
|
|
3873
|
+
sig.make_sig('beta2_power', dtype=sig.sig_dtype.T4),
|
|
3874
|
+
sig.make_sig('lr', dtype=sig.sig_dtype.T5),
|
|
3875
|
+
sig.make_sig('beta1', dtype=sig.sig_dtype.T6),
|
|
3876
|
+
sig.make_sig('beta2', dtype=sig.sig_dtype.T7),
|
|
3877
|
+
sig.make_sig('epsilon', dtype=sig.sig_dtype.T8),
|
|
5065
3878
|
sig.make_sig('gradient', dtype=sig.sig_dtype.T)
|
|
5066
3879
|
)
|
|
5067
3880
|
|
|
5068
3881
|
@prim_attr_register
|
|
5069
|
-
def __init__(self, use_locking=False):
|
|
5070
|
-
"""Initialize
|
|
5071
|
-
self.add_prim_attr('side_effect_mem', True)
|
|
3882
|
+
def __init__(self, use_locking=False, use_nesterov=False):
|
|
3883
|
+
"""Initialize Adam."""
|
|
5072
3884
|
validator.check_value_type("use_locking", use_locking, [bool], self.name)
|
|
3885
|
+
validator.check_value_type("use_nesterov", use_nesterov, [bool], self.name)
|
|
3886
|
+
self.add_prim_attr('side_effect_mem', True)
|
|
5073
3887
|
|
|
5074
3888
|
|
|
5075
3889
|
class AdamNoUpdateParam(Primitive):
|
|
@@ -5632,16 +4446,20 @@ class KLDivLoss(Primitive):
  Note:
  - On Ascend, float64 dtype is not currently supported.
  - The output aligns with the mathematical definition of Kullback-Leibler divergence
- only when `reduction` is set to 'batchmean'
+ only when `reduction` is set to ``'batchmean'``.
+ - On Ascend, the value of `reduction` must be one of ``'batchmean'``, ``'none'`` or ``'sum'``.
+ - On GPU, the value of `reduction` must be one of ``'mean'``, ``'none'`` or ``'sum'``.
+ - On CPU, the value of `reduction` must be one of ``'mean'``, ``'batchmean'``, ``'none'``
+ or ``'sum'``.

  Args:
  reduction (str): Specifies the reduction to be applied to the output.
  Default: ``'mean'`` .

- -
- -
- -
-
+ - ``'none'``: no reduction will be applied.
+ - ``'mean'``: compute and return the mean of elements in the output.
+ - ``'sum'``: the output elements will be summed.
+ - ``'batchmean'``: average loss is taken over the batch, similar to the mean mode.

  Inputs:
  - **logits** (Tensor) - The input Tensor. The data type must be float16, float32 or float64.
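With the reduction modes now enumerated per platform, here is a short sketch of how the four choices differ on CPU. The `logits`/`labels` semantics follow the docstring, and the assumption that `logits` carries log-probabilities and `labels` carries probabilities is mine, made only for this illustration:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    # `logits` holds log-probabilities, `labels` holds probabilities (assumption for this sketch).
    logits = Tensor(np.log(np.array([[0.7, 0.2, 0.1]])), ms.float32)
    labels = Tensor(np.array([[0.6, 0.3, 0.1]]), ms.float32)

    # On CPU all four modes are accepted; per the note above, only 'batchmean'
    # matches the mathematical definition of KL divergence.
    for mode in ("none", "mean", "sum", "batchmean"):
        kldiv = ops.KLDivLoss(reduction=mode)
        print(mode, kldiv(logits, labels))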
@@ -6497,22 +5315,21 @@ class ApplyAddSign(Primitive):
  is the last moment of :math:`m_{t+1}`, :math:`lr` represents scaling factor `lr`, :math:`g` represents `grad`,
  :math:`\alpha` represents `alpha`, :math:`\beta` represents `beta`.

-
+ The data type of all inputs must be float16 or float32 on Ascend and float16, float32 or float64 on CPU and GPU.
+
+ Inputs of `var`, `accum` and `grad` , `sign_decay` and `beta` comply with the implicit type conversion rules
  to make the data types consistent.
  If they have different data types, the lower priority data type will be converted to
  the relatively highest priority data type.
- The data type of inputs must be float16 or float32 on Ascend and float16, float32 or float64 on CPU and GPU.

  Inputs:
- - **var** (Parameter) - Variable tensor to be updated.
+ - **var** (Parameter) - Variable tensor to be updated.
  The shape is :math:`(N, *)` where :math:`*` means, any number of additional dimensions.
  - **m** (Parameter) - Variable tensor to be updated, has the same data type as `var`.
  - **lr** (Union[Number, Tensor]) - The learning rate value, must be a scalar.
-
- - **
- - **sign_decay** (Union[Number, Tensor]) - Must be a scalar. With float16, float32 or float64 data type.
+ - **alpha** (Union[Number, Tensor]) - Must be a scalar.
+ - **sign_decay** (Union[Number, Tensor]) - Must be a scalar.
  - **beta** (Union[Number, Tensor]) - The exponential decay rate, must be a scalar.
- With float16, float32 or float64 data type.
  - **grad** (Tensor) - A tensor of the same shape as `var`, for the gradient.

  Outputs:
@@ -6522,7 +5339,8 @@ class ApplyAddSign(Primitive):
  - **m** (Tensor) - The same shape and data type as `m`.

  Raises:
- TypeError: If dtype of `var`, `lr
+ TypeError: If dtype of `var`, `lr` and `alpha` is not float16, float32 or float64.
+ TypeError: If dtype of `sign_decay` and `beta` are both not float16, float32 or float64.
  TypeError: If `lr`, `alpha` or `sign_decay` is neither a Number nor a Tensor.
  TypeError: If `grad` is not a Tensor.
  TypeError: If the data type of `var`, `accum` and `grad` conversion of Parameter is not supported.
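The reworked input list above fixes the order and typing of the scalar inputs (`lr`, `alpha`, `sign_decay`, `beta`). A minimal sketch of driving the primitive with that input order, wrapped in a Cell the way the surrounding examples do; shapes and scalar values are hypothetical:

    import numpy as np
    from mindspore import Tensor, Parameter, nn, ops

    class AddSignNet(nn.Cell):
        def __init__(self):
            super().__init__()
            self.apply_add_sign = ops.ApplyAddSign()
            # var and m are Parameters that the primitive updates in place.
            self.var = Parameter(Tensor(np.ones([2, 2]).astype(np.float32)), name="var")
            self.m = Parameter(Tensor(np.zeros([2, 2]).astype(np.float32)), name="m")

        def construct(self, lr, alpha, sign_decay, beta, grad):
            # Input order follows the docstring: var, m, lr, alpha, sign_decay, beta, grad.
            return self.apply_add_sign(self.var, self.m, lr, alpha, sign_decay, beta, grad)

    net = AddSignNet()
    grad = Tensor(np.full([2, 2], 0.1).astype(np.float32))
    var_out, m_out = net(0.001, 1.0, 0.99, 0.9, grad)
    print(var_out)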
@@ -6829,9 +5647,12 @@ class LARSUpdate(PrimitiveWithInfer):
  For more details, please refer to :class:`mindspore.nn.LARS`.

  Args:
- epsilon (float): Term added to the denominator to improve numerical stability.
-
-
+ epsilon (float, optional): Term added to the denominator to improve numerical stability.
+ Default: ``1e-05`` .
+ hyperpara (float, optional): Trust coefficient for calculating the local learning rate.
+ Default: ``0.001`` .
+ use_clip (bool, optional): Whether to use clip operation for calculating the local learning rate.
+ Default: ``False`` .

  Inputs:
  - **weight** (Tensor) - A tensor, representing the weight.
@@ -7033,7 +5854,7 @@ class SparseApplyFtrl(Primitive):
  Examples:
  >>> import mindspore
  >>> import numpy as np
- >>> from mindspore import Tensor, nn, Parameter
+ >>> from mindspore import Tensor, nn, Parameter, ops
  >>> class SparseApplyFtrlNet(nn.Cell):
  ... def __init__(self):
  ... super(SparseApplyFtrlNet, self).__init__()
@@ -7131,69 +5952,6 @@ class SparseApplyFtrlV2(PrimitiveWithInfer):
|
|
|
7131
5952
|
return var_dtype, accum_dtype, linear_dtype
|
|
7132
5953
|
|
|
7133
5954
|
|
|
7134
|
-
class Dropout(PrimitiveWithCheck):
|
|
7135
|
-
r"""
|
|
7136
|
-
During training, randomly zeroes some of the elements of the input tensor
|
|
7137
|
-
with probability :math:`1 - keep\_prob` from a Bernoulli distribution. It plays the
|
|
7138
|
-
role of reducing neuron correlation and avoid overfitting.
|
|
7139
|
-
|
|
7140
|
-
Refer to :func:`mindspore.ops.dropout` for more details.
|
|
7141
|
-
|
|
7142
|
-
Args:
|
|
7143
|
-
keep_prob (float, optional): The keep rate, between 0 and 1, e.g. keep_prob = 0.9,
|
|
7144
|
-
means dropping out 10% of input units. Default: ``0.5`` .
|
|
7145
|
-
Seed0 (int, optional): Seed0 value for random generating. Default: ``0`` .
|
|
7146
|
-
Seed1 (int, optional): Seed1 value for random generating. Default: ``0`` .
|
|
7147
|
-
|
|
7148
|
-
Inputs:
|
|
7149
|
-
- **x** (Tensor) - The input Tensor of shape :math:`(*, N)`, with data type of float16, float32 or float64.
|
|
7150
|
-
|
|
7151
|
-
Outputs:
|
|
7152
|
-
- **output** (Tensor) - With the same shape and data type as `x`.
|
|
7153
|
-
- **mask** (Tensor) - The mask applied to `x`.
|
|
7154
|
-
|
|
7155
|
-
- On GPU and CPU, `mask` has the same shape and data type as `x`.
|
|
7156
|
-
- On Ascend, to achieve a better performance, it is denoted as a 1-D Tensor
|
|
7157
|
-
with Uint8 data type. It has shape :math:`(byte\_counts, )` where :math:`byte\_counts` is the
|
|
7158
|
-
number of bytes needed to mask the input `x`, :math:`byte\_counts` is calculated using the
|
|
7159
|
-
following formula:
|
|
7160
|
-
|
|
7161
|
-
.. math::
|
|
7162
|
-
|
|
7163
|
-
byte\_counts = \text{ceil}(\text{cumprod}(x.shape) / 128) * 16
|
|
7164
|
-
|
|
7165
|
-
If shape of `x` is :math:`(2, 3, 4, 5, 6)`, the shape of `mask` will be :math:`(96, )`.
|
|
7166
|
-
|
|
7167
|
-
Supported Platforms:
|
|
7168
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
7169
|
-
|
|
7170
|
-
Examples:
|
|
7171
|
-
>>> import mindspore
|
|
7172
|
-
>>> import numpy as np
|
|
7173
|
-
>>> from mindspore import Tensor, ops
|
|
7174
|
-
>>> dropout = ops.Dropout(keep_prob=0.5)
|
|
7175
|
-
>>> x = Tensor(np.ones([1, 2, 3, 4, 5]), mindspore.float32)
|
|
7176
|
-
>>> output, mask = dropout(x)
|
|
7177
|
-
>>> print(output.shape, mask.shape, mask.dtype)
|
|
7178
|
-
(1, 2, 3, 4, 5) (16,) UInt8
|
|
7179
|
-
"""
|
|
7180
|
-
|
|
7181
|
-
@prim_attr_register
|
|
7182
|
-
def __init__(self, keep_prob=0.5, Seed0=0, Seed1=0):
|
|
7183
|
-
"""Initialize Dropout."""
|
|
7184
|
-
self.seed0 = validator.check_value_type("Seed0", Seed0, [int], self.name)
|
|
7185
|
-
self.seed1 = validator.check_value_type("Seed1", Seed1, [int], self.name)
|
|
7186
|
-
self.keep_prob = validator.check_float_range(keep_prob, 0, 1, validator.INC_RIGHT, "keep_prob", self.name)
|
|
7187
|
-
self.add_prim_attr("side_effect_hidden", True)
|
|
7188
|
-
|
|
7189
|
-
def check_shape(self, x_shape):
|
|
7190
|
-
validator.check_int(len(x_shape), 1, validator.GE, "x_shape", self.name)
|
|
7191
|
-
|
|
7192
|
-
def check_dtype(self, x_dtype):
|
|
7193
|
-
valid_dtypes = (mstype.float16, mstype.float32, mstype.float64)
|
|
7194
|
-
validator.check_tensor_dtype_valid("x", x_dtype, valid_dtypes, self.name)
|
|
7195
|
-
|
|
7196
|
-
|
|
7197
5955
|
class Dropout2D(PrimitiveWithInfer):
|
|
7198
5956
|
r"""
|
|
7199
5957
|
During training, randomly zeroes some channels of the input tensor with probability :math:`1-keep\_prob`
|
|
@@ -7545,7 +6303,7 @@ class DynamicRNN(Primitive):
  - **w** (Tensor) - Weight. Tensor of shape :math:`(input\_size + hidden\_size, 4 * hidden\_size)`.
  The data type must be float16.
  - **b** (Tensor) - Bias. Tensor of shape :math:`(4 * hidden\_size)`.
- The data type must be float16
+ The data type must be float16.
  - **seq_length** (Tensor) - The length of each batch. Tensor of shape :math:`(batch\_size, )`.
  Only `None` is currently supported.
  - **init_h** (Tensor) - Hidden state of initial time. Tensor of shape :math:`(1, batch\_size, hidden\_size)`.
@@ -7614,6 +6372,7 @@ class DynamicRNN(Primitive):
  self.forget_bias = validator.check_value_type("forget_bias", forget_bias, [float], self.name)
  self.cell_depth = validator.check_value_type("cell_depth", cell_depth, [int], self.name)
  self.keep_prob = validator.check_value_type("keep_prob", keep_prob, [float], self.name)
+ validator.check_number_range(keep_prob, 0.0, 1.0, validator.INC_BOTH, float, "keep_prob")
  self.cell_clip = validator.check_value_type("cell_clip", cell_clip, [float], self.name)
  self.num_proj = validator.check_non_negative_int(num_proj, "num_proj", self.name)
  self.forget_bias = validator.check_value_type("forget_bias", forget_bias, [float], self.name)
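The added range check means `keep_prob` must now lie inside [0.0, 1.0] when the primitive is constructed. A tiny sketch of the effect, with the out-of-range call left commented rather than executed:

    from mindspore import ops

    # keep_prob within [0.0, 1.0] is accepted as before.
    rnn = ops.DynamicRNN(keep_prob=1.0)
    # ops.DynamicRNN(keep_prob=1.5)  # would now be rejected by the added range check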
@@ -7649,21 +6408,21 @@ class DynamicGRUV2(Primitive):
  :math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product.

  Args:
- direction (str): A string identifying the direction in the operator. Default: ``'UNIDIRECTIONAL'`` .
+ direction (str, optional): A string identifying the direction in the operator. Default: ``'UNIDIRECTIONAL'`` .
  Only ``'UNIDIRECTIONAL'`` is currently supported.
- cell_depth (int): An integer identifying the cell depth in the operator. Default: ``1`` .
- keep_prob (float): A float identifying the keep prob in the operator. Default: ``1.0`` .
- cell_clip (float): A float identifying the cell clip in the operator. Default: ``-1.0`` .
- num_proj (int): An integer identifying the number projection in the operator. Default: ``0`` .
- time_major (bool): A bool identifying the time major in the operator. Default: ``True`` .
- activation (str) : A string identifying the type of activation function in the operator.
+ cell_depth (int, optional): An integer identifying the cell depth in the operator. Default: ``1`` .
+ keep_prob (float, optional): A float identifying the keep prob in the operator. Default: ``1.0`` .
+ cell_clip (float, optional): A float identifying the cell clip in the operator. Default: ``-1.0`` .
+ num_proj (int, optional): An integer identifying the number projection in the operator. Default: ``0`` .
+ time_major (bool, optional): A bool identifying the time major in the operator. Default: ``True`` .
+ activation (str, optional) : A string identifying the type of activation function in the operator.
  Default: ``'tanh'`` . Only ``'tanh'`` is currently supported.
- gate_order (str): A string identifying the gate order in weight and bias. Default: ``'rzh'`` .
+ gate_order (str, optional): A string identifying the gate order in weight and bias. Default: ``'rzh'`` .
  ``'zrh'`` is another option. Here, ``'rzh'`` means the gate order is: reset gate, update gate, hidden gate.
  ``'zrh'`` means the gate order is: update gate, reset gate, hidden gate.
- reset_after (bool): A bool identifying whether to apply reset gate after matrix multiplication.
+ reset_after (bool, optional): A bool identifying whether to apply reset gate after matrix multiplication.
  Default: ``True`` .
- is_training (bool): A bool identifying is training in the operator. Default: ``True`` .
+ is_training (bool, optional): A bool identifying is training in the operator. Default: ``True`` .

  Inputs:
  - **x** (Tensor) - Current words.
@@ -8007,8 +6766,9 @@ class Conv3D(Primitive):

  Applies a 3D convolution over an input tensor which is typically of shape
  :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`,
- where :math:`N` is batch size, :math:`C` is channel number,
- :math:`H
+ where :math:`N` is batch size, :math:`C` is channel number,
+ :math:`D, H, W`
+ are the depth, height and width of the feature map, respectively.

  The output is calculated based on formula:

@@ -8019,25 +6779,30 @@ class Conv3D(Primitive):

  where :math:`bias` is the output channel bias, :math:`ccor` is
  the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
-
+ :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.

  Here are the indices' meanings:
- - :math:`i` corresponds to the batch number, ranging from 0 to N-1, where N is the batch size of the input.

- - :math:`
+ - :math:`i` corresponds to the batch number, the range is :math:`[0, N-1]`,
+ where :math:`N` is the batch size of the input.
+
+ - :math:`j` corresponds to the output channel, the range is :math:`[0, C_{out}-1]`,
+ where :math:`C_{out}` is the number of
  output channels, which is also equal to the number of kernels.

- - :math:`k` corresponds to the input channel,
+ - :math:`k` corresponds to the input channel, the range is :math:`[0, C_{in}-1]`,
+ where :math:`C_{in}` is the number of
  input channels, which is also equal to the number of channels in the convolutional kernels.

- Therefore, in the above formula, :math:`{bias}(C_{
- output channel, :math:`{weight}(C_{
+ Therefore, in the above formula, :math:`{bias}(C_{\text{out}_j})` represents the bias of the :math:`j`-th
+ output channel, :math:`{weight}(C_{\text{out}_j}, k)`represents the slice of the :math:`j`-th convolutional
  kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
  channel in the :math:`i`-th batch of the input feature map.

  The shape of the convolutional kernel is given by
  :math:`(\text{kernel_size[0]}, \text{kernel_size[1]}, \text{kernel_size[2]})`
- where :math
+ where :math:`\text{kernel_size[0]}` ,
+ :math:`\text{kernel_size[1]}` and :math:`\text{kernel_size[2]}` are the depth,
  height and width of the kernel, respectively.
  If we consider the input and output channels as well as the `group` parameter, the complete kernel shape
  will be :math:`(C_{out}, C_{in} / \text{group}, \text{kernel_size[0]},
@@ -8048,8 +6813,8 @@ class Conv3D(Primitive):
  <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.

  Note:
- 1. On Ascend platform,
- 2. On Ascend
+ 1. On Ascend platform, :math:`groups=1` must be satisfied.
+ 2. On Ascend :math:`dilation` on depth only supports the case of 1.

  Args:
  out_channel (int): Specifies output channel :math:`C_{out}`.
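To make the index and kernel-shape discussion above concrete, here is a shape-only sketch. The sizes are hypothetical, and the expected output shape assumes the default ``'valid'`` padding:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    # NCDHW input: N=16, C_in=3, D=10, H=32, W=32.
    x = Tensor(np.ones([16, 3, 10, 32, 32]), ms.float16)
    # Complete kernel shape (C_out, C_in / group, k_d, k_h, k_w) as described above.
    weight = Tensor(np.ones([32, 3, 4, 3, 3]), ms.float16)

    conv3d = ops.Conv3D(out_channel=32, kernel_size=(4, 3, 3))
    output = conv3d(x, weight)
    print(output.shape)  # expected (16, 32, 7, 30, 30)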
@@ -8671,7 +7436,7 @@ class Conv3DTranspose(Primitive):
  \times (\text{kernel_size}[2] - 1) + \text{output_padding}[2] + 1

  Note:
- In Ascend,
+ In Ascend, only support :math:`group=1`.

  Args:
  in_channel (int): The channel of the input x.
@@ -8739,7 +7504,7 @@ class Conv3DTranspose(Primitive):
  ValueError: If `pad` is a tuple whose length is not equal to 6.
  ValueError: If `pad_mode` is not equal to 'pad' and `pad` is not equal to (0, 0, 0, 0, 0, 0).
  ValueError: If `data_format` is not 'NCDHW'.
- TypeError: If data type of dout and weight is
+ TypeError: If data type of dout and weight is neither float16 nor float32.
  ValueError: If bias is not none. The rank of dout and weight is not 5.

  Supported Platforms:
@@ -9014,46 +7779,6 @@ class SoftShrink(Primitive):
|
|
|
9014
7779
|
validator.check_number("lambd", lambd, 0, validator.GE, self.name)
|
|
9015
7780
|
|
|
9016
7781
|
|
|
9017
|
-
class HShrink(Primitive):
|
|
9018
|
-
r"""
|
|
9019
|
-
Hard Shrink activation function.
|
|
9020
|
-
|
|
9021
|
-
Refer to :func:`mindspore.ops.hardshrink` for more details.
|
|
9022
|
-
|
|
9023
|
-
Args:
|
|
9024
|
-
lambd (float, optional): The threshold :math:`\lambda` defined by the Hard Shrink formula. Default: ``0.5`` .
|
|
9025
|
-
|
|
9026
|
-
Inputs:
|
|
9027
|
-
- **input_x** (Tensor) - The input of Hard Shrink with data type of float16 or float32.
|
|
9028
|
-
|
|
9029
|
-
Outputs:
|
|
9030
|
-
Tensor, the same shape and data type as the input.
|
|
9031
|
-
|
|
9032
|
-
Supported Platforms:
|
|
9033
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
9034
|
-
|
|
9035
|
-
Examples:
|
|
9036
|
-
>>> import mindspore as ms
|
|
9037
|
-
>>> import mindspore.ops as ops
|
|
9038
|
-
>>> from mindspore import Tensor, nn
|
|
9039
|
-
>>> import numpy as np
|
|
9040
|
-
>>> input_x = Tensor(np.array([[0.5, 1, 2.0], [0.0533, 0.0776, -2.1233]]), ms.float32)
|
|
9041
|
-
>>> hshrink = ops.HShrink()
|
|
9042
|
-
>>> output = hshrink(input_x)
|
|
9043
|
-
>>> print(output)
|
|
9044
|
-
[[ 0. 1. 2. ]
|
|
9045
|
-
[ 0. 0. -2.1233]]
|
|
9046
|
-
"""
|
|
9047
|
-
|
|
9048
|
-
@prim_attr_register
|
|
9049
|
-
def __init__(self, lambd=0.5):
|
|
9050
|
-
"""Initialize HShrink"""
|
|
9051
|
-
validator.check_value_type('lambd', lambd, [float], self.name)
|
|
9052
|
-
if lambd < 0.0:
|
|
9053
|
-
lambd = 0.0
|
|
9054
|
-
self.add_prim_attr('lambd', lambd)
|
|
9055
|
-
|
|
9056
|
-
|
|
9057
7782
|
class ApplyAdagradDA(Primitive):
|
|
9058
7783
|
r"""
|
|
9059
7784
|
Update `var` according to the proximal adagrad scheme.
|
|
@@ -9098,11 +7823,9 @@ class ApplyAdagradDA(Primitive):
|
|
|
9098
7823
|
- **global_step** ([Number, Tensor]) - Training step number. Must be a scalar. With int32 or int64 data type.
|
|
9099
7824
|
|
|
9100
7825
|
Outputs:
|
|
9101
|
-
Tuple of
|
|
7826
|
+
Tuple of 1 Tensors, the updated parameters.
|
|
9102
7827
|
|
|
9103
7828
|
- **var** (Tensor) - The same shape and data type as `var`.
|
|
9104
|
-
- **gradient_accumulator** (Tensor) - The same shape and data type as `gradient_accumulator`.
|
|
9105
|
-
- **gradient_squared_accumulator** (Tensor) - The same shape and data type as `gradient_squared_accumulator`.
|
|
9106
7829
|
|
|
9107
7830
|
Raises:
|
|
9108
7831
|
TypeError: If `var`, `gradient_accumulator` or `gradient_squared_accumulator` is not a Parameter.
|
|
@@ -9153,11 +7876,7 @@ class ApplyAdagradDA(Primitive):
|
|
|
9153
7876
|
>>> print(output)
|
|
9154
7877
|
(Tensor(shape=[2, 2], dtype=Float32, value=
|
|
9155
7878
|
[[-7.39064650e-04, -1.36888528e-03],
|
|
9156
|
-
[-5.96988888e-04, -1.42478070e-03]])
|
|
9157
|
-
[[ 4.00000006e-01, 7.00000048e-01],
|
|
9158
|
-
[ 2.00000003e-01, 6.99999988e-01]]), Tensor(shape=[2, 2], dtype=Float32, value=
|
|
9159
|
-
[[ 2.90000021e-01, 2.60000020e-01],
|
|
9160
|
-
[ 1.09999999e-01, 2.40000010e-01]]))
|
|
7879
|
+
[-5.96988888e-04, -1.42478070e-03]]))
|
|
9161
7880
|
"""
|
|
9162
7881
|
|
|
9163
7882
|
__mindspore_signature__ = (
|
|
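The Outputs hunk above trims ApplyAdagradDA's documented return value to a single-element tuple containing only the updated `var`. A minimal sketch of driving the primitive from a Cell, with illustrative names, shapes and values (not the package's own example):

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, Parameter, nn, ops

    class AdagradDANet(nn.Cell):
        """Illustrative wrapper holding the state Parameters that ApplyAdagradDA updates in place."""
        def __init__(self):
            super().__init__()
            self.apply_adagrad_da = ops.ApplyAdagradDA()
            self.var = Parameter(Tensor(np.array([[0.6, 0.4], [0.1, 0.5]], np.float32)), name="var")
            self.grad_accum = Parameter(Tensor(np.zeros((2, 2), np.float32)), name="gradient_accumulator")
            self.grad_sq_accum = Parameter(Tensor(np.zeros((2, 2), np.float32)), name="gradient_squared_accumulator")

        def construct(self, grad, lr, l1, l2, global_step):
            return self.apply_adagrad_da(self.var, self.grad_accum, self.grad_sq_accum,
                                         grad, lr, l1, l2, global_step)

    net = AdagradDANet()
    grad = Tensor(np.array([[0.3, 0.4], [0.1, 0.2]], np.float32))
    out = net(grad, Tensor(0.001, ms.float32), Tensor(0.001, ms.float32),
              Tensor(0.001, ms.float32), Tensor(2, ms.int64))
    # Per the 2.3.0rc1 docstring, `out` is a one-element tuple holding the updated `var`;
    # the accumulators are still updated in place as Parameters.
    print(out)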
@@ -9669,6 +8388,14 @@ class ApplyAdamWithAmsgradV2(Primitive):
|
|
|
9669
8388
|
var:=var-lr_t*m_t/(\sqrt{\hat v_t}+\epsilon) \\
|
|
9670
8389
|
\end{array}
|
|
9671
8390
|
|
|
8391
|
+
:math:`t` represents updating step while :math:`m` represents the 1st moment vector,
|
|
8392
|
+
:math:`v` represents the 2nd moment vector, :math:`\hat v_t` represents `vhat`,
|
|
8393
|
+
:math:`lr` represents learning rate,
|
|
8394
|
+
:math:`g` represents `grad`, :math:`\beta_1, \beta_2` represent `beta1` and `beta2`,
|
|
8395
|
+
:math:`\beta_1^{t}` represents `beta1_power`, :math:`\beta_2^{t}` represents `beta2_power`,
|
|
8396
|
+
:math:`var` represents the variable to be updated,
|
|
8397
|
+
:math:`\epsilon` represents `epsilon`.
|
|
8398
|
+
|
|
9672
8399
|
All of the inputs are consistent with implicit type conversion rules,
|
|
9673
8400
|
which ensure that the data types are the same. If they have different data types, the lower precision data type
|
|
9674
8401
|
will be converted to the data type with relatively higher precision.
|
|
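The added glossary names the symbols of the AMSGrad update that ApplyAdamWithAmsgradV2 implements. A plain NumPy sketch of those standard recurrences, as an illustration of the math only (not the fused primitive):

    import numpy as np

    def amsgrad_step(var, m, v, vhat, grad, lr, beta1, beta2, eps, beta1_power, beta2_power):
        # Standard AMSGrad recurrences matching the symbols documented above.
        lr_t = lr * np.sqrt(1.0 - beta2_power) / (1.0 - beta1_power)  # bias-corrected step size
        m = beta1 * m + (1.0 - beta1) * grad            # 1st moment vector m_t
        v = beta2 * v + (1.0 - beta2) * grad * grad     # 2nd moment vector v_t
        vhat = np.maximum(vhat, v)                      # vhat_t keeps the element-wise running max
        var = var - lr_t * m / (np.sqrt(vhat) + eps)    # parameter update
        return var, m, v, vhat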
@@ -9770,83 +8497,6 @@ class ApplyAdamWithAmsgradV2(Primitive):
|
|
|
9770
8497
|
self.add_prim_attr("side_effect_mem", True)
|
|
9771
8498
|
|
|
9772
8499
|
|
|
9773
|
-
class GridSampler3D(Primitive):
|
|
9774
|
-
"""
|
|
9775
|
-
Given an input and a grid, the output is calculated using the input values
|
|
9776
|
-
and pixel positions in the grid. Only volume (5-D) input is supported.
|
|
9777
|
-
|
|
9778
|
-
.. warning::
|
|
9779
|
-
This is an experimental API that is subject to change or deletion.
|
|
9780
|
-
|
|
9781
|
-
Refer to :func:`mindspore.ops.grid_sample` for more details.
|
|
9782
|
-
|
|
9783
|
-
Args:
|
|
9784
|
-
interpolation_mode (str, optional): An optional string specifying the interpolation method.
|
|
9785
|
-
The optional values are ``"bilinear"`` or ``"nearest"`` . Default: ``"bilinear"`` .
|
|
9786
|
-
|
|
9787
|
-
- ``"nearest"``: Nearest neighbor interpolation. Each output pixel is assigned the value of the
|
|
9788
|
-
nearest input pixel. This method is simple and fast but can result in blocky or pixelated outputs.
|
|
9789
|
-
- ``"bilinear"``: Bilinear interpolation. Each output pixel is a weighted average of the four nearest input
|
|
9790
|
-
pixels, computed using bilinear interpolation. This method produces smoother results compared
|
|
9791
|
-
to nearest neighbor interpolation.
|
|
9792
|
-
|
|
9793
|
-
padding_mode (str, optional): An optional string specifying the pad method.
|
|
9794
|
-
The optional values are ``"zeros"`` , ``"border"`` or ``"reflection"`` . Default: ``"zeros"`` .
|
|
9795
|
-
When the sampling grid is outside input's bounds, effects of various padding modes are as follows:
|
|
9796
|
-
|
|
9797
|
-
- ``"zeros"``: Pads the input tensor with zeros.
|
|
9798
|
-
- ``"border"``: Pads the input tensor with the values of the pixels on the border of the tensor.
|
|
9799
|
-
- ``"reflection"``: Pads the input tensor by reflecting the values of the pixels at the
|
|
9800
|
-
boundary of the tensor.
|
|
9801
|
-
|
|
9802
|
-
align_corners (bool, optional): An optional bool specifying alignment method. If set to ``True`` ,
|
|
9803
|
-
the extrema (-1 and 1) are considered as referring to
|
|
9804
|
-
the center points of the input’s corner pixels. If set to ``False`` , they are instead considered as
|
|
9805
|
-
referring to the corner points of the input’s corner pixels, making the sampling more resolution agnostic.
|
|
9806
|
-
Default: ``False`` .
|
|
9807
|
-
|
|
9808
|
-
Inputs:
|
|
9809
|
-
- **input_x** (Tensor) - A 5-D tensor with dtype of float16, float32 or float64
|
|
9810
|
-
and shape of :math:`(N, C, D_{in}, H_{in}, W_{in})`.
|
|
9811
|
-
- **grid** (Tensor) - A 5-D tensor whose dtype is the same as `input_x` and whose shape is :math:`(N, D_{out},
|
|
9812
|
-
H_{out}, W_{out}, 3)`.
|
|
9813
|
-
|
|
9814
|
-
Outputs:
|
|
9815
|
-
A 5-D Tensor whose dtype is the same as `input_x` and whose shape is :math:`(N, C, D_{out}, H_{out}, W_{out})`.
|
|
9816
|
-
|
|
9817
|
-
Supported Platforms:
|
|
9818
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
9819
|
-
|
|
9820
|
-
Examples:
|
|
9821
|
-
>>> import numpy as np
|
|
9822
|
-
>>> from mindspore import Tensor, ops
|
|
9823
|
-
>>> gridsampler = ops.GridSampler3D(interpolation_mode='bilinear', padding_mode='zeros', align_corners=True)
|
|
9824
|
-
>>> input_x = Tensor(np.arange(32).reshape((2, 2, 2, 2, 2)).astype(np.float32))
|
|
9825
|
-
>>> grid = Tensor(np.arange(-0.2, 1, 0.1).reshape((2, 2, 1, 1, 3)).astype(np.float32))
|
|
9826
|
-
>>> output = gridsampler(input_x, grid)
|
|
9827
|
-
>>> print(output)
|
|
9828
|
-
[[[[[ 3.3 ]]
|
|
9829
|
-
[[ 4.35 ]]]
|
|
9830
|
-
[[[11.300001]]
|
|
9831
|
-
[[12.349999]]]]
|
|
9832
|
-
[[[[21.4 ]]
|
|
9833
|
-
[[22.449999]]]
|
|
9834
|
-
[[[29.4 ]]
|
|
9835
|
-
[[30.449999]]]]]
|
|
9836
|
-
"""
|
|
9837
|
-
|
|
9838
|
-
@prim_attr_register
|
|
9839
|
-
def __init__(self, interpolation_mode='bilinear', padding_mode='zeros', align_corners=False):
|
|
9840
|
-
"""Initialize GridSampler3D."""
|
|
9841
|
-
validator.check_string(interpolation_mode, ['bilinear', 'nearest'], 'interpolation_mode', self.name)
|
|
9842
|
-
validator.check_string(padding_mode, ['zeros', 'border', 'reflection'], 'padding_mode', self.name)
|
|
9843
|
-
validator.check_bool(align_corners, 'align_corners', self.name)
|
|
9844
|
-
self.init_prim_io_names(inputs=['input_x', 'grid'], outputs=['output'])
|
|
9845
|
-
self.add_prim_attr('interpolation_mode', interpolation_mode)
|
|
9846
|
-
self.add_prim_attr('padding_mode', padding_mode)
|
|
9847
|
-
self.add_prim_attr('align_corners', align_corners)
|
|
9848
|
-
|
|
9849
|
-
|
|
9850
8500
|
class FractionalMaxPool(Primitive):
|
|
9851
8501
|
r"""
|
|
9852
8502
|
Performs fractional max pooling on the input.
|
|
@@ -10399,104 +9049,21 @@ class DeformableOffsets(Primitive):
|
|
|
10399
9049
|
self.add_prim_attr('modulated', self.modulated)
|
|
10400
9050
|
|
|
10401
9051
|
|
|
10402
|
-
class GridSampler2D(Primitive):
|
|
10403
|
-
"""
|
|
10404
|
-
This operation samples 2d `input_x` by using interpolation based on flow field grid,
|
|
10405
|
-
which is usually generated by :func:`mindspore.ops.affine_grid`.
|
|
10406
|
-
|
|
10407
|
-
.. warning::
|
|
10408
|
-
This is an experimental API that is subject to change or deletion.
|
|
10409
|
-
|
|
10410
|
-
Refer to :func:`mindspore.ops.grid_sample` for more details.
|
|
10411
|
-
|
|
10412
|
-
Args:
|
|
10413
|
-
interpolation_mode (str, optional): An optional string specifying the interpolation method.
|
|
10414
|
-
The optional values are
|
|
10415
|
-
``"bilinear"`` or ``"nearest"`` . Default: ``"bilinear"`` .
|
|
10416
|
-
|
|
10417
|
-
- ``"nearest"``: Nearest neighbor interpolation. Each output pixel is assigned the value of the
|
|
10418
|
-
nearest input pixel. This method is simple and fast but can result in blocky or pixelated outputs.
|
|
10419
|
-
- ``"bilinear"``: Bilinear interpolation. Each output pixel is a weighted average of the four nearest input
|
|
10420
|
-
pixels, computed using bilinear interpolation. This method produces smoother results compared
|
|
10421
|
-
to nearest neighbor interpolation.
|
|
10422
|
-
|
|
10423
|
-
padding_mode (str, optional): An optional string specifying the pad method.
|
|
10424
|
-
The optional values are ``"zeros"`` , ``"border"`` or ``"reflection"`` . Default: ``"zeros"`` .
|
|
10425
|
-
When the sampling grid is outside input's bounds, effects of various padding modes are as follows:
|
|
10426
|
-
|
|
10427
|
-
- ``"zeros"``: Pads the input tensor with zeros.
|
|
10428
|
-
- ``"border"``: Pads the input tensor with the values of the pixels on the border of the tensor.
|
|
10429
|
-
- ``"reflection"``: Pads the input tensor by reflecting the values of the pixels at the
|
|
10430
|
-
boundary of the tensor.
|
|
10431
|
-
|
|
10432
|
-
align_corners (bool, optional): An optional bool. When set to ``True`` ,
|
|
10433
|
-
the centers of the corner pixels of the input
|
|
10434
|
-
and output tensors are aligned. When set to ``False`` , it is not aligned. Default: ``False`` .
|
|
10435
|
-
|
|
10436
|
-
Inputs:
|
|
10437
|
-
- **input_x** (Tensor) - A 4-D tensor with shape
|
|
10438
|
-
:math:`(N, C, H_{in}, W_{in})`. Supported dtypes:
|
|
10439
|
-
|
|
10440
|
-
- Ascend: float16, float32.
|
|
10441
|
-
- GPU/CPU: float16, float32, float64.
|
|
10442
|
-
|
|
10443
|
-
- **grid** (Tensor) - A 4-D tensor whose dtype is the same as `input_x` and whose shape is
|
|
10444
|
-
:math:`(N, H_{out}, W_{out}, 2)`.
|
|
10445
|
-
Used to specify the sampling pixel locations normalized by the input spatial
|
|
10446
|
-
dimensions.
|
|
10447
|
-
|
|
10448
|
-
Outputs:
|
|
10449
|
-
A 4-D Tensor whose dtype is the same as `input_x` and whose shape is :math:`(N, C, H_{out}, W_{out})`.
|
|
10450
|
-
|
|
10451
|
-
Supported Platforms:
|
|
10452
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
10453
|
-
|
|
10454
|
-
Examples:
|
|
10455
|
-
>>> import numpy as np
|
|
10456
|
-
>>> from mindspore import Tensor, ops
|
|
10457
|
-
>>> gridsampler = ops.GridSampler2D(interpolation_mode='bilinear', padding_mode='zeros', align_corners=True)
|
|
10458
|
-
>>> input_x = Tensor(np.arange(16).reshape((2, 2, 2, 2)).astype(np.float32))
|
|
10459
|
-
>>> grid = Tensor(np.arange(-9, 9, 0.5).reshape((2, 3, 3, 2)).astype(np.float32))
|
|
10460
|
-
>>> output = gridsampler(input_x, grid)
|
|
10461
|
-
>>> print(output)
|
|
10462
|
-
[[[[ 0. 0. 0. ]
|
|
10463
|
-
[ 0. 0. 0. ]
|
|
10464
|
-
[ 0. 0. 0.5 ]]
|
|
10465
|
-
[[ 0. 0. 0. ]
|
|
10466
|
-
[ 0. 0. 0. ]
|
|
10467
|
-
[ 0. 1.5 4.5 ]]]
|
|
10468
|
-
[[[10. 8.25 1.375]
|
|
10469
|
-
[ 0. 0. 0. ]
|
|
10470
|
-
[ 0. 0. 0. ]]
|
|
10471
|
-
[[14. 11.25 1.875]
|
|
10472
|
-
[ 0. 0. 0. ]
|
|
10473
|
-
[ 0. 0. 0. ]]]]
|
|
10474
|
-
"""
|
|
10475
|
-
|
|
10476
|
-
@prim_attr_register
|
|
10477
|
-
def __init__(self, interpolation_mode='bilinear', padding_mode='zeros', align_corners=False):
|
|
10478
|
-
"""Initialize GridSampler2D."""
|
|
10479
|
-
validator.check_string(interpolation_mode, ['bilinear', 'nearest'], 'interpolation_mode', self.name)
|
|
10480
|
-
validator.check_string(padding_mode, ['zeros', 'border', 'reflection'], 'padding_mode', self.name)
|
|
10481
|
-
validator.check_bool(align_corners, 'align_corners', self.name)
|
|
10482
|
-
self.init_prim_io_names(inputs=['input', 'grid'], outputs=['output'])
|
|
10483
|
-
self.add_prim_attr('interpolation_mode', interpolation_mode)
|
|
10484
|
-
self.add_prim_attr('padding_mode', padding_mode)
|
|
10485
|
-
self.add_prim_attr('align_corners', align_corners)
|
|
10486
|
-
|
|
10487
|
-
|
|
10488
9052
|
class Pdist(Primitive):
|
|
10489
9053
|
r"""
|
|
10490
9054
|
Computes the p-norm distance between each pair of row vectors in the input.
|
|
10491
9055
|
|
|
10492
9056
|
Refer to :func:`mindspore.ops.pdist` for more details.
|
|
10493
9057
|
|
|
9058
|
+
Note:
|
|
9059
|
+
Because the pdist operator involves exponentiation, inf/nan results may be produced
|
|
9060
|
+
when float16 input is used. float32 input is recommended.
|
|
9061
|
+
|
|
10494
9062
|
Args:
|
|
10495
9063
|
p (float, optional): The order of norm distance, :math:`p∈[0, ∞)`. Default: ``2.0`` .
|
|
10496
9064
|
|
|
10497
9065
|
Inputs:
|
|
10498
|
-
- **x** (Tensor) - Input tensor
|
|
10499
|
-
one-dim or multi-dim. Supported dtypes: float16, float32 or float64.
|
|
9066
|
+
- **x** (Tensor) - Input tensor. Supported dtypes: float16, float32 or float64.
|
|
10500
9067
|
|
|
10501
9068
|
Outputs:
|
|
10502
9069
|
Tensor, has the same dtype as `x`.
|
|
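The new note recommends float32 input because the p-norm exponentiation can overflow in float16. A small sketch, assuming `ops.Pdist` is used directly, that casts the input to float32 before the call:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor(np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]), ms.float16)
    pdist = ops.Pdist(p=2.0)
    # Cast to float32 first, as the note added in 2.3.0rc1 recommends, to avoid inf/nan
    # from the exponentiation inside the p-norm.
    out = pdist(x.astype(ms.float32))
    print(out)
    # Pairwise Euclidean distances between the three rows:
    # [1.4142135 2.828427  1.4142135]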
@@ -11338,7 +9905,8 @@ class PromptFlashAttention(Primitive):
|
|
|
11338
9905
|
S -- Sequence length
|
|
11339
9906
|
H -- Hidden size
|
|
11340
9907
|
|
|
11341
|
-
|
|
9908
|
+
Note:
|
|
9909
|
+
This operator is experimental.
|
|
11342
9910
|
|
|
11343
9911
|
.. warning::
|
|
11344
9912
|
This is an experimental API that is subject to change or deletion.
|
|
@@ -11355,6 +9923,7 @@ class PromptFlashAttention(Primitive):
|
|
|
11355
9923
|
num_key_value_heads (int): head number of key/value, used in the GQA algorithm.
|
|
11356
9924
|
A value of 0 indicates that key and value have the same head num as query, in which case numHeads is used. Default: 0.
|
|
11357
9925
|
sparse_mode (int): Indicates the sparse mode. Default: 0.
|
|
9926
|
+
inner_precise (int): 0 for float16 high precision, 1 for high performance. Default: 1.
|
|
11358
9927
|
|
|
11359
9928
|
Inputs:
|
|
11360
9929
|
- **query** (Tensor) - The query tensor with data type of float16 or float32.
|
|
@@ -11365,8 +9934,8 @@ class PromptFlashAttention(Primitive):
|
|
|
11365
9934
|
Input tensor of shape :math:`(B, S, H)` / `(B, N, S, D)`.
|
|
11366
9935
|
- **attn_mask** (Tensor) - The attention mask tensor with data type of float16 or float32.
|
|
11367
9936
|
For each element, 0 indicates retention and 1 indicates discard. Input tensor of shape :math:`(B, 1, S, S)`.
|
|
11368
|
-
- **actual_seq_lengths** (Tensor): Describe actual sequence length of each input with data type of
|
|
11369
|
-
- **actual_seq_lengths_kv** (Tensor): Describe actual sequence length of each input with data type of
|
|
9937
|
+
- **actual_seq_lengths** (Tensor): Describes the actual sequence length of each input, with data type of int64.
|
|
9938
|
+
- **actual_seq_lengths_kv** (Tensor): Describes the actual sequence length of each key/value input, with data type of int64.
|
|
11370
9939
|
- **pse_shift** (Tensor) - The position encoding tensor with data type of float16 or float32.
|
|
11371
9940
|
- **deq_scale1** (Tensor)
|
|
11372
9941
|
- **quant_scale1** (Tensor)
|
|
@@ -11374,11 +9943,10 @@ class PromptFlashAttention(Primitive):
|
|
|
11374
9943
|
- **quant_scale2** (Tensor)
|
|
11375
9944
|
- **quant_offset2** (Tensor)
|
|
11376
9945
|
|
|
11377
|
-
|
|
11378
9946
|
Outputs:
|
|
11379
9947
|
- **attention_out** (Tensor) - Output tensor of shape :math:`(B, S, H)` / `(B, N, S, D)`.
|
|
11380
9948
|
|
|
11381
|
-
|
|
9949
|
+
Supported Platforms:
|
|
11382
9950
|
``Ascend``
|
|
11383
9951
|
|
|
11384
9952
|
Examples:
|
|
@@ -11392,15 +9960,16 @@ class PromptFlashAttention(Primitive):
|
|
|
11392
9960
|
>>> query = Tensor(np.ones((B, N, S, D), dtype=np.float16))
|
|
11393
9961
|
>>> key = Tensor(np.ones((B, N, S, D), dtype=np.float16))
|
|
11394
9962
|
>>> value = Tensor(np.ones((B, N, S, D), dtype=np.float16))
|
|
9963
|
+
>>> attn_mask = Tensor(np.ones((B, 1, S, S), dtype=np.float16))
|
|
11395
9964
|
>>> pfa = P.PromptFlashAttention(N, input_layout='BNSD')
|
|
11396
|
-
>>> out = pfa(query, key, value,
|
|
11397
|
-
>>> print(out
|
|
9965
|
+
>>> out = pfa(query, key, value, attn_mask, None, None, None, None, None, None, None, None)
|
|
9966
|
+
>>> print(out.shape)
|
|
11398
9967
|
(1, 16, 256, 16)
|
|
11399
9968
|
"""
|
|
11400
9969
|
|
|
11401
9970
|
@prim_attr_register
|
|
11402
|
-
def __init__(self, num_heads, scale_value=1.0, pre_tokens=
|
|
11403
|
-
num_key_value_heads=0, sparse_mode=0):
|
|
9971
|
+
def __init__(self, num_heads, scale_value=1.0, pre_tokens=214748647, next_tokens=0, input_layout='BSH',
|
|
9972
|
+
num_key_value_heads=0, sparse_mode=0, inner_precise=1):
|
|
11404
9973
|
"""Initialize PromptFlashAttention."""
|
|
11405
9974
|
validator.check_value_type('num_heads', num_heads, [int], self.name)
|
|
11406
9975
|
validator.check_value_type('scale_value', scale_value, [float], self.name)
|
|
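The constructor now also accepts `inner_precise`. A hedged sketch of instantiating PromptFlashAttention with the new attribute and calling it with the twelve documented inputs; the import path and the mask values are assumptions, not the package's own example:

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops.operations.nn_ops import PromptFlashAttention  # import path assumed

    B, N, S, D = 1, 16, 256, 16
    query = Tensor(np.ones((B, N, S, D), dtype=np.float16))
    key = Tensor(np.ones((B, N, S, D), dtype=np.float16))
    value = Tensor(np.ones((B, N, S, D), dtype=np.float16))
    attn_mask = Tensor(np.zeros((B, 1, S, S), dtype=np.float16))  # 0 keeps every position

    # inner_precise=1 selects the high-performance path per the new docstring; 0 selects
    # float16 high precision.
    pfa = PromptFlashAttention(N, input_layout='BNSD', inner_precise=1)
    # Unused optional inputs are passed as None, matching the corrected docstring example.
    out = pfa(query, key, value, attn_mask, None, None, None, None, None, None, None, None)
    print(out.shape)  # (1, 16, 256, 16)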
@@ -11409,69 +9978,165 @@ class PromptFlashAttention(Primitive):
|
|
|
11409
9978
|
validator.check_value_type('input_layout', input_layout, [str], self.name)
|
|
11410
9979
|
validator.check_value_type('num_key_value_heads', num_key_value_heads, [int], self.name)
|
|
11411
9980
|
validator.check_value_type('sparse_mode', sparse_mode, [int], self.name)
|
|
9981
|
+
validator.check_value_type('inner_precise', inner_precise, [int], self.name)
|
|
11412
9982
|
self.init_prim_io_names(inputs=["query", "key", "value", "attn_mask", "actual_seq_lengths",
|
|
11413
9983
|
"actual_seq_lengths_kv", "pse_shift", "deq_scale1", "quant_scale1",
|
|
11414
9984
|
"deq_scale2", "quant_scale2", "quant_offset2"],
|
|
11415
9985
|
outputs=["attention_out"])
|
|
11416
9986
|
|
|
11417
9987
|
|
|
9988
|
+
class IncreFlashAttention(Primitive):
|
|
9989
|
+
r"""
|
|
9990
|
+
The interface for incremental inference.
|
|
9991
|
+
|
|
9992
|
+
B -- Batch size
|
|
9993
|
+
|
|
9994
|
+
S -- Sequence length
|
|
9995
|
+
|
|
9996
|
+
H -- Hidden size
|
|
9997
|
+
|
|
9998
|
+
.. warning::
|
|
9999
|
+
This is an experimental API that is subject to change or deletion.
|
|
10000
|
+
If an optional input is not provided and has no default value, None must be passed in its place.
|
|
10001
|
+
|
|
10002
|
+
Args:
|
|
10003
|
+
- **num_heads** (int) - The number of heads.
|
|
10004
|
+
- **input_layout** (str) - The data layout of the input qkv; supports `BSH` and `BNSD`. Default: `BSH`.
|
|
10005
|
+
- **scale_value** (double) - The scale coefficient, which is used as the scalar of
|
|
10006
|
+
Muls in the calculation. Default: 1.0.
|
|
10007
|
+
- **num_key_value_heads** (int) - Head number of key/value, used in the GQA algorithm.
|
|
10008
|
+
A value of 0 indicates that key and value have the same head num as query, in which case numHeads is used. Default: 0.
|
|
10009
|
+
- **block_size** (int) - Default: 0.
|
|
10010
|
+
- **inner_precise** (int) - Default: 1.
|
|
10011
|
+
|
|
10012
|
+
Inputs:
|
|
10013
|
+
- **query** (Tensor) - The query tensor with data type of float16 or bfloat16.
|
|
10014
|
+
Input tensor of shape :math:`(B, 1, H)` / :math:`(B, N, 1, D)`.
|
|
10015
|
+
- **key** (TensorList) - The key tensor with data type of float16 or bfloat16.
|
|
10016
|
+
Input tensor of shape :math:`(B, S, H)` / :math:`(B, N, S, D)`.
|
|
10017
|
+
- **value** (TensorList) - The value tensor with data type of float16 or bfloat16.
|
|
10018
|
+
Input tensor of shape :math:`(B, S, H)` / :math:`(B, N, S, D)`.
|
|
10019
|
+
- **attn_mask** (Tensor) - The attention mask tensor with data type of float16 or bool.
|
|
10020
|
+
Input tensor of shape :math:`(B, S)` / :math:`(B, 1, S)` / :math:`(B, 1, 1, S)`.
|
|
10021
|
+
- **actual_seq_lengths** (Tensor) - Describes the actual sequence length of each input, with data type of int.
|
|
10022
|
+
- **pse_shift** (Tensor) - The position encoding tensor with data type of float16 or float32.
|
|
10023
|
+
- **dequant_scale1** (Tensor) - Quantization parameter, a tensor with data type of uint64.
|
|
10024
|
+
- **quant_scale1** (Tensor) - Quantization parameter, a tensor with data type of float.
|
|
10025
|
+
- **dequant_scale2** (Tensor) - Quantization parameter, a tensor with data type of uint64.
|
|
10026
|
+
- **quant_scale2** (Tensor) - Quantization parameter, a tensor with data type of float.
|
|
10027
|
+
- **quant_offset2** (Tensor) - Quantization parameter, a tensor with data type of float.
|
|
10028
|
+
- **antiquant_scale** (Tensor) - Quantization parameter, a tensor with data type of float.
|
|
10029
|
+
- **antiquant_offset** (Tensor) - Quantization parameter, a tensor with data type of float.
|
|
10030
|
+
- **block_table** (Tensor) - The tensor with data type of float.
|
|
10031
|
+
|
|
10032
|
+
Outputs:
|
|
10033
|
+
- **attention_out** (Tensor) - Output tensor of shape :math:`(B, 1, H)` / :math:`(B, N, 1, D)`.
|
|
10034
|
+
|
|
10035
|
+
Supported Platforms:
|
|
10036
|
+
``Ascend``
|
|
10037
|
+
"""
|
|
10038
|
+
|
|
10039
|
+
@prim_attr_register
|
|
10040
|
+
def __init__(self, num_heads, input_layout="BSH", scale_value=1.0, num_key_value_heads=0, block_size=0,
|
|
10041
|
+
inner_precise=1):
|
|
10042
|
+
"""Initialize IncreFlashAttention."""
|
|
10043
|
+
validator.check_value_type('num_heads', num_heads, [int], self.name)
|
|
10044
|
+
validator.check_value_type('input_layout', input_layout, [str], self.name)
|
|
10045
|
+
validator.check_value_type('scale_value', scale_value, [float], self.name)
|
|
10046
|
+
validator.check_value_type('num_key_value_heads', num_key_value_heads, [int], self.name)
|
|
10047
|
+
validator.check_value_type('block_size', block_size, [int], self.name)
|
|
10048
|
+
validator.check_value_type('inner_precise', inner_precise, [int], self.name)
|
|
10049
|
+
self.init_prim_io_names(inputs=["query", "key", "value", "attn_mask", "actual_seq_lengths", "pse_shift",
|
|
10050
|
+
"dequant_scale1", "quant_scale1", "dequant_scale2", "quant_scale2",
|
|
10051
|
+
"quant_offset2", "antiquant_scale", "antiquant_offset", "block_table"],
|
|
10052
|
+
outputs=["attention_out"])
|
|
10053
|
+
|
|
10054
|
+
|
|
11418
10055
|
class FlashAttentionScore(Primitive):
|
|
11419
10056
|
r"""
|
|
11420
10057
|
FlashAttentionScore.
|
|
10058
|
+
.. math::
|
|
10059
|
+
\begin{array}{ll} \\
|
|
10060
|
+
y = Dropout(Softmax(Mask(scale\_value \cdot (real\_shift + query \cdot key), attn\_mask), -1), keep\_prob) \\
|
|
10061
|
+
\quad \cdot value \\
|
|
10062
|
+
\end{array}
|
|
10063
|
+
|
|
11421
10064
|
.. warning::
|
|
11422
10065
|
This is an experimental API that is subject to change or deletion.
|
|
11423
10066
|
B -- Batch size
|
|
11424
|
-
S1 -- Sequence length of query
|
|
11425
|
-
S2 -- Sequence length of key and value
|
|
10067
|
+
S1 -- Sequence length of query. The value ranges from 1 to 32768 and is a multiple of 16.
|
|
10068
|
+
S2 -- Sequence length of key and value. The value ranges from 1 to 32768 and is a multiple of 16.
|
|
11426
10069
|
N1 -- Num heads of query
|
|
11427
10070
|
N2 -- Num heads of key and value, and N2 must be a factor of N1
|
|
11428
|
-
D --
|
|
10071
|
+
D -- Head size. Support value: 64, 80, 96, 120, 128 and 256.
|
|
11429
10072
|
H1 -- Hidden size of query, which equals to N1 * D
|
|
11430
10073
|
H2 -- Hidden size of key and value, which equals to N2 * D
|
|
11431
10074
|
Args:
|
|
11432
|
-
head_num (int): The head num of query.
|
|
10075
|
+
head_num (int): The head num of query. Default: 1.
|
|
11433
10076
|
keep_prob (float): The keep probability of dropout. Default: 1.0.
|
|
11434
|
-
scale_value (float): The scale
|
|
11435
|
-
pre_tokens (int):
|
|
11436
|
-
|
|
11437
|
-
|
|
11438
|
-
|
|
11439
|
-
|
|
10077
|
+
scale_value (float): The scale factor of score. Default: 1.0.
|
|
10078
|
+
pre_tokens (int): Parameter for sparse computation, represents how many tokens are counted forward.
|
|
10079
|
+
When sparse_mode is set to 1, 2, 3, or 5, this parameter does not take effect. Default: 2147483647.
|
|
10080
|
+
next_tokens (int): Parameter for sparse computation, represents how many tokens are counted backward.
|
|
10081
|
+
When sparse_mode is set to 1, 2, 3, or 5, this parameter does not take effect. Default: 2147483647.
|
|
10082
|
+
inner_precise (int): The parameter is reserved and not implemented yet. Default: 0.
|
|
10083
|
+
input_layout (str): Specifies the layout of input `query`, key and value. The value can be "BSH" or "BNSD".
|
|
11440
10084
|
Default: "BSH".
|
|
11441
|
-
sparse_mode (int): Default 0.
|
|
11442
|
-
|
|
11443
|
-
|
|
11444
|
-
|
|
10085
|
+
sparse_mode (int): Indicates the sparse mode. Default: 0.
|
|
10086
|
+
|
|
10087
|
+
- 0: Indicates the defaultMask mode. If attn_mask is not passed, the mask operation is not performed,
|
|
10088
|
+
and preTokens and nextTokens (internally assigned as INT_MAX) are ignored. If passed in, the full attn_mask
|
|
10089
|
+
matrix (S1 * S2) needs to be passed in, indicating that the part between preTokens and nextTokens needs to
|
|
10090
|
+
be calculated.
|
|
10091
|
+
- 1: Represents allMask, that is, passing in the complete attn_mask matrix.
|
|
10092
|
+
- 2: Represents the leftUpCausal mode, corresponding to the lower triangle scenario divided by the left
|
|
10093
|
+
vertex, and the optimized attn_mask matrix (2048*2048) is required.
|
|
10094
|
+
- 3: Represents the rightDownCausal mode, corresponding to the lower triangle scenario divided by the lower
|
|
10095
|
+
right vertex, and the optimized attn_mask matrix (2048*2048) is required.
|
|
10096
|
+
- 4: Represents the band scenario, that is, only the part between preTokens and nextTokens is calculated, and the
|
|
10097
|
+
optimized attn_mask matrix (2048*2048) is required.
|
|
10098
|
+
- 5: Represents the prefix scenario, that is, on the basis of rightDownCausal, a matrix with length S1 and
|
|
10099
|
+
width N is added to the left side. The value of N is obtained by the new input prefix, and the N value of
|
|
10100
|
+
each Batch axis is different. Not implemented yet.
|
|
10101
|
+
- 6: Represents the global scenario, not implemented yet.
|
|
10102
|
+
- 7: Represents the dilated scenario, not implemented yet.
|
|
10103
|
+
- 8: Represents the block_local scenario, not implemented yet.
|
|
10104
|
+
|
|
10105
|
+
Inputs:
|
|
10106
|
+
- **query** (Tensor[float16, bfloat16]) - The query tensor.
|
|
11445
10107
|
Input tensor of shape :math:`(B, S1, H1)` or `(B, N1, S1, D)`.
|
|
11446
|
-
- **key** (Tensor[float16,
|
|
10108
|
+
- **key** (Tensor[float16, bfloat16]) - The key tensor.
|
|
11447
10109
|
Input tensor of shape :math:`(B, S2, H2)` or `(B, N2, S2, D)`.
|
|
11448
|
-
- **value** (Tensor[float16,
|
|
10110
|
+
- **value** (Tensor[float16, bfloat16]) - The value tensor.
|
|
11449
10111
|
Input tensor of shape :math:`(B, S2, H2)` or `(B, N2, S2, D)`.
|
|
11450
|
-
- **real_shift** (Tensor[float16,
|
|
11451
|
-
|
|
11452
|
-
|
|
10112
|
+
- **real_shift** (Union[Tensor[float16, bfloat16], None]) - The position embedding code. If S is greater than
|
|
10113
|
+
1024 and the lower-triangle mask is used, pass only the last 1024 rows of the lower triangle for
|
|
10114
|
+
memory optimization.
|
|
10115
|
+
Input tensor of shape :math:`(B, N1, S1, S2)`, `(1, N1, S1, S2)`, `(B, N1, 1024, S2)`, `(1, N1, 1024, S2)`
|
|
10116
|
+
or (1024, 1024).
|
|
10117
|
+
- **drop_mask** (Union[Tensor[uint8], None]) - The dropout mask tensor.
|
|
11453
10118
|
Input tensor of shape :math:`(B, N1, S1, S2 // 8)` or None.
|
|
11454
|
-
- **padding_mask** (None) -
|
|
11455
|
-
- **attn_mask** (Tensor[uint8], None) - The attention mask tensor.
|
|
11456
|
-
|
|
11457
|
-
|
|
11458
|
-
- **prefix** (Tensor[int64], None) -
|
|
10119
|
+
- **padding_mask** (None) - Reserved parameter. Not implemented yet.
|
|
10120
|
+
- **attn_mask** (Union[Tensor[uint8], None]) - The attention mask tensor. For each element, 0 indicates
|
|
10121
|
+
retention and 1 indicates discard. Input tensor of shape :math:`(B, N1, S1, S2)`, `(B, 1, S1, S2)`, `(S1, S2)`
|
|
10122
|
+
or (2048, 2048).
|
|
10123
|
+
- **prefix** (Union[Tensor[int64], None]) - N value of each Batch in the prefix sparse calculation scenario.
|
|
11459
10124
|
Input tensor of shape :math:`(B,)`.
|
|
11460
10125
|
|
|
11461
10126
|
Outputs:
|
|
11462
10127
|
- **softmax_max** (Tensor[float32]) - (B, N1, S1, 8)
|
|
11463
10128
|
- **softmax_sum** (Tensor[float32]) - (B, N1, S1, 8)
|
|
11464
|
-
- **softmax_out** (Tensor[
|
|
11465
|
-
- **attention_out** (Tensor[float16,
|
|
10129
|
+
- **softmax_out** (Tensor[float16, bfloat16]) - Reserved output; it can be ignored. Output tensor of shape :math:`()`.
|
|
10130
|
+
- **attention_out** (Tensor[float16, bfloat16]) - The output of attention; its shape and data type
|
|
11466
10131
|
are the same as the query.
|
|
11467
10132
|
|
|
11468
10133
|
Supported Platforms:
|
|
11469
|
-
``
|
|
10134
|
+
``Ascend910B``
|
|
11470
10135
|
"""
|
|
11471
10136
|
|
|
11472
10137
|
@prim_attr_register
|
|
11473
|
-
def __init__(self, head_num, keep_prob=1.0, scale_value=1.0, pre_tokens=
|
|
11474
|
-
input_layout="BSH", sparse_mode=0):
|
|
10138
|
+
def __init__(self, head_num=1, keep_prob=1.0, scale_value=1.0, pre_tokens=2147483647, next_tokens=2147483647,
|
|
10139
|
+
inner_precise=0, input_layout="BSH", sparse_mode=0):
|
|
11475
10140
|
"""Initialize FlashAttentionScore"""
|
|
11476
10141
|
validator.check_value_type('head_num', head_num, [int], self.name)
|
|
11477
10142
|
validator.check_value_type('keep_prob', keep_prob, [int, float], self.name)
|
|
@@ -11482,11 +10147,15 @@ class FlashAttentionScore(Primitive):
|
|
|
11482
10147
|
validator.check_value_type('next_tokens', next_tokens, [int], self.name)
|
|
11483
10148
|
validator.check_value_type('inner_precise', inner_precise, [int], self.name)
|
|
11484
10149
|
validator.check_value_type('sparse_mode', sparse_mode, [int], self.name)
|
|
10150
|
+
valid_sparse_mode = [0, 1, 2, 3, 4]
|
|
10151
|
+
if sparse_mode not in valid_sparse_mode:
|
|
10152
|
+
raise ValueError(f"Attribute 'sparse_mode' must be one of {valid_sparse_mode}, but got {sparse_mode}")
|
|
11485
10153
|
if inner_precise not in [0]:
|
|
11486
10154
|
raise ValueError(f"Attribute 'inner_precise' must be 0, but got {inner_precise}")
|
|
11487
10155
|
validator.check_value_type('input_layout', input_layout, [str], self.name)
|
|
11488
|
-
|
|
11489
|
-
|
|
10156
|
+
support_layout = ["BSH", "BNSD"]
|
|
10157
|
+
if input_layout not in support_layout:
|
|
10158
|
+
raise ValueError(f"Attribute 'input_layout' must be one of {support_layout}, but got {input_layout}")
|
|
11490
10159
|
self.init_prim_io_names(
|
|
11491
10160
|
inputs=['query', 'key', 'value', 'real_shift', 'drop_mask', 'padding_mask', 'attn_mask', 'prefix'],
|
|
11492
10161
|
outputs=['softmax_max', 'softmax_sum', 'softmax_out', 'attention_out'])
|
|
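A hedged sketch of calling FlashAttentionScore with the eight documented inputs in BSH layout (Ascend 910B only, per the docstring); the import path, shapes and values are illustrative assumptions:

    import numpy as np
    from mindspore import Tensor
    from mindspore.ops.operations.nn_ops import FlashAttentionScore  # import path assumed

    B, S, N1, D = 1, 256, 4, 128          # S is a multiple of 16, D is in the supported set
    H1 = N1 * D
    query = Tensor(np.ones((B, S, H1), dtype=np.float16))
    key = Tensor(np.ones((B, S, H1), dtype=np.float16))
    value = Tensor(np.ones((B, S, H1), dtype=np.float16))

    fas = FlashAttentionScore(head_num=N1, input_layout="BSH")
    # Unused optional inputs (real_shift, drop_mask, padding_mask, attn_mask, prefix) are passed as None.
    softmax_max, softmax_sum, softmax_out, attention_out = fas(
        query, key, value, None, None, None, None, None)
    print(attention_out.shape)  # (1, 256, 512) -> same layout and shape as query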
@@ -11529,55 +10198,12 @@ class RmsNorm(Primitive):
|
|
|
11529
10198
|
self.init_prim_io_names(inputs=['x', 'gamma'], outputs=["y", "rstd"])
|
|
11530
10199
|
|
|
11531
10200
|
|
|
11532
|
-
class
|
|
11533
|
-
r"""
|
|
11534
|
-
.. warning::
|
|
11535
|
-
This is an experimental API that is subject to change or deletion.
|
|
11536
|
-
"""
|
|
11537
|
-
@prim_attr_register
|
|
11538
|
-
def __init__(self, head_num, scale_value=1.0, kv_head_num=0):
|
|
11539
|
-
"""Initialize PagedAttention"""
|
|
11540
|
-
validator.check_value_type('head_num', head_num, [int], self.name)
|
|
11541
|
-
validator.check_value_type('scale_value', scale_value, [float], self.name) # scale after qkbmm
|
|
11542
|
-
validator.check_value_type('kv_head_num', kv_head_num, [int], self.name) # for MQA
|
|
11543
|
-
self.init_prim_io_names(
|
|
11544
|
-
inputs=['query', 'key_cache', 'value_cache', 'block_tables', 'context_lens'],
|
|
11545
|
-
outputs=['attention_out'])
|
|
11546
|
-
|
|
11547
|
-
|
|
11548
|
-
class PagedAttentionMask(Primitive):
|
|
10201
|
+
class MatmulQkv(Primitive):
|
|
11549
10202
|
r"""
|
|
11550
|
-
|
|
11551
|
-
This is an experimental API that is subject to change or deletion.
|
|
11552
|
-
"""
|
|
11553
|
-
@prim_attr_register
|
|
11554
|
-
def __init__(self, head_num, scale_value=1.0, kv_head_num=0):
|
|
11555
|
-
"""Initialize PagedAttentionMask"""
|
|
11556
|
-
validator.check_value_type('head_num', head_num, [int], self.name)
|
|
11557
|
-
validator.check_value_type('scale_value', scale_value, [float], self.name) # scale after qkbmm
|
|
11558
|
-
validator.check_value_type('kv_head_num', kv_head_num, [int], self.name) # for MQA
|
|
11559
|
-
self.init_prim_io_names(
|
|
11560
|
-
inputs=['query', 'key_cache', 'value_cache', 'block_tables', 'context_lens', 'alibi_mask'],
|
|
11561
|
-
outputs=['attention_out'])
|
|
11562
|
-
|
|
11563
|
-
|
|
11564
|
-
class ReshapeAndCache(Primitive):
|
|
11565
|
-
r"""
|
|
11566
|
-
.. warning::
|
|
11567
|
-
This is an experimental API that is subject to change or deletion.
|
|
10203
|
+
Fuses the three matmul ops that project q, k and v for attention into a single op.
|
|
11568
10204
|
"""
|
|
11569
|
-
__mindspore_signature__ = (
|
|
11570
|
-
sig.make_sig('key', dtype=sig.sig_dtype.T),
|
|
11571
|
-
sig.make_sig('value', dtype=sig.sig_dtype.T),
|
|
11572
|
-
sig.make_sig('key_cache', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
|
|
11573
|
-
sig.make_sig('value_cache', sig.sig_rw.RW_WRITE, dtype=sig.sig_dtype.T),
|
|
11574
|
-
sig.make_sig('slot_mapping', dtype=sig.sig_dtype.T1),
|
|
11575
|
-
)
|
|
11576
|
-
|
|
11577
10205
|
@prim_attr_register
|
|
11578
10206
|
def __init__(self):
|
|
11579
|
-
"""Initialize
|
|
11580
|
-
self.init_prim_io_names(
|
|
11581
|
-
|
|
11582
|
-
outputs=['key_out'])
|
|
11583
|
-
self.add_prim_attr('side_effect_mem', True)
|
|
10207
|
+
"""Initialize"""
|
|
10208
|
+
self.init_prim_io_names(inputs=['hidden_states', 'weight_q', 'weight_k', 'weight_v'],
|
|
10209
|
+
outputs=["output_q", "output_k", "output_v"])
|
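MatmulQkv fuses the three Q/K/V projection matmuls into one op. A NumPy sketch of the unfused computation it replaces; the weight orientation (hidden_size x hidden_size, right-multiplied) is an assumption for illustration:

    import numpy as np

    # Unfused reference for what MatmulQkv computes in one kernel:
    # three independent projections of the same hidden_states.
    hidden_states = np.random.randn(8, 512).astype(np.float16)   # (tokens, hidden)
    weight_q = np.random.randn(512, 512).astype(np.float16)
    weight_k = np.random.randn(512, 512).astype(np.float16)
    weight_v = np.random.randn(512, 512).astype(np.float16)

    output_q = hidden_states @ weight_q
    output_k = hidden_states @ weight_k
    output_v = hidden_states @ weight_v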