mindspore 2.2.11-cp39-cp39-win_amd64.whl → 2.3.0-cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
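As a rough illustration of how a file-level listing like the one below can be reproduced, the sketch that follows compares the archive members of the two wheels with Python's standard zipfile module. This is not the registry's own tooling; the local wheel file names are assumptions for illustration, and it reports added, removed, and changed members rather than per-file line counts.

import zipfile

# Assumed local copies of the two wheels being compared (names are illustrative).
OLD_WHL = "mindspore-2.2.11-cp39-cp39-win_amd64.whl"
NEW_WHL = "mindspore-2.3.0-cp39-cp39-win_amd64.whl"

def members(path):
    # Map each archive member to its stored CRC32 so unchanged files can be skipped cheaply.
    with zipfile.ZipFile(path) as zf:
        return {info.filename: info.CRC for info in zf.infolist()}

old, new = members(OLD_WHL), members(NEW_WHL)
for name in sorted(set(new) - set(old)):
    print("added:  ", name)
for name in sorted(set(old) - set(new)):
    print("removed:", name)
for name in sorted(n for n in set(old) & set(new) if old[n] != new[n]):
    print("changed:", name)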
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +7 -5
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +76 -18
- mindspore/_extends/builtin_operations.py +2 -1
- mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
- mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
- mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
- mindspore/_extends/parse/__init__.py +18 -14
- mindspore/_extends/parse/compile_config.py +258 -0
- mindspore/_extends/parse/namespace.py +2 -2
- mindspore/_extends/parse/parser.py +174 -62
- mindspore/_extends/parse/resources.py +45 -14
- mindspore/_extends/parse/standard_method.py +142 -240
- mindspore/{ops/_op_impl/tbe/atomic_addr_clean.py → _extends/pijit/__init__.py} +6 -16
- mindspore/_extends/pijit/pijit_func_white_list.py +343 -0
- mindspore/_extends/remote/kernel_build_server.py +2 -0
- mindspore/_profiler.py +30 -0
- mindspore/amp.py +51 -24
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/base.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +2 -2
- mindspore/boost/grad_freeze.py +2 -2
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/boost/less_batch_normalization.py +9 -6
- mindspore/common/__init__.py +15 -4
- mindspore/common/_jit_fallback_utils.py +2 -3
- mindspore/common/_register_for_adapter.py +7 -0
- mindspore/common/_register_for_recompute.py +48 -0
- mindspore/common/_register_for_tensor.py +8 -9
- mindspore/common/_stub_tensor.py +7 -1
- mindspore/common/_utils.py +5 -17
- mindspore/common/api.py +411 -106
- mindspore/common/auto_dynamic_shape.py +27 -14
- mindspore/common/dtype.py +17 -10
- mindspore/common/dump.py +6 -8
- mindspore/common/file_system.py +48 -0
- mindspore/common/generator.py +260 -0
- mindspore/common/hook_handle.py +51 -4
- mindspore/common/initializer.py +1 -1
- mindspore/common/jit_config.py +34 -14
- mindspore/common/lazy_inline.py +72 -19
- mindspore/common/mindir_util.py +12 -2
- mindspore/common/mutable.py +79 -14
- mindspore/common/no_inline.py +54 -0
- mindspore/common/np_dtype.py +25 -0
- mindspore/common/parameter.py +30 -11
- mindspore/common/recompute.py +262 -0
- mindspore/common/seed.py +9 -9
- mindspore/common/sparse_tensor.py +272 -24
- mindspore/common/symbol.py +122 -0
- mindspore/common/tensor.py +468 -496
- mindspore/communication/__init__.py +6 -11
- mindspore/communication/_comm_helper.py +5 -0
- mindspore/communication/comm_func.py +1140 -0
- mindspore/communication/management.py +118 -102
- mindspore/config/op_info.config +22 -54
- mindspore/context.py +378 -65
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +6 -6
- mindspore/dataset/audio/transforms.py +711 -158
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/engine/cache_client.py +2 -2
- mindspore/dataset/engine/datasets.py +163 -83
- mindspore/dataset/engine/datasets_audio.py +14 -14
- mindspore/dataset/engine/datasets_standard_format.py +33 -3
- mindspore/dataset/engine/datasets_text.py +38 -38
- mindspore/dataset/engine/datasets_user_defined.py +78 -59
- mindspore/dataset/engine/datasets_vision.py +77 -73
- mindspore/dataset/engine/offload.py +5 -7
- mindspore/dataset/engine/queue.py +56 -38
- mindspore/dataset/engine/validators.py +11 -5
- mindspore/dataset/text/__init__.py +3 -3
- mindspore/dataset/text/transforms.py +408 -121
- mindspore/dataset/text/utils.py +9 -9
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/transforms.py +261 -76
- mindspore/dataset/utils/browse_dataset.py +9 -9
- mindspore/dataset/vision/__init__.py +8 -8
- mindspore/dataset/vision/c_transforms.py +10 -10
- mindspore/dataset/vision/py_transforms_util.py +3 -3
- mindspore/dataset/vision/transforms.py +2844 -549
- mindspore/dataset/vision/utils.py +161 -10
- mindspore/dataset/vision/validators.py +14 -2
- mindspore/dnnl.dll +0 -0
- mindspore/experimental/optim/__init__.py +12 -2
- mindspore/experimental/optim/adadelta.py +161 -0
- mindspore/experimental/optim/adagrad.py +168 -0
- mindspore/experimental/optim/adam.py +35 -34
- mindspore/experimental/optim/adamax.py +170 -0
- mindspore/experimental/optim/adamw.py +40 -16
- mindspore/experimental/optim/asgd.py +153 -0
- mindspore/experimental/optim/lr_scheduler.py +71 -127
- mindspore/experimental/optim/nadam.py +157 -0
- mindspore/experimental/optim/optimizer.py +15 -8
- mindspore/experimental/optim/radam.py +194 -0
- mindspore/experimental/optim/rmsprop.py +154 -0
- mindspore/experimental/optim/rprop.py +164 -0
- mindspore/experimental/optim/sgd.py +28 -19
- mindspore/hal/__init__.py +40 -0
- mindspore/hal/_ascend.py +57 -0
- mindspore/hal/_base.py +57 -0
- mindspore/hal/_cpu.py +56 -0
- mindspore/hal/_gpu.py +57 -0
- mindspore/hal/device.py +356 -0
- mindspore/hal/event.py +179 -0
- mindspore/hal/memory.py +326 -0
- mindspore/hal/stream.py +339 -0
- mindspore/include/api/data_type.h +2 -2
- mindspore/include/api/dual_abi_helper.h +16 -3
- mindspore/include/api/model.h +4 -3
- mindspore/include/api/status.h +14 -0
- mindspore/include/c_api/model_c.h +173 -0
- mindspore/include/c_api/ms/base/types.h +1 -0
- mindspore/include/c_api/types_c.h +19 -0
- mindspore/include/dataset/execute.h +1 -3
- mindspore/include/dataset/vision.h +54 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +2 -2
- mindspore/mindrecord/__init__.py +5 -1
- mindspore/mindrecord/config.py +809 -0
- mindspore/mindrecord/filereader.py +25 -0
- mindspore/mindrecord/filewriter.py +76 -58
- mindspore/mindrecord/mindpage.py +40 -6
- mindspore/mindrecord/shardutils.py +3 -2
- mindspore/mindrecord/shardwriter.py +7 -0
- mindspore/mindrecord/tools/cifar100_to_mr.py +53 -66
- mindspore/mindrecord/tools/cifar10_to_mr.py +48 -63
- mindspore/mindrecord/tools/csv_to_mr.py +7 -17
- mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
- mindspore/mindrecord/tools/mnist_to_mr.py +11 -21
- mindspore/mindrecord/tools/tfrecord_to_mr.py +2 -10
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_np_dtype.dll +0 -0
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/mint/__init__.py +1137 -0
- mindspore/{rewrite/ast_transformers → mint/linalg}/__init__.py +9 -4
- mindspore/mint/nn/__init__.py +512 -0
- mindspore/mint/nn/functional.py +573 -0
- mindspore/mint/optim/__init__.py +24 -0
- mindspore/mint/optim/adamw.py +185 -0
- mindspore/multiprocessing/__init__.py +72 -0
- mindspore/nn/__init__.py +1 -0
- mindspore/nn/cell.py +213 -257
- mindspore/nn/dynamic_lr.py +2 -2
- mindspore/nn/extend/__init__.py +29 -0
- mindspore/nn/extend/basic.py +140 -0
- mindspore/nn/extend/embedding.py +143 -0
- mindspore/{rewrite/ast_creator_register.py → nn/extend/layer/__init__.py} +9 -19
- mindspore/nn/extend/layer/normalization.py +109 -0
- mindspore/nn/extend/pooling.py +117 -0
- mindspore/nn/layer/activation.py +84 -94
- mindspore/nn/layer/basic.py +177 -82
- mindspore/nn/layer/channel_shuffle.py +3 -16
- mindspore/nn/layer/container.py +3 -3
- mindspore/nn/layer/conv.py +75 -66
- mindspore/nn/layer/embedding.py +103 -45
- mindspore/nn/layer/embedding_service.py +531 -0
- mindspore/nn/layer/embedding_service_layer.py +393 -0
- mindspore/nn/layer/image.py +4 -7
- mindspore/nn/layer/math.py +1 -1
- mindspore/nn/layer/normalization.py +52 -66
- mindspore/nn/layer/padding.py +30 -39
- mindspore/nn/layer/pooling.py +18 -9
- mindspore/nn/layer/rnn_cells.py +6 -16
- mindspore/nn/layer/rnns.py +6 -5
- mindspore/nn/layer/thor_layer.py +1 -2
- mindspore/nn/layer/timedistributed.py +1 -1
- mindspore/nn/layer/transformer.py +52 -50
- mindspore/nn/learning_rate_schedule.py +6 -5
- mindspore/nn/loss/loss.py +63 -84
- mindspore/nn/optim/ada_grad.py +6 -4
- mindspore/nn/optim/adadelta.py +3 -1
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +102 -181
- mindspore/nn/optim/adamax.py +4 -2
- mindspore/nn/optim/adasum.py +3 -3
- mindspore/nn/optim/asgd.py +4 -2
- mindspore/nn/optim/ftrl.py +31 -61
- mindspore/nn/optim/lamb.py +5 -3
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +6 -4
- mindspore/nn/optim/momentum.py +13 -25
- mindspore/nn/optim/optimizer.py +6 -3
- mindspore/nn/optim/proximal_ada_grad.py +4 -2
- mindspore/nn/optim/rmsprop.py +9 -3
- mindspore/nn/optim/rprop.py +4 -2
- mindspore/nn/optim/sgd.py +7 -4
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
- mindspore/nn/probability/distribution/beta.py +2 -2
- mindspore/nn/probability/distribution/categorical.py +4 -6
- mindspore/nn/probability/distribution/cauchy.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -2
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/logistic.py +1 -1
- mindspore/nn/probability/distribution/poisson.py +2 -2
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +13 -1
- mindspore/nn/wrap/__init__.py +2 -1
- mindspore/nn/wrap/cell_wrapper.py +58 -13
- mindspore/nn/wrap/grad_reducer.py +148 -8
- mindspore/nn/wrap/loss_scale.py +32 -9
- mindspore/numpy/__init__.py +2 -0
- mindspore/numpy/array_creations.py +2 -0
- mindspore/numpy/array_ops.py +6 -6
- mindspore/numpy/dtypes.py +3 -3
- mindspore/numpy/fft.py +431 -0
- mindspore/numpy/math_ops.py +61 -67
- mindspore/numpy/utils.py +3 -0
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +8 -4
- mindspore/ops/_grad_experimental/grad_array_ops.py +4 -160
- mindspore/ops/_grad_experimental/grad_comm_ops.py +93 -36
- mindspore/ops/_grad_experimental/grad_inner_ops.py +8 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +92 -287
- mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
- mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
- mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
- mindspore/ops/_op_impl/__init__.py +0 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +1 -0
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
- mindspore/ops/_op_impl/{cpu/concat.py → aicpu/generate_eod_mask.py} +16 -17
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
- mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -3
- mindspore/ops/_op_impl/cpu/adam.py +2 -2
- mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
- mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
- mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
- mindspore/ops/_vmap/vmap_array_ops.py +164 -101
- mindspore/ops/_vmap/vmap_base.py +8 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +143 -58
- mindspore/ops/_vmap/vmap_image_ops.py +70 -13
- mindspore/ops/_vmap/vmap_math_ops.py +130 -58
- mindspore/ops/_vmap/vmap_nn_ops.py +249 -115
- mindspore/ops/_vmap/vmap_other_ops.py +1 -1
- mindspore/ops/auto_generate/__init__.py +31 -0
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +231 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +250 -0
- mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
- mindspore/ops/auto_generate/gen_extend_func.py +980 -0
- mindspore/ops/auto_generate/gen_ops_def.py +6443 -0
- mindspore/ops/auto_generate/gen_ops_prim.py +13167 -0
- mindspore/ops/auto_generate/pyboost_inner_prim.py +429 -0
- mindspore/ops/composite/__init__.py +5 -2
- mindspore/ops/composite/base.py +121 -23
- mindspore/ops/composite/math_ops.py +10 -49
- mindspore/ops/composite/multitype_ops/_compile_utils.py +191 -618
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +25 -134
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
- mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
- mindspore/ops/deprecated.py +14 -3
- mindspore/ops/extend/__init__.py +53 -0
- mindspore/ops/extend/array_func.py +218 -0
- mindspore/ops/extend/math_func.py +76 -0
- mindspore/ops/extend/nn_func.py +308 -0
- mindspore/ops/function/__init__.py +31 -11
- mindspore/ops/function/array_func.py +848 -1736
- mindspore/ops/function/clip_func.py +19 -31
- mindspore/ops/function/debug_func.py +2 -5
- mindspore/ops/function/fft_func.py +31 -0
- mindspore/ops/function/grad/grad_func.py +27 -20
- mindspore/ops/function/image_func.py +27 -21
- mindspore/ops/function/linalg_func.py +30 -53
- mindspore/ops/function/math_func.py +916 -2791
- mindspore/ops/function/nn_func.py +1445 -889
- mindspore/ops/function/other_func.py +6 -7
- mindspore/ops/function/parameter_func.py +6 -92
- mindspore/ops/function/random_func.py +254 -108
- mindspore/ops/function/reshard_func.py +102 -0
- mindspore/ops/function/sparse_func.py +4 -4
- mindspore/ops/function/sparse_unary_func.py +11 -18
- mindspore/ops/function/spectral_func.py +1 -1
- mindspore/ops/function/vmap_func.py +15 -14
- mindspore/ops/functional.py +342 -343
- mindspore/ops/op_info_register.py +16 -43
- mindspore/ops/operations/__init__.py +32 -23
- mindspore/ops/operations/_embedding_cache_ops.py +1 -1
- mindspore/ops/operations/_grad_ops.py +21 -853
- mindspore/ops/operations/_infer_ops.py +19 -0
- mindspore/ops/operations/_inner_ops.py +155 -511
- mindspore/ops/operations/_quant_ops.py +4 -4
- mindspore/ops/operations/_rl_inner_ops.py +3 -3
- mindspore/ops/operations/_scalar_ops.py +5 -480
- mindspore/ops/operations/_sequence_ops.py +6 -36
- mindspore/ops/operations/_tensor_array.py +8 -8
- mindspore/ops/operations/array_ops.py +112 -2698
- mindspore/ops/operations/comm_ops.py +801 -118
- mindspore/ops/operations/custom_ops.py +62 -121
- mindspore/ops/operations/debug_ops.py +105 -36
- mindspore/ops/operations/image_ops.py +3 -219
- mindspore/ops/operations/inner_ops.py +54 -40
- mindspore/ops/operations/linalg_ops.py +1 -49
- mindspore/ops/operations/manually_defined/__init__.py +24 -0
- mindspore/ops/operations/manually_defined/_inner.py +61 -0
- mindspore/ops/operations/manually_defined/ops_def.py +2016 -0
- mindspore/ops/operations/math_ops.py +621 -4654
- mindspore/ops/operations/nn_ops.py +316 -2226
- mindspore/ops/operations/other_ops.py +53 -45
- mindspore/ops/operations/random_ops.py +4 -51
- mindspore/ops/operations/reshard_ops.py +53 -0
- mindspore/ops/operations/sparse_ops.py +8 -8
- mindspore/ops/primitive.py +204 -103
- mindspore/ops/silent_check.py +162 -0
- mindspore/ops_generate/__init__.py +27 -0
- mindspore/ops_generate/arg_dtype_cast.py +250 -0
- mindspore/ops_generate/arg_handler.py +197 -0
- mindspore/ops_generate/gen_aclnn_implement.py +263 -0
- mindspore/ops_generate/gen_ops.py +1084 -0
- mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
- mindspore/ops_generate/gen_pyboost_func.py +968 -0
- mindspore/ops_generate/gen_utils.py +209 -0
- mindspore/ops_generate/op_proto.py +138 -0
- mindspore/ops_generate/pyboost_utils.py +354 -0
- mindspore/ops_generate/template.py +239 -0
- mindspore/parallel/__init__.py +7 -4
- mindspore/parallel/_auto_parallel_context.py +155 -6
- mindspore/parallel/_cell_wrapper.py +16 -9
- mindspore/parallel/_cost_model_context.py +1 -1
- mindspore/parallel/_dp_allreduce_fusion.py +159 -159
- mindspore/parallel/_parallel_serialization.py +62 -14
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +18 -9
- mindspore/parallel/_transformer/__init__.py +1 -1
- mindspore/parallel/_transformer/layers.py +1 -1
- mindspore/parallel/_transformer/loss.py +1 -1
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/op_parallel_config.py +1 -1
- mindspore/parallel/_transformer/transformer.py +10 -10
- mindspore/parallel/_utils.py +161 -6
- mindspore/parallel/algo_parameter_config.py +6 -8
- mindspore/parallel/checkpoint_transform.py +369 -64
- mindspore/parallel/cluster/__init__.py +15 -0
- mindspore/parallel/cluster/process_entity/__init__.py +18 -0
- mindspore/parallel/cluster/process_entity/_api.py +344 -0
- mindspore/parallel/cluster/process_entity/_utils.py +126 -0
- mindspore/parallel/cluster/run.py +136 -0
- mindspore/parallel/mpi/__init__.py +1 -1
- mindspore/parallel/mpi/_mpi_config.py +1 -1
- mindspore/parallel/parameter_broadcast.py +152 -0
- mindspore/parallel/shard.py +128 -17
- mindspore/profiler/__init__.py +3 -2
- mindspore/profiler/common/process_pool.py +41 -0
- mindspore/profiler/common/singleton.py +28 -0
- mindspore/profiler/common/util.py +125 -0
- mindspore/profiler/envprofiling.py +2 -2
- mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +53 -0
- mindspore/profiler/parser/ascend_analysis/file_manager.py +159 -0
- mindspore/profiler/parser/ascend_analysis/function_event.py +161 -0
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +131 -0
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +85 -0
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +57 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +116 -0
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +68 -0
- mindspore/profiler/parser/ascend_cluster_generator.py +116 -0
- mindspore/profiler/parser/ascend_communicate_generator.py +314 -0
- mindspore/profiler/parser/ascend_flops_generator.py +27 -5
- mindspore/profiler/parser/ascend_fpbp_generator.py +8 -2
- mindspore/profiler/parser/ascend_hccl_generator.py +31 -280
- mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
- mindspore/profiler/parser/ascend_memory_generator.py +185 -0
- mindspore/profiler/parser/ascend_msprof_exporter.py +151 -126
- mindspore/profiler/parser/ascend_msprof_generator.py +75 -274
- mindspore/profiler/parser/ascend_op_generator.py +94 -36
- mindspore/profiler/parser/ascend_timeline_generator.py +297 -131
- mindspore/profiler/parser/base_timeline_generator.py +17 -3
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +2 -1
- mindspore/profiler/parser/framework_parser.py +11 -4
- mindspore/profiler/parser/integrator.py +3 -1
- mindspore/profiler/parser/memory_usage_parser.py +8 -2
- mindspore/profiler/parser/minddata_analyzer.py +8 -2
- mindspore/profiler/parser/minddata_parser.py +73 -4
- mindspore/profiler/parser/msadvisor_analyzer.py +5 -3
- mindspore/profiler/parser/msadvisor_parser.py +10 -4
- mindspore/profiler/parser/profiler_info.py +16 -1
- mindspore/profiler/profiling.py +522 -195
- mindspore/rewrite/__init__.py +2 -13
- mindspore/rewrite/api/node.py +123 -37
- mindspore/rewrite/api/pattern_engine.py +2 -3
- mindspore/rewrite/api/scoped_value.py +16 -15
- mindspore/rewrite/api/symbol_tree.py +46 -30
- mindspore/rewrite/ast_helpers/__init__.py +3 -6
- mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
- mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
- mindspore/rewrite/common/__init__.py +1 -2
- mindspore/rewrite/common/config.py +24 -0
- mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
- mindspore/rewrite/{namer.py → common/namer.py} +63 -18
- mindspore/rewrite/common/namespace.py +118 -0
- mindspore/rewrite/node/__init__.py +5 -5
- mindspore/rewrite/node/call_function.py +23 -7
- mindspore/rewrite/node/cell_container.py +7 -3
- mindspore/rewrite/node/control_flow.py +53 -28
- mindspore/rewrite/node/node.py +212 -196
- mindspore/rewrite/node/node_manager.py +51 -22
- mindspore/rewrite/node/node_topological_manager.py +3 -23
- mindspore/rewrite/parsers/__init__.py +12 -0
- mindspore/rewrite/parsers/arguments_parser.py +8 -9
- mindspore/rewrite/parsers/assign_parser.py +637 -413
- mindspore/rewrite/parsers/attribute_parser.py +3 -4
- mindspore/rewrite/parsers/class_def_parser.py +115 -148
- mindspore/rewrite/parsers/constant_parser.py +5 -5
- mindspore/rewrite/parsers/container_parser.py +4 -6
- mindspore/rewrite/parsers/expr_parser.py +55 -0
- mindspore/rewrite/parsers/for_parser.py +31 -98
- mindspore/rewrite/parsers/function_def_parser.py +13 -5
- mindspore/rewrite/parsers/if_parser.py +28 -10
- mindspore/rewrite/parsers/module_parser.py +8 -182
- mindspore/rewrite/parsers/parser.py +1 -5
- mindspore/rewrite/parsers/parser_register.py +1 -1
- mindspore/rewrite/parsers/return_parser.py +5 -10
- mindspore/rewrite/parsers/while_parser.py +59 -0
- mindspore/rewrite/sparsify/utils.py +1 -1
- mindspore/rewrite/symbol_tree/__init__.py +20 -0
- mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +704 -185
- mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
- mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
- mindspore/run_check/_check_version.py +6 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/safeguard/rewrite_obfuscation.py +9 -19
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +6 -5
- mindspore/train/_utils.py +178 -4
- mindspore/train/amp.py +167 -245
- mindspore/train/anf_ir_pb2.py +14 -2
- mindspore/train/callback/__init__.py +5 -2
- mindspore/train/callback/_backup_and_restore.py +5 -5
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +151 -37
- mindspore/train/callback/_cluster_monitor.py +201 -0
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_flops_collector.py +238 -0
- mindspore/train/callback/_landscape.py +16 -11
- mindspore/train/callback/_loss_monitor.py +2 -2
- mindspore/train/callback/_mindio_ttp.py +443 -0
- mindspore/train/callback/_on_request_exit.py +2 -2
- mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
- mindspore/train/callback/_summary_collector.py +13 -14
- mindspore/train/callback/_time_monitor.py +3 -3
- mindspore/train/data_sink.py +6 -5
- mindspore/train/dataset_helper.py +66 -21
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/accuracy.py +7 -7
- mindspore/train/metrics/confusion_matrix.py +8 -6
- mindspore/train/metrics/cosine_similarity.py +6 -4
- mindspore/train/metrics/error.py +2 -2
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/metrics/perplexity.py +2 -1
- mindspore/train/metrics/topk.py +2 -2
- mindspore/train/mind_ir_pb2.py +89 -15
- mindspore/train/model.py +298 -56
- mindspore/train/serialization.py +501 -221
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/_writer_pool.py +1 -1
- mindspore/train/summary/summary_record.py +56 -34
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/turbojpeg.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.2.11.dist-info → mindspore-2.3.0.dist-info}/METADATA +3 -3
- mindspore-2.3.0.dist-info/RECORD +1400 -0
- {mindspore-2.2.11.dist-info → mindspore-2.3.0.dist-info}/entry_points.txt +1 -0
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
- mindspore/gen_ops.py +0 -273
- mindspore/nn/layer/flash_attention.py +0 -189
- mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
- mindspore/ops/_op_impl/tbe/__init__.py +0 -47
- mindspore/ops/_op_impl/tbe/abs.py +0 -38
- mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/acos.py +0 -37
- mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/acosh.py +0 -37
- mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
- mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
- mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
- mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
- mindspore/ops/_op_impl/tbe/add.py +0 -42
- mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/add_n.py +0 -39
- mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
- mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
- mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
- mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
- mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
- mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
- mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
- mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
- mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
- mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/asin.py +0 -37
- mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/asinh.py +0 -37
- mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/assign.py +0 -79
- mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
- mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
- mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/atan.py +0 -37
- mindspore/ops/_op_impl/tbe/atan2.py +0 -38
- mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/atanh.py +0 -37
- mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
- mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
- mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
- mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
- mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
- mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
- mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
- mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
- mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
- mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
- mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
- mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
- mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
- mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
- mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cast.py +0 -55
- mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/cdist.py +0 -38
- mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/ceil.py +0 -37
- mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/celu.py +0 -39
- mindspore/ops/_op_impl/tbe/centralization.py +0 -39
- mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
- mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/concat.py +0 -40
- mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
- mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
- mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
- mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
- mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
- mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/cos.py +0 -37
- mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/cosh.py +0 -37
- mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
- mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
- mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cummin.py +0 -41
- mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
- mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
- mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
- mindspore/ops/_op_impl/tbe/diag.py +0 -38
- mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
- mindspore/ops/_op_impl/tbe/dilation.py +0 -40
- mindspore/ops/_op_impl/tbe/div.py +0 -41
- mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
- mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
- mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
- mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
- mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
- mindspore/ops/_op_impl/tbe/elu.py +0 -38
- mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/equal.py +0 -42
- mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/erf.py +0 -37
- mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfc.py +0 -37
- mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
- mindspore/ops/_op_impl/tbe/exp.py +0 -40
- mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
- mindspore/ops/_op_impl/tbe/expm1.py +0 -37
- mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
- mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/fill.py +0 -56
- mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/flatten.py +0 -48
- mindspore/ops/_op_impl/tbe/floor.py +0 -37
- mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
- mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
- mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
- mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
- mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
- mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
- mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
- mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/ger.py +0 -43
- mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/greater.py +0 -43
- mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
- mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
- mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
- mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
- mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
- mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
- mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
- mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/im2col.py +0 -42
- mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
- mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
- mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/inv.py +0 -38
- mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/invert.py +0 -37
- mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/iou.py +0 -38
- mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/is_close.py +0 -40
- mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
- mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
- mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
- mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
- mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
- mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
- mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/lerp.py +0 -38
- mindspore/ops/_op_impl/tbe/less.py +0 -41
- mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/log.py +0 -40
- mindspore/ops/_op_impl/tbe/log1p.py +0 -37
- mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
- mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
- mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
- mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
- mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/matmul.py +0 -53
- mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
- mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
- mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
- mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum.py +0 -39
- mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
- mindspore/ops/_op_impl/tbe/minimum.py +0 -40
- mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mish.py +0 -37
- mindspore/ops/_op_impl/tbe/mod.py +0 -41
- mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/mul.py +0 -37
- mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
- mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
- mindspore/ops/_op_impl/tbe/neg.py +0 -39
- mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
- mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
- mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
- mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
- mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
- mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
- mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/pack.py +0 -58
- mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
- mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
- mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/pdist.py +0 -36
- mindspore/ops/_op_impl/tbe/pooling.py +0 -46
- mindspore/ops/_op_impl/tbe/population_count.py +0 -38
- mindspore/ops/_op_impl/tbe/pow.py +0 -41
- mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/prelu.py +0 -37
- mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/range.py +0 -39
- mindspore/ops/_op_impl/tbe/real_div.py +0 -38
- mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
- mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
- mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
- mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
- mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6.py +0 -38
- mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/renorm.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
- mindspore/ops/_op_impl/tbe/rint.py +0 -37
- mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roll.py +0 -42
- mindspore/ops/_op_impl/tbe/round.py +0 -38
- mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
- mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
- mindspore/ops/_op_impl/tbe/select.py +0 -38
- mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/selu.py +0 -39
- mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sgd.py +0 -62
- mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sign.py +0 -38
- mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/sin.py +0 -37
- mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sinh.py +0 -37
- mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/slice.py +0 -58
- mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
- mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax.py +0 -37
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
- mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/softplus.py +0 -37
- mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softsign.py +0 -37
- mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sort.py +0 -38
- mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/split_d.py +0 -38
- mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/split_v.py +0 -39
- mindspore/ops/_op_impl/tbe/splitv.py +0 -39
- mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/square.py +0 -38
- mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
- mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
- mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
- mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
- mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
- mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
- mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
- mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
- mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
- mindspore/ops/_op_impl/tbe/sub.py +0 -39
- mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tan.py +0 -38
- mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh.py +0 -37
- mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
- mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
- mindspore/ops/_op_impl/tbe/tile.py +0 -37
- mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
- mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
- mindspore/ops/_op_impl/tbe/transpose.py +0 -60
- mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
- mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
- mindspore/ops/_op_impl/tbe/trunc.py +0 -39
- mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/unpack.py +0 -38
- mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
- mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
- mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
- mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
- mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
- mindspore/ops/_tracefunc.py +0 -241
- mindspore/ops/arg_dtype_cast.py +0 -54
- mindspore/rewrite/api/tree_node_helper.py +0 -60
- mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
- mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
- mindspore/rewrite/namespace.py +0 -53
- mindspore-2.2.11.dist-info/RECORD +0 -1920
- {mindspore-2.2.11.dist-info → mindspore-2.3.0.dist-info}/WHEEL +0 -0
- {mindspore-2.2.11.dist-info → mindspore-2.3.0.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,4 @@
-# Copyright 2020-
+# Copyright 2020-2024 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -14,6 +14,7 @@
 # ============================================================================

 """Operators for gradients."""
+# pylint: disable=unused-import
 from __future__ import absolute_import

 from __future__ import division
@@ -27,6 +28,15 @@ from mindspore import _checkparam as validator
 from mindspore.common import dtype as mstype
 from mindspore.communication.management import GlobalComm
 from mindspore.common._utils import is_shape_unknown, is_dim_unknown
+from ..auto_generate import (AbsGrad, ACosGrad, LogitGrad, AcoshGrad, AsinGrad, AsinhGrad, ReciprocalGrad, RsqrtGrad,
+                             SqrtGrad, BatchNormGrad, BatchNormGradGrad, BiasAddGrad, GeLUGrad, FastGeLUGrad,
+                             AvgPoolGrad, MinimumGrad, LogSoftmaxGrad, PReLUGrad, ReluGrad, ReLU6Grad, EluGrad,
+                             GatherDGradV2, ResizeBilinearGrad, ResizeLinear1DGrad, ResizeNearestNeighborV2Grad,
+                             SigmoidGrad, HSwishGrad, NLLLossGrad, AtanGrad, GridSampler3DGrad, GridSampler2DGrad,
+                             ResizeBicubicGrad, HSigmoidGrad, CholeskyGrad, ResizeNearestNeighborGrad, LayerNormGrad,
+                             HShrinkGrad, LayerNormGradGrad, SiLUGrad, MaximumGrad, MaximumGradGrad, RmsNormGrad,
+                             FlashAttentionScoreGrad, UpsampleTrilinear3DGrad, UpsampleNearest3DGrad,
+                             BinaryCrossEntropyGrad)


 class SparseFillEmptyRowsGrad(Primitive):
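Note: the hunk above swaps dozens of hand-written grad primitives in this module for a single import from the auto-generated operator package. A minimal sketch of what that relative import resolves to, assuming the diffed file is mindspore/ops/operations/_grad_ops.py (the path is inferred from the class names, not stated in this hunk):

    # Sketch only: the absolute form of the relative import added above,
    # assuming this module is mindspore.ops.operations._grad_ops so that
    # "..auto_generate" resolves to mindspore.ops.auto_generate.
    from mindspore.ops.auto_generate import ReluGrad, SigmoidGrad, LayerNormGrad

    # The re-exported classes keep their old names, so code that imported them
    # from this module keeps resolving; they are now defined by the code
    # generator instead of by hand in the removed blocks below.

Callers are unchanged; only the place where these Primitive classes are defined moves.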
@@ -39,92 +49,6 @@ class SparseFillEmptyRowsGrad(Primitive):
                                 outputs=['y_values', 'y_default_value'])


-class AbsGrad(PrimitiveWithInfer):
-    """Computes gradients for abs operation."""
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize AbsGrad"""
-
-
-class ACosGrad(Primitive):
-    """
-    Computes ACosGrad of input element-wise.
-
-    Returns:
-        Tensor, has the same type as input.
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize ACosGrad"""
-        self.init_prim_io_names(inputs=['y', 'dy'], outputs=['z'])
-
-
-class LogitGrad(Primitive):
-    """
-    Computes LogitGrad of input element-wise.
-
-    Returns:
-        Tensor, has the same type as input.
-    """
-    @prim_attr_register
-    def __init__(self, eps=-1.0):
-        """Initialize Exp"""
-        self.init_prim_io_names(inputs=['grad', 'input'], outputs=['dx'])
-        validator.check_value_type("eps", eps, [float], self.name)
-        self.add_prim_attr('eps', eps)
-
-
-class AcoshGrad(Primitive):
-    """Performs grad of Acosh operation."""
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize AcoshGrad"""
-        self.init_prim_io_names(inputs=['y', 'dy'], outputs=['z'])
-
-
-class AsinGrad(Primitive):
-    """
-    Computes AsinGrad of input element-wise.
-
-    Returns:
-        Tensor, has the same type as input.
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize AsinGrad"""
-        self.init_prim_io_names(inputs=['y', 'dy'], outputs=['z'])
-
-
-class AsinhGrad(Primitive):
-    """Performs grad of Asinh operation."""
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize AsinhGrad"""
-        self.init_prim_io_names(inputs=['y', 'dy'], outputs=['z'])
-
-
-class ReciprocalGrad(Primitive):
-    """Performs grad of Reciprocal operation."""
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize ReciprocalGrad"""
-        self.init_prim_io_names(inputs=['y', 'dy'], outputs=['z'])
-
-
-class RsqrtGrad(Primitive):
-    """Performs grad of Rsqrt operation."""
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize RsqrtGrad"""
-
-
 class ScaleAndTranslateGrad(Primitive):
     """Performs grad of ScaleAndTranslate operation."""

@@ -137,39 +61,15 @@ class ScaleAndTranslateGrad(Primitive):
         validator.check_value_type("antialias", antialias, [bool], self.name)


-class SoftmaxGrad(
+class SoftmaxGrad(Primitive):
     """Performs grad of Softmax operation."""

-
-class SqrtGrad(Primitive):
-    """Performs grad of Sqrt operation."""
-
     @prim_attr_register
     def __init__(self):
-        """Initialize
+        """Initialize SoftmaxGrad"""
         self.init_prim_io_names(inputs=['y', 'dy'], outputs=['z'])


-class BatchNormGrad(Primitive):
-    """Performs grad of BatchNorm operation."""
-
-    @prim_attr_register
-    def __init__(self, is_training=False, epsilon=1e-5, data_format='NCHW'):
-        self.is_training = validator.check_value_type('is_training', is_training, (bool,), self.name)
-        self.epsilon = validator.check_float_range(epsilon, 0, 1, validator.INC_RIGHT, 'epsilon', self.name)
-        self.data_format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.name)
-
-
-class BatchNormGradGrad(Primitive):
-    """Performs grad of BatchNormGrad operation."""
-
-    @prim_attr_register
-    def __init__(self, is_training=False, epsilon=1e-5, data_format='NCHW'):
-        self.is_training = validator.check_value_type('is_training', is_training, (bool,), self.name)
-        self.epsilon = validator.check_float_range(epsilon, 0, 1, validator.INC_RIGHT, 'epsilon', self.name)
-        self.data_format = validator.check_string(data_format, ['NCHW', 'NHWC'], 'format', self.name)
-
-
 class SyncBatchNormGrad(Primitive):
     """Performs grad of SyncBatchNorm operation."""

@@ -181,18 +81,6 @@ class SyncBatchNormGrad(Primitive):
         validator.check_int(device_num, 2, validator.GE, "device_num", self.name)


-class BiasAddGrad(Primitive):
-    """Computes gradients of BiasAdd."""
-
-    @prim_attr_register
-    def __init__(self, data_format="NCHW"):
-        self.init_prim_io_names(inputs=['dout'], outputs=['output'])
-        self.format = validator.check_string(data_format, ['NCHW', 'NHWC', 'NCDHW'], 'format', self.name)
-        if self.format == "NCDHW":
-            self.format = "NCHW"
-        self.add_prim_attr('data_format', self.format)
-
-
 class KLDivLossGrad(Primitive):
     """Computes gradients for `KLDivLoss` operation."""

@@ -210,14 +98,6 @@ class KLDivLossGrad(Primitive):
         self.reduction = validator.check_string(reduction, support_mode, 'reduction', self.name)


-class BinaryCrossEntropyGrad(Primitive):
-    """Computes gradients for `BinaryCrossEntropy` operation."""
-
-    @prim_attr_register
-    def __init__(self, reduction='mean'):
-        self.reduction = validator.check_string(reduction, ['none', 'mean', 'sum'], 'reduction', self.name)
-
-
 class LuUnpackGrad(Primitive):
     """Computes gradients for `LuUnpack` operation."""

@@ -713,22 +593,6 @@ class NeighborExchangeV2Grad(PrimitiveWithInfer):
                 'value': None}


-class GeLUGrad(Primitive):
-    """Gradients of GeLU operation."""
-
-    @prim_attr_register
-    def __init__(self):
-        self.init_prim_io_names(inputs=['dy', 'x', 'y'], outputs=['z'])
-
-
-class FastGeLUGrad(Primitive):
-    """Gradients of FastGeLU operation."""
-
-    @prim_attr_register
-    def __init__(self):
-        """init FastGeLUGrad"""
-
-
 class _PoolGrad(PrimitiveWithInfer):
     """Gradients of the max/avg pool operation."""

@@ -813,20 +677,6 @@ class AvgPoolGradGe(_PoolGrad):
         return out


-class AvgPoolGrad(_PoolGrad):
-    """Gradients of the avg pool operation."""
-
-    @prim_attr_register
-    def __init__(self, kernel_size=1, strides=1, pad_mode="VALID", data_format="NCHW"):
-        super(AvgPoolGrad, self).__init__(kernel_size, strides, pad_mode, data_format)
-
-    def infer_shape(self, x1_shape, x2_shape, grad_shape):
-        return x1_shape
-
-    def infer_dtype(self, x1_dtype, x2_dtype, grad_dtype):
-        return x1_dtype
-
-
 class AvgPoolGradV1(Primitive):
     """Gradients of the AvgPoolV1 operation."""

@@ -1192,25 +1042,6 @@ class MaxPool3DGradGrad(PrimitiveWithInfer):
         return x_dtype


-class MaximumGrad(Primitive):
-    """Grad for maximum."""
-
-    @prim_attr_register
-    def __init__(self, grad_x=True, grad_y=True):
-        """Initialize MaximumGrad"""
-        self.init_prim_io_names(inputs=['x1', 'x2', 'grads'], outputs=['y1', 'y2'])
-
-
-class MaximumGradGrad(Primitive):
-    """Grad for maximum grad."""
-
-    @prim_attr_register
-    def __init__(self, grad_x=True, grad_y=True):
-        """Initialize MaximumGradGrad"""
-        super().__init__("MaximumGradGrad")
-        self.init_prim_io_names(inputs=['x1', 'x2', 'dy1', 'dy2'], outputs=['sopd_x1', 'sopd_x2', 'sopd_grad'])
-
-
 class MaxPoolGradWithArgmax(Primitive):
     """Computes the gradients of MaxPoolWithArgmax."""
     @prim_attr_register
@@ -1359,15 +1190,6 @@ class MaxPoolGradGradWithArgmax(_PoolGrad):
         return grad_dtype


-class MinimumGrad(Primitive):
-    """Grad for minimum."""
-
-    @prim_attr_register
-    def __init__(self, grad_x=True, grad_y=True):
-        """Initialize MinimumGrad"""
-        self.init_prim_io_names(inputs=['x1', 'x2', 'grads'], outputs=['y1', 'y2'])
-
-
 class MinimumGradGrad(Primitive):
     """Grad for minimum_grad."""
     @prim_attr_register
@@ -1406,79 +1228,6 @@ class L2NormalizeGrad(Primitive):
             raise TypeError("The length of axis must be 1, later will support multiple axis!")


-class LayerNormGrad(Primitive):
-    """
-    Applies the layer Normalization to the input array.
-
-    This operator will calculate the input gradients of layernorm.
-
-    Args:
-        begin_norm_axis (int): The begin axis for the input to apply layernorm. Default: 1.
-        begin_params_axis (int): The begin axis for the parameter input to apply layernorm. Default: 1.
-
-    Returns:
-        tuple[int], tuple of 3 values (the gradients of layernorm input, gamma, beta).
-    """
-
-    @prim_attr_register
-    def __init__(self, begin_norm_axis=1, begin_params_axis=1):
-        """init"""
-        self.begin_norm_axis = validator.check_value_type('begin_norm_axis', begin_norm_axis, [int], self.name)
-        self.begin_params_axis = validator.check_value_type('begin_params_axis', begin_params_axis, [int], self.name)
-
-
-class LayerNormGradGrad(Primitive):
-    """
-    Gets the gradient of LayerNormGrad operation.
-
-    Args:
-        begin_norm_axis (int): The begin axis for the input to apply layernorm. Default: 1.
-        begin_params_axis (int): The begin axis for the parameter input to apply layernorm. Default: 1.
-
-    Inputs:
-        - **x** (Tensor) - The input tensor to be normalized, float32 or float16.
-        - **dy** (Tensor) - The gradient of LayerNorm's output y, float32 or float16.
-        - **variance** (Tensor) - The variance of x, float32 or float16.
-        - **mean** (Tensor) - The mean of x, float32 or float16.
-        - **gamma** (Tensor) - The original value of weight gamma initialized in LayerNorm, float32 or float16.
-          Default: 'ones'.
-        - **d_dx** (Tensor) - The gradient of dx, where dx is the gradient of LayerNorm's input x, float32 or float16.
-        - **d_dg** (Tensor) - The gradient of dg, where dg is the gradient of LayerNorm's weight gamma,
-          float32 or float16.
-        - **d_db** (Tensor) - The gradient of db, where db is the gradient of LayerNorm's weight beta,
-          float32 or float16.
-
-    Returns:
-        Tuple[Tensor], tuple of 3 Tensors (the gradients of layernormgrad x, dy, gamma).
-
-    Raises:
-        TypeError: If the 8 inputs don't have the same dtype.
-        ValueError: If x, dy, d_dx don't have the same shape.
-        ValueError: If variance, mean don't have the same shape.
-        ValueError: If gamma, d_dg, d_db don't have the same shape.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-    """
-
-    @prim_attr_register
-    def __init__(self, begin_norm_axis=1, begin_params_axis=1):
-        """init"""
-        self.begin_norm_axis = validator.check_value_type('begin_norm_axis', begin_norm_axis, [int], self.name)
-        self.begin_params_axis = validator.check_value_type('begin_params_axis', begin_params_axis, [int], self.name)
-        self.init_prim_io_names(inputs=['x', 'dy', 'variance', 'mean', 'gamma', 'd_dx', 'd_dg', 'd_db'],
-                                outputs=['sopd_x', 'sopd_dy', 'sopd_gamma'])
-
-
-class LogSoftmaxGrad(Primitive):
-    """Computes gradient for the Log Softmax activation."""
-
-    @prim_attr_register
-    def __init__(self, axis=-1):
-        """Initialize LogSoftmaxGrad"""
-        validator.check_value_type("axis", axis, [int], self.name)
-
-
 class LSTMGradData(Primitive):
     """Computes the data gradients of LSTM."""

@@ -1741,27 +1490,6 @@ class DynamicGRUV2Grad(Primitive):
        ])


-class PReLUGrad(Primitive):
-    r"""
-    Gradients of PReLU operation.
-
-    Note:
-        1-dimensional input_x is not supported.
-
-    Inputs:
-        - **y_backprop** (Tensor) - Representing the backprop of the next layer.
-        - **input_x** (Tensor) - Must be the input `input_x` of forward operator PRelu.
-        - **weight** (Tensor) - Float Tensor, w > 0, must be the input `weight` of forward operator PRelu.
-
-    Outputs:
-        Tensor, with the same type as `input_x`.
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        pass
-
-
 class RandomGammaGrad(Primitive):
     r"""
     Computes the derivative of a random sample of Gamma with respect to alpha.:
@@ -1800,180 +1528,6 @@ class RandomGammaGrad(Primitive):
         self.add_prim_attr("side_effect_hidden", True)


-class ReluGrad(Primitive):
-    """Performs grad of Relu operation."""
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize ReluGrad"""
-        self.init_prim_io_names(inputs=['y_backprop', 'x'], outputs=['output'])
-
-
-class SiLUGrad(Primitive):
-    """Performs grad of SiLU operation."""
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize SiLUGrad"""
-        self.init_prim_io_names(inputs=['dout', 'out'], outputs=['output'])
-
-
-class ReLU6Grad(Primitive):
-    """Performs grad of ReLU6 operation."""
-
-    @prim_attr_register
-    def __init__(self):
-        self.init_prim_io_names(inputs=['y_grad', 'x'], outputs=['output'])
-
-
-class ReluGradV2(Primitive):
-    """Performs grad of ReLUV2 operation."""
-
-    @prim_attr_register
-    def __init__(self):
-        self.init_prim_io_names(inputs=['gradients', 'mask'], outputs=['output'])
-
-
-class EluGrad(Primitive):
-    """Performs grad of Elu operation."""
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize EluGrad"""
-        self.init_prim_io_names(inputs=['y_backprop', 'x'], outputs=['output'])
-
-
-class GatherDGrad(Primitive):
-    """Performs grad of GatherD operation."""
-
-    @prim_attr_register
-    def __init__(self, dim=0, shape=None):
-        """Initialize GatherDGrad"""
-        validator.check_is_int(dim, int)
-        self.add_prim_attr("dim", dim)
-        self.dim = dim
-        self.out_shape = shape
-        self.init_prim_io_names(inputs=['index', 'grad'], outputs=['output'])
-
-
-class GatherDGradV2(Primitive):
-    """Performs grad of GatherD operation."""
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize GatherDGradV2"""
-        self.init_prim_io_names(inputs=['x', 'dim', 'index', 'grad'], outputs=['output'])
-
-
-class ResizeBilinearGrad(Primitive):
-    """Performs grad of ResizeBilinear operation."""
-
-    @prim_attr_register
-    def __init__(self, align_corners=False, half_pixel_centers=False):
-        """init"""
-        validator.check_value_type("align_corners", align_corners, [bool], self.name)
-        validator.check_value_type("half_pixel_centers", half_pixel_centers, [bool], self.name)
-        self.align_corners = validator.check_value_type("align_corners", align_corners, [bool], self.name)
-        self.half_pixel_centers = validator.check_value_type("half_pixel_centers",
-                                                              half_pixel_centers, [bool], self.name)
-        self.init_prim_io_names(inputs=['grads', 'original_image'], outputs=['y'])
-        if half_pixel_centers and align_corners:
-            raise ValueError(f"If half_pixel_centers is True, align_corners must be False, but got {align_corners}")
-
-
-class ResizeNearestNeighborGrad(Primitive):
-    """
-    Compute gradient of `ResizeNearestNeighbor` operator.
-
-    Note:
-        The shape of input parameter `size` must be (height, width).
-
-    Args:
-        align_corners (bool): Whether the centers of the 4 corner pixels of the input
-            and output tensors are aligned. Default: ``False``.
-    """
-
-    @prim_attr_register
-    def __init__(self, align_corners=False):
-        """Initialize ResizeNearestNeighborGrad"""
-        self.init_prim_io_names(inputs=['grads', 'size'], outputs=['y'])
-
-
-class ResizeLinear1DGrad(Primitive):
-    """
-    Compute gradient of `ResizeLinear1D` operator.
-
-    .. warning::
-        This is an experimental API that is subject to change.
-
-    Args:
-        coordinate_transformation_mode (string): Default is 'align_corners'. Describes how to transform the coordinate
-            in the resized tensor to the coordinate in the original tensor. Other optional: 'half_pixel'.
-    """
-
-    @prim_attr_register
-    def __init__(self, coordinate_transformation_mode="align_corners"):
-        """Initialize ResizeLinear1DGrad"""
-        self.init_prim_io_names(
-            inputs=['grads', 'input_x'], outputs=['y'])
-        validator.check_value_type(
-            "coordinate_transformation_mode", coordinate_transformation_mode, [str], self.name)
-        validator.check_string(coordinate_transformation_mode, ["align_corners", "half_pixel"],
-                               "coordinate_transformation_mode", self.name)
-
-
-class ResizeNearestNeighborV2Grad(Primitive):
-    """
-    Compute gradient of `ResizeNearestNeighborV2` operator.
-
-    Args:
-        align_corners (bool): Whether the centers of the 4 corner pixels of the input
-            and output tensors are aligned. Default: ``False``.
-        half_pixel_centers (bool): Default: ``False``.
-    """
-
-    @prim_attr_register
-    def __init__(self, align_corners=False, half_pixel_centers=False):
-        """Initialize ResizeNearestNeighborV2Grad"""
-        self.init_prim_io_names(inputs=['grads', 'size'], outputs=['y'])
-        validator.check_value_type('align_corners', align_corners, [bool], self.name)
-        validator.check_value_type('half_pixel_centers', half_pixel_centers, [bool], self.name)
-
-
-class UpsampleNearest3DGrad(Primitive):
-    """
-    Upsample the 3-D gradient data with the nearest neighbor interpolation algorithm.
-
-    Note:
-        Only one of 'scales' and 'output_size' can be specified, and it is an error if both are specified.
-
-    Inputs:
-        - **dy** (Tensor) - Tensor of shape [N, C, D, H, W], Must be one of the following types:
-          float16, float32, float64.
-        - **input_size** (listInt): An required listInt, which contain 5 elements:
-          [min_batch, channels, depth, height, width].
-          Must: input_size[0] == dy_tensor_size[0], input_size[1] == dy_tensor_size[1].
-        - **output_size** (listInt): An optional listInt. Default: ``None``.
-          It contains 3 elements: depth, height, width, whose elements should be the same as `dy`.
-          Must:
-          dy_tensor_size[2] == floor(input_size[2] * scales[0]) == output_size[0],
-          dy_tensor_size[3] == floor(input_size[3] * scales[1]) == output_size[1],
-          dy_tensor_size[4] == floor(input_size[4] * scales[2]) == output_size[2].
-        - **scales** (listFloat): An optional listFloat. Default: ``None``.
-          The scale array along each dimension, contain 3 elements: scale_depth, scale_height, scale_width.
-          The number of elements of 'scales' should be the same as the rank of `dy`.
-
-    Outputs:
-        - **dx**- (Tensor) - A 5-D tensor. Has the same type as `dy`, shape depends on `input_size`.
-    """
-    @prim_attr_register
-    def __init__(self):
-        """Initialize UpsampleNearest3DGrad."""
-        self.init_prim_io_names(
-            inputs=['dy', 'input_size', 'output_size', 'scales'],
-            outputs=['dx'])
-
-
 class ROIAlignGrad(Primitive):
     """
     ROIAlignGrad operator.
@@ -2034,15 +1588,6 @@ class PsROIPoolingGrad(PrimitiveWithInfer):
         return ydiff_type


-class SigmoidGrad(Primitive):
-    """Gets the gradient of Sigmoid operation."""
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize SigmoidGrad"""
-        self.init_prim_io_names(inputs=['y', 'dy'], outputs=['output'])
-
-
 class _ActivationGrad(PrimitiveWithInfer):
     """_ActivationGrad base class."""

@@ -2060,14 +1605,6 @@ class _ActivationGrad(PrimitiveWithInfer):
         return x_dtype


-class HSwishGrad(Primitive):
-    """Gets the gradient of HSwish operation."""
-    @prim_attr_register
-    def __init__(self):
-        """Initialize HSwishGrad"""
-        self.init_prim_io_names(inputs=['y_grad', 'x'], outputs=['output'])
-
-
 class SigmoidCrossEntropyWithLogitsGrad(Primitive):
     """Computes the gradients of `SigmoidCrossEntropyWithLogits`."""

@@ -2102,19 +1639,6 @@ class SliceGrad(PrimitiveWithInfer):
                 'value': None}


-class NLLLossGrad(PrimitiveWithInfer):
-    """Computes the gradients of `NLLLoss`."""
-
-    @prim_attr_register
-    def __init__(self, reduction="mean", ignore_index=-100):
-        """Initialize NLLLoss"""
-        self.init_prim_io_names(inputs=['x', 'loss_grad', 'target', 'weight', 'total_weight'], outputs=['x_grad'])
-        self.reduction = validator.check_string(reduction, ['none', 'sum', 'mean'], 'reduction', self.name)
-        self.ignore_index = ignore_index
-        self.add_prim_attr('reduction', self.reduction)
-        self.add_prim_attr('ignore_index', self.ignore_index)
-
-
 class SmoothL1LossGrad(Primitive):
     """Computes gradient for prediction on SmoothL1Loss."""

@@ -2301,19 +1825,6 @@ class RefToEmbed(Primitive):
         pass


-class AtanGrad(Primitive):
-    """
-    Computes AtanGrad of input element-wise.
-
-    Returns:
-        Tensor, has the same type as input.
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize AtanGrad"""
-
-
 class BasicLSTMCellCStateGrad(PrimitiveWithInfer):
     """Computes the state gradients of BasicLSTMCell."""

@@ -2616,40 +2127,6 @@ class MultilabelMarginLossGrad(Primitive):
         self.init_prim_io_names(inputs=['y_grad', 'x', 'target', 'is_target'], outputs=['x_grad'])


-class HShrinkGrad(Primitive):
-    """
-    Computes gradients for HShrinkGrad operation.
-
-    Args:
-        lambd (float): the λ value for the Hardshrink formulation. Default: 0.5
-
-    Inputs:
-        - **Gradients** (Tensor) - the gradients of loss to output of HShrink function.
-          Currently gradients data type only support float16 and float32.
-        - **Features** (Tensor) - Must be the input `input_x` of the forward operator HSHrink.
-          Currently features data type only support float16 and float32.
-
-    Outputs:
-        backprops - Tensor, with the same shape and data type as `features`.
-
-    Rasise:
-        ValueError: If `lambd` is not a float.
-        ValueError: If shape of `gradients` is not the same as `features`.
-        TypeError: If dtype of `gradients` is not the same as `features`.
-        TypeError: If dtype of `gradients` or `features` is neither float16 nor float32.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-    """
-
-    @prim_attr_register
-    def __init__(self, lambd=0.5):
-        validator.check_value_type("lambd", lambd, [float], self.name)
-        if lambd < 0.0:
-            lambd = 0.0
-        self.add_prim_attr('lambd', lambd)
-
-
 class Dilation2DBackpropInput(Primitive):
     """
     Computes the gradient of morphological 2-D dilation with respect to the input.
@@ -2962,6 +2439,12 @@ class MultiMarginLossGrad(Primitive):
     Supported Platforms:
         ``Ascend`` ``CPU``
     """
+    __mindspore_signature__ = (
+        sig.make_sig('y_grad'),
+        sig.make_sig('x'),
+        sig.make_sig('target'),
+        sig.make_sig('weight', default=None)
+    )

     @prim_attr_register
     def __init__(self, p=1, margin=1.0, reduction="mean"):
@@ -2972,96 +2455,8 @@ class MultiMarginLossGrad(Primitive):
         self.reduction = validator.check_string(reduction, ['none', 'sum', 'mean'], 'reduction', self.name)
         self.init_prim_io_names(inputs=['y_grad', 'x', 'target', 'weight'], outputs=['x_grad'])

-
-
-    r"""
-    Upsample the 3-D gradient data with trilinear interpolation algorithm.
-
-    Note:
-        One of 'scales' and 'output_size' must be specified. And it is an error if both are specified.
-
-    Args:
-        align_corners (bool): An optional bool. Default: ``False``.
-
-    Inputs:
-        - **dy** (Tensor) - Tensor of shape [N, C, D, H, W]. Must be one of the following types:
-          float16, float32, float64.
-        - **input_size** (Union[tuple[int], list[int]]): An required listInt which contains 5 elements:
-          [batch, channels, depth, height, width]. Must:
-          input_size[0] == dy_tensor_size[0]
-          input_size[1] == dy_tensor_size[1].
-        - **output_size** (Union[tuple[int], list[int]]): An optional listInt. Default: ``None``.
-          It contains 3 elements: depth, height, width, whose elements should be the same as `dy`. Must:
-          dy_tensor_size[2] == floor(input_size[2] * scales[0]) == output_size[0]
-          dy_tensor_size[3] == floor(input_size[3] * scales[1]) == output_size[1]
-          dy_tensor_size[4] == floor(input_size[4] * scales[2]) == output_size[2].
-        - **scales** (Union[tuple[float], list[float]]): An optional listFloat. Default: ``None``.
-          The scale array along each dimension, contain 3 elements: scale_depth, scale_height, scale_width.
-          The number of elements of 'scales' should be the same as the rank of input `dy`.
-
-    Outputs:
-        - **dx** (Tensor) - A Tensor with shape depending on intput_size, and its' dtype is the same as `dy`.
-    """
-    @prim_attr_register
-    def __init__(self, align_corners=False):
-        """Initialize UpsampleTrilinear3DGrad."""
-        self.init_prim_io_names(
-            inputs=['dy', 'input_size', 'output_size', 'scales'],
-            outputs=['dx'])
-        self.align_corners = align_corners
-        self.add_prim_attr('align_corners', self.align_corners)
-
-
-class GridSampler3DGrad(Primitive):
-    """
-    Computes gradients for GridSampler3D operation.
-
-    Args:
-        interpolation_mode (str): An optional string specifying the interpolation method. The optional values are
-            "bilinear" or "nearest". Default: "bilinear".
-        padding_mode (str): An optional string specifying the pad method. The optional values are "zeros", "border" or
-            "reflection". Default: "zeros".
-        align_corners (bool): An optional bool. If "true", the centers of the corner pixels of the input and output
-            tensors are aligned. Defaults to "false".
-
-    Inputs:
-        - **grad** (Tensor) - A 5-D tensor whose dtype is float32 or float64 and whose shape is :math:`(N, C, D_{out},
-          H_{out}, W_{out})`. The shape is inconsistent with the shape of the output result of forward calculation.
-        - **input_x** (Tensor) - A 5-D tensor whose dtype is the same as `grad` and whose shape is :math:`(N, C,
-          D_{in}, H_{in}, W_{in})`.
-        - **grid** (Tensor) - A 5-D tensor whose dtype is the same as `grad` and whose shape is :math:`(N, D_{out},
-          H_{out}, W_{out}, 3)`.
-
-    Outputs:
-        - **dx** (Tensor) - A 5-D tensor whose dtype and shape are the same as `input_x`.
-        - **dgrid** (Tensor) - A 5-D tensor whose dtype and shape are the same as `grid`.
-
-    Raises:
-        TypeError: If `grad`, `input_x` or `grid` is not a Tensor.
-        TypeError: If the dtypes of `grad`, `input_x` and `grid` are inconsistent.
-        TypeError: If the dtype of `grad`, `input_x` or `grid` is not a valid type.
-        TypeError: If `align_corners` is not a boolean value.
-        ValueError: If the rank of `grad`, `input_x` or `grid` is not equal to 5.
-        ValueError: If the first dimension of `grad`, `input_x` and `grid` are inconsistent.
-        ValueError: If the last dimension of `grid` is not equal to 3.
-        ValueError: If `interpolation_mode` is not "bilinear", "nearest" or a string value.
-        ValueError: If `padding_mode` is not "zeros", "border", "reflection" or a string value.
-        ValueError: If the shape of `grad` is inconsistent with the shape of the output result of forward calculation.
-
-    Supported Platforms:
-        ``GPU`` ``CPU``
-    """
-
-    @prim_attr_register
-    def __init__(self, interpolation_mode='bilinear', padding_mode='zeros', align_corners=False):
-        """Initialize GridSampler3DGrad."""
-        validator.check_string(interpolation_mode, ['bilinear', 'nearest'], 'interpolation_mode', self.name)
-        validator.check_string(padding_mode, ['zeros', 'border', 'reflection'], 'padding_mode', self.name)
-        validator.check_bool(align_corners, 'align_corners', self.name)
-        self.init_prim_io_names(inputs=['grad', 'input_x', 'grid'], outputs=['dx', 'dgrid'])
-        self.add_prim_attr('interpolation_mode', interpolation_mode)
-        self.add_prim_attr('padding_mode', padding_mode)
-        self.add_prim_attr('align_corners', align_corners)
+    def __call__(self, y_grad, x, target, weight=None):
+        return super().__call__(y_grad, x, target, weight)


 class SparseSegmentMeanGrad(Primitive):
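Note: the two hunks above make the `weight` input of MultiMarginLossGrad optional at the Python layer: the new `__mindspore_signature__` entry gives it a default of None and the added `__call__` simply forwards it. A hedged usage sketch; MultiMarginLossGrad is an internal grad primitive, the import path below is assumed, and the tensor arguments are left symbolic:

    # Sketch only; assumes the primitive stays exposed via the private grad-ops
    # module and is constructed with the arguments shown in this diff.
    from mindspore.ops.operations import _grad_ops as G

    multi_margin_loss_grad = G.MultiMarginLossGrad(p=1, margin=1.0, reduction="mean")
    # After this change `weight` can be omitted and defaults to None:
    #   x_grad = multi_margin_loss_grad(y_grad, x, target)
    #   x_grad = multi_margin_loss_grad(y_grad, x, target, weight)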
@@ -3466,136 +2861,6 @@ class SparseSegmentSqrtNGrad(Primitive):
         self.init_prim_io_names(inputs=['x', 'indices', 'segment_ids', 'output_dim0'], outputs=['y'])


-class GridSampler2DGrad(Primitive):
-    """
-    Computes gradients for GridSampler2D operation.
-
-    Args:
-        interpolation_mode (str): An optional string specifying the interpolation method. The optional values are
-            "bilinear" or "nearest". Default: "bilinear".
-        padding_mode (str): An optional string specifying the pad method. The optional values are "zeros", "border" or
-            "reflection". Default: "zeros".
-        align_corners (bool): An optional bool. If "true", the centers of the corner pixels of the input and output
-            tensors are aligned. Defaults to "false".
-
-    Inputs:
-        - **grad** (Tensor) - A 4-D tensor whose dtype is float16 or float32 and whose shape is :math:`(N, C,
-          H_{out}, W_{out})`. The shape is inconsistent with the shape of the output result of forward calculation.
-        - **input_x** (Tensor) - A 4-D tensor whose dtype is the same as `grad` and whose shape is :math:`(N, C,
-          H_{in}, W_{in})`.
-        - **grid** (Tensor) - A 4-D tensor whose dtype is the same as `grad` and whose
-          shape is :math:`(N, H_{out}, W_{out}, 2)`.
-
-    Outputs:
-        - **dx** (Tensor) - A 4-D tensor whose dtype and shape are the same as `input_x`.
-        - **dgrid** (Tensor) - A 4-D tensor whose dtype and shape are the same as `grid`.
-
-    Raises:
-        TypeError: If `grad`, `input_x` or `grid` is not a Tensor.
-        TypeError: If the dtypes of `grad`, `input_x` and `grid` are inconsistent.
-        TypeError: If the dtype of `grad`, `input_x` or `grid` is not a valid type.
-        TypeError: If `align_corners` is not a boolean value.
-        ValueError: If the rank of `grad`, `input_x` or `grid` is not equal to 4.
-        ValueError: If the first dimension of `grad`, `input_x` and `grid` are inconsistent.
-        ValueError: If the last dimension of `grid` is not equal to 2.
-        ValueError: If `interpolation_mode` is not "bilinear", "nearest" or a string value.
-        ValueError: If `padding_mode` is not "zeros", "border", "reflection" or a string value.
-        ValueError: If the shape of `grad` is inconsistent with the shape of the output result of forward calculation.
-
-    Supported Platforms:
-        ``GPU`` ``CPU``
-    """
-
-    @prim_attr_register
-    def __init__(self, interpolation_mode='bilinear', padding_mode='zeros', align_corners=False):
-        """Initialize GridSampler2DGrad."""
-        validator.check_string(interpolation_mode, ['bilinear', 'nearest'], 'interpolation_mode', self.name)
-        validator.check_string(padding_mode, ['zeros', 'border', 'reflection'], 'padding_mode', self.name)
-        validator.check_bool(align_corners, 'align_corners', self.name)
-        self.init_prim_io_names(inputs=['grad', 'input_x', 'grid'], outputs=['dx', 'dgrid'])
-        self.add_prim_attr('interpolation_mode', interpolation_mode)
-        self.add_prim_attr('padding_mode', padding_mode)
-        self.add_prim_attr('align_corners', align_corners)
-
-
-class ResizeBicubicGrad(Primitive):
-    """
-    Computes gradients for ResizeBicubicGrad operation.
-
-    Args:
-        align_corners (bool):If true, the centers of the 4 corner pixels of the input
-            and output tensors are aligned, preserving the values at the corner pixels.Default: ``False``.
-        half_pixel_centers (bool): An optional bool. Default: ``False``.
-
-    Inputs:
-        - **grads** (Tensor) - A Tensor of type float. 4-D with shape
-          [batch, height, width,channels]. The format must be NHWC.
-        - **original_image** (Tensor) - A Tensor. Must be one of the following types: float,double.
-          4-D with shape [batch, orig_height, orig_width, channels], The image tensor that was resized.
-          The format must be NHWC.
-
-    Outputs:
-        A 4-D Tensor , with the same shape and data type as `original_image`.
-
-    Rasise:
-        TypeError: If `grads` is not allowed.
-        TypeError: If `original_image` is not allowed.
-        ValueError: If `images` dim is not 4.
-        ValueError: If `size` dim is not 4.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-    """
-    @prim_attr_register
-    def __init__(self, align_corners=False, half_pixel_centers=False):
-        """Initialize CropAndResize"""
-        validator.check_value_type('align_corners', align_corners, bool, self.name)
-        validator.check_value_type('half_pixel_centers', half_pixel_centers, bool, self.name)
-        self.init_prim_io_names(inputs=['grads', 'original_image'], outputs=['y'])
-
-    def __infer__(self, grads, original_image):
-        # get shape
-        grads_shape = list(grads['shape'])
-        original_image_shape = list(original_image['shape'])
-        # get value
-        if grads['value'] is None:
-            raise ValueError(
-                f"For '{self.name}', the 'grads' cannot be None, but got {grads['value']}."
-            )
-        if original_image['value'] is None:
-            raise ValueError(
-                f"For '{self.name}', the 'original_image' cannot be None, but got {original_image['value']}."
-            )
-        # get dtype
-        grads_dtype = grads['dtype']
-        original_image_dtype = original_image['dtype']
-        # check dytpe
-        validator.check_tensor_dtype_valid("grads", grads_dtype,
-                                           [mstype.float32], self.name)
-        validator.check_tensor_dtype_valid("original_image", original_image_dtype,
-                                           [mstype.float32, mstype.float64], self.name)
-        # check input shape rank
-        validator.check("grads rank", len(grads_shape), "expected", 4, validator.EQ, self.name)
-        validator.check("original_image rank", len(original_image_shape), "expected", 4, validator.EQ, self.name)
-        validator.check("batch_size equal", grads_shape[0], "expected",
-                        original_image_shape[0], validator.EQ, self.name)
-        validator.check("channel equal", grads_shape[3], "expected", original_image_shape[3], validator.EQ, self.name)
-        # check original_image_shape and grads_shape
-        validator.check("original_image[0] and grads[0]", original_image_shape[0],
-                        "expected", grads_shape[0], validator.EQ, self.name)
-        validator.check("original_image[3] and grads[3]", original_image_shape[3],
-                        "expected", grads_shape[3], validator.EQ, self.name)
-
-        batch_size = grads_shape[0]
-        height = original_image_shape[1]
-        width = original_image_shape[2]
-        channel = grads_shape[3]
-        out_shape = (batch_size, height, width, channel)
-        return {'shape': out_shape,
-                'dtype': original_image_dtype,
-                'value': None}
-
-
 class SparseSliceGrad(Primitive):
     r"""
     Computes gradients for SparseSlice operation.
@@ -3717,13 +2982,6 @@ class AffineGridGrad(Primitive):
         self.init_prim_io_names(inputs=['y_grad', 'x_size'], outputs=['x_grad'])


-class HSigmoidGrad(Primitive):
-    """Gets the gradient of HSigmoid operation."""
-    @prim_attr_register
-    def __init__(self):
-        """Initialize HSigmoidGrad"""
-        self.init_prim_io_names(inputs=['grads', 'input_x'], outputs=['output'])
-

 class GluGrad(Primitive):
     """
@@ -3737,46 +2995,6 @@ class GluGrad(Primitive):
         validator.check_value_type("axis", axis, [int], self.name)


-class CholeskyGrad(Primitive):
-    r"""
-    Computes the reverse mode backpropgated gradient of the Cholesky algorithm.
-
-    Inputs:
-        - **x** (Tensor) - A tensor with float32 or float64 data type.
-        - **grad** (Tensor) - A tensor with float32 or float64 data type. `x` should have
-          the same dtype with `a`.
-
-    Outputs:
-        Tensor, has the same dtype as `a` and `x`.
-
-    Raises:
-        TypeError: If x is not Tensor.
-        TypeError: If grad is not Tensor.
-        TypeError: If dtype of input x and grad is not float64 nor float32,
-        TypeError: If x has different dtype with grad.
-        ValueError: If input tensor's last two dims are not equal,
-        ValueError: If the shape of x and grad mismatch.
-
-    Supported Platforms:
-        ``Ascend``
-
-    Examples:
-        >>> x = Tensor(np.array([[4, 2],[2, 3]]), mstype.float64)
-        >>> grad = Tensor(np.array([[4, 2],[2, 3]]), mstype.float64)
-        >>> choleskygrad = G.CholeskyGrad()
-        >>> output = choleskygrad(x, grad)
-        >>> print (output)
-        [[0.5 0. ]
-         [0. 0.5]]
-
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize CholeskyGrad"""
-        self.init_prim_io_names(inputs=['x', 'grad'], outputs=['y'])
-
-
 class MapTensorGetGrad(Primitive):
     """
     Computes gradients for MapTensorGet operation.
@@ -3832,53 +3050,3 @@ class WKVGrad(Primitive):
         """Initialize WKVGrad."""
         self.init_prim_io_names(inputs=["time_first", "time_decay", "key", "value", "gy"],
                                 outputs=["gw", "gu", "gk", "gv"])
-
-
-class FlashAttentionScoreGrad(Primitive):
-    r"""
-    Calculates the gradient of FlashAttentionScore operation.
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Supported Platforms:
-        ``Ascend``
-    """
-    @prim_attr_register
-    def __init__(self, head_num, keep_prob=1.0, scale_value=1.0, pre_tokens=65536, next_tokens=65536, inner_precise=1,
-                 input_layout='BSH', sparse_mode=0):
-        """Initialize FlashAttentionScoreGrad."""
-        validator.check_value_type('head_num', head_num, [int], self.name)
-        validator.check_value_type('keep_prob', keep_prob, [int, float], self.name)
-        validator.check_float(keep_prob, 0.0, validator.GE, "keep_prob", self.name)
-        validator.check_float(keep_prob, 1.0, validator.LE, "keep_prob", self.name)
-        validator.check_value_type('scale_value', scale_value, [float], self.name)
-        validator.check_value_type('pre_tokens', pre_tokens, [int], self.name)
-        validator.check_value_type('next_tokens', next_tokens, [int], self.name)
-        validator.check_value_type('inner_precise', inner_precise, [int], self.name)
-        validator.check_value_type('sparse_mode', sparse_mode, [int], self.name)
-        if inner_precise not in [0, 1]:
-            raise ValueError(f"Attribute 'inner_precise' must be either 0 or 1, but got {inner_precise}")
-        validator.check_value_type('input_layout', input_layout, [str], self.name)
-        if input_layout not in ["BSH", "BNSD"]:
-            raise ValueError(f"Attribute 'input_layout' must be either 'BSH' or 'BNSD', but got {input_layout}")
-        self.init_prim_io_names(inputs=['query', 'key', 'value', 'dy', 'pse_shift', 'drop_mask', "padding_mask",
-                                        'attn_mask', 'softmax_max', 'softmax_sum', 'softmax_out', 'attention_in',
-                                        'prefix'],
-                                outputs=['dq', 'dk', 'dv', 'dpse'])
-
-
-class RmsNormGrad(Primitive):
-    r"""
-    Calculates the gradient of RmsNorm operation.
-    .. warning::
-        This is an experimental API that is subject to change or deletion.
-
-    Supported Platforms:
-        ``Ascend``
-    """
-
-    @prim_attr_register
-    def __init__(self):
-        """Initialize RmsNormGrad."""
-        self.init_prim_io_names(inputs=["dy", "x", "rstd", "gamma"],
-                                outputs=["dx", "dgamma"])