mindspore-2.2.11-cp37-cp37m-manylinux1_x86_64.whl → mindspore-2.3.0rc1-cp37-cp37m-manylinux1_x86_64.whl
This diff shows the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +5 -4
- mindspore/_akg/akg/composite/build_module.py +155 -11
- mindspore/_akg/akg/config/repository.json +38 -0
- mindspore/_akg/akg/ms/info_version_adapt.py +29 -0
- mindspore/_akg/akg/topi/cpp/impl.py +1 -1
- mindspore/_akg/akg/tvm/_ffi/base.py +1 -1
- mindspore/_akg/akg/tvm/contrib/nvcc.py +4 -1
- mindspore/_akg/akg/utils/ascend_profilier/path_manager.py +2 -1
- mindspore/_akg/akg/utils/composite_op_helper.py +4 -2
- mindspore/_akg/akg/utils/dump_ascend_meta.py +2 -2
- mindspore/_akg/akg/utils/gen_random.py +14 -8
- mindspore/_akg/akg/utils/op_dsl.py +11 -0
- mindspore/_akg/akg/utils/tbe_codegen_utils.py +5 -5
- mindspore/_c_dataengine.cpython-37m-x86_64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-37m-x86_64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-37m-x86_64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +58 -0
- mindspore/_extends/builtin_operations.py +2 -1
- mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
- mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
- mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
- mindspore/_extends/parse/__init__.py +18 -14
- mindspore/_extends/parse/compile_config.py +229 -0
- mindspore/_extends/parse/parser.py +155 -59
- mindspore/_extends/parse/resources.py +40 -7
- mindspore/_extends/parse/standard_method.py +124 -204
- mindspore/_extends/remote/kernel_build_server.py +2 -0
- mindspore/_mindspore_offline_debug.cpython-37m-x86_64-linux-gnu.so +0 -0
- mindspore/_profiler.py +30 -0
- mindspore/amp.py +24 -18
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/boost_cell_wrapper.py +1 -1
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +3 -1
- mindspore/common/_jit_fallback_utils.py +2 -3
- mindspore/common/_register_for_adapter.py +7 -0
- mindspore/common/_stub_tensor.py +6 -1
- mindspore/common/_utils.py +5 -17
- mindspore/common/api.py +91 -48
- mindspore/common/auto_dynamic_shape.py +27 -14
- mindspore/common/dtype.py +5 -4
- mindspore/common/dump.py +5 -4
- mindspore/common/initializer.py +1 -1
- mindspore/common/jit_config.py +20 -11
- mindspore/common/lazy_inline.py +58 -17
- mindspore/common/mindir_util.py +12 -2
- mindspore/common/mutable.py +79 -14
- mindspore/common/parameter.py +19 -4
- mindspore/common/seed.py +9 -9
- mindspore/common/sparse_tensor.py +251 -18
- mindspore/common/symbol.py +122 -0
- mindspore/common/tensor.py +321 -435
- mindspore/communication/__init__.py +3 -3
- mindspore/communication/_comm_helper.py +5 -0
- mindspore/communication/management.py +56 -38
- mindspore/config/op_info.config +22 -54
- mindspore/context.py +192 -54
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +6 -6
- mindspore/dataset/audio/transforms.py +711 -158
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/engine/cache_client.py +2 -2
- mindspore/dataset/engine/datasets.py +95 -38
- mindspore/dataset/engine/datasets_audio.py +14 -14
- mindspore/dataset/engine/datasets_standard_format.py +33 -3
- mindspore/dataset/engine/datasets_text.py +38 -38
- mindspore/dataset/engine/datasets_user_defined.py +7 -7
- mindspore/dataset/engine/datasets_vision.py +75 -71
- mindspore/dataset/engine/offload.py +5 -7
- mindspore/dataset/engine/validators.py +1 -1
- mindspore/dataset/text/__init__.py +3 -3
- mindspore/dataset/text/transforms.py +408 -121
- mindspore/dataset/text/utils.py +9 -9
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/transforms.py +261 -76
- mindspore/dataset/utils/browse_dataset.py +9 -9
- mindspore/dataset/vision/__init__.py +3 -3
- mindspore/dataset/vision/c_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +2 -2
- mindspore/dataset/vision/transforms.py +2264 -514
- mindspore/dataset/vision/utils.py +40 -9
- mindspore/dataset/vision/validators.py +7 -1
- mindspore/experimental/optim/__init__.py +12 -2
- mindspore/experimental/optim/adadelta.py +161 -0
- mindspore/experimental/optim/adagrad.py +168 -0
- mindspore/experimental/optim/adam.py +35 -34
- mindspore/experimental/optim/adamax.py +170 -0
- mindspore/experimental/optim/adamw.py +40 -16
- mindspore/experimental/optim/asgd.py +153 -0
- mindspore/experimental/optim/lr_scheduler.py +65 -125
- mindspore/experimental/optim/nadam.py +157 -0
- mindspore/experimental/optim/optimizer.py +15 -8
- mindspore/experimental/optim/radam.py +194 -0
- mindspore/experimental/optim/rmsprop.py +154 -0
- mindspore/experimental/optim/rprop.py +164 -0
- mindspore/experimental/optim/sgd.py +28 -19
- mindspore/hal/__init__.py +34 -0
- mindspore/hal/_ascend.py +57 -0
- mindspore/hal/_base.py +57 -0
- mindspore/hal/_cpu.py +56 -0
- mindspore/hal/_gpu.py +57 -0
- mindspore/hal/device.py +356 -0
- mindspore/hal/event.py +179 -0
- mindspore/hal/stream.py +337 -0
- mindspore/include/api/data_type.h +2 -2
- mindspore/include/api/dual_abi_helper.h +16 -3
- mindspore/include/api/model.h +1 -3
- mindspore/include/api/status.h +14 -0
- mindspore/include/c_api/model_c.h +173 -0
- mindspore/include/c_api/ms/base/types.h +1 -0
- mindspore/include/c_api/types_c.h +19 -0
- mindspore/include/dataset/execute.h +1 -3
- mindspore/include/mindapi/base/format.h +125 -23
- mindspore/include/mindapi/base/types.h +7 -0
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +2044 -154
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +2044 -33
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/build_tbe_kernel.py +529 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/compiler.py +56 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/custom.py +1109 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/get_file_path.py +36 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/tbe_topi.py +556 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6365 -1759
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_add_custom.h +49 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +59 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +59 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +52 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +232 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +232 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.cpp +192 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.cpp +274 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +39 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/x86_64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/libakg.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/cpu/libakg.so +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
- mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu10.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.6/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
- mindspore/mindrecord/__init__.py +5 -1
- mindspore/mindrecord/config.py +809 -0
- mindspore/mindrecord/filereader.py +25 -0
- mindspore/mindrecord/filewriter.py +74 -56
- mindspore/mindrecord/mindpage.py +40 -6
- mindspore/mindrecord/shardutils.py +3 -2
- mindspore/mindrecord/shardwriter.py +7 -0
- mindspore/mindrecord/tools/cifar100_to_mr.py +53 -66
- mindspore/mindrecord/tools/cifar10_to_mr.py +48 -63
- mindspore/mindrecord/tools/csv_to_mr.py +7 -17
- mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
- mindspore/mindrecord/tools/mnist_to_mr.py +11 -21
- mindspore/mindrecord/tools/tfrecord_to_mr.py +2 -10
- mindspore/multiprocessing/__init__.py +68 -0
- mindspore/nn/cell.py +86 -133
- mindspore/nn/dynamic_lr.py +2 -2
- mindspore/nn/layer/activation.py +80 -91
- mindspore/nn/layer/basic.py +4 -80
- mindspore/nn/layer/channel_shuffle.py +3 -16
- mindspore/nn/layer/container.py +3 -3
- mindspore/nn/layer/conv.py +71 -71
- mindspore/nn/layer/embedding.py +107 -46
- mindspore/nn/layer/image.py +4 -7
- mindspore/nn/layer/normalization.py +46 -38
- mindspore/nn/layer/padding.py +26 -39
- mindspore/nn/layer/pooling.py +13 -9
- mindspore/nn/layer/rnn_cells.py +5 -15
- mindspore/nn/layer/rnns.py +6 -5
- mindspore/nn/layer/thor_layer.py +1 -2
- mindspore/nn/layer/timedistributed.py +1 -1
- mindspore/nn/layer/transformer.py +52 -50
- mindspore/nn/learning_rate_schedule.py +6 -5
- mindspore/nn/loss/loss.py +44 -65
- mindspore/nn/optim/ada_grad.py +6 -4
- mindspore/nn/optim/adadelta.py +3 -1
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +102 -181
- mindspore/nn/optim/adamax.py +4 -2
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +4 -2
- mindspore/nn/optim/ftrl.py +31 -61
- mindspore/nn/optim/lamb.py +5 -3
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +6 -4
- mindspore/nn/optim/momentum.py +13 -25
- mindspore/nn/optim/optimizer.py +6 -3
- mindspore/nn/optim/proximal_ada_grad.py +4 -2
- mindspore/nn/optim/rmsprop.py +9 -3
- mindspore/nn/optim/rprop.py +4 -2
- mindspore/nn/optim/sgd.py +4 -2
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
- mindspore/nn/probability/distribution/beta.py +2 -2
- mindspore/nn/probability/distribution/categorical.py +4 -6
- mindspore/nn/probability/distribution/cauchy.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/poisson.py +2 -2
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +13 -1
- mindspore/nn/wrap/__init__.py +2 -1
- mindspore/nn/wrap/cell_wrapper.py +33 -12
- mindspore/nn/wrap/grad_reducer.py +148 -8
- mindspore/nn/wrap/loss_scale.py +7 -7
- mindspore/numpy/__init__.py +2 -0
- mindspore/numpy/array_creations.py +2 -0
- mindspore/numpy/array_ops.py +1 -5
- mindspore/numpy/fft.py +431 -0
- mindspore/numpy/math_ops.py +53 -59
- mindspore/numpy/utils.py +3 -0
- mindspore/ops/__init__.py +7 -3
- mindspore/ops/_grad_experimental/grad_array_ops.py +4 -160
- mindspore/ops/_grad_experimental/grad_comm_ops.py +14 -18
- mindspore/ops/_grad_experimental/grad_inner_ops.py +8 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +92 -287
- mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
- mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
- mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
- mindspore/ops/_op_impl/__init__.py +0 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +1 -0
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
- mindspore/ops/_op_impl/{cpu/concat.py → aicpu/generate_eod_mask.py} +16 -17
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
- mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -3
- mindspore/ops/_op_impl/cpu/adam.py +2 -2
- mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
- mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
- mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
- mindspore/ops/_vmap/vmap_array_ops.py +137 -101
- mindspore/ops/_vmap/vmap_base.py +8 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +102 -56
- mindspore/ops/_vmap/vmap_image_ops.py +70 -13
- mindspore/ops/_vmap/vmap_math_ops.py +74 -49
- mindspore/ops/_vmap/vmap_nn_ops.py +164 -89
- mindspore/ops/_vmap/vmap_other_ops.py +1 -1
- mindspore/ops/auto_generate/__init__.py +31 -0
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +133 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +248 -0
- mindspore/ops/auto_generate/gen_arg_handler.py +147 -0
- mindspore/ops/auto_generate/gen_extend_func.py +130 -0
- mindspore/ops/auto_generate/gen_ops_def.py +4786 -0
- mindspore/ops/auto_generate/gen_ops_prim.py +8335 -0
- mindspore/ops/auto_generate/pyboost_inner_prim.py +77 -0
- mindspore/ops/composite/__init__.py +5 -2
- mindspore/ops/composite/base.py +118 -17
- mindspore/ops/composite/math_ops.py +9 -48
- mindspore/ops/composite/multitype_ops/_compile_utils.py +166 -601
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +15 -133
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
- mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
- mindspore/ops/deprecated.py +14 -3
- mindspore/ops/extend/__init__.py +46 -0
- mindspore/ops/extend/array_func.py +152 -0
- mindspore/ops/extend/math_func.py +76 -0
- mindspore/ops/{_op_impl/tbe/atomic_addr_clean.py → extend/nn_func.py} +5 -15
- mindspore/ops/function/__init__.py +19 -11
- mindspore/ops/function/array_func.py +248 -1436
- mindspore/ops/function/clip_func.py +12 -13
- mindspore/ops/function/debug_func.py +2 -5
- mindspore/ops/function/fft_func.py +31 -0
- mindspore/ops/function/grad/grad_func.py +24 -17
- mindspore/ops/function/image_func.py +27 -21
- mindspore/ops/function/linalg_func.py +30 -53
- mindspore/ops/function/math_func.py +450 -2356
- mindspore/ops/function/nn_func.py +470 -789
- mindspore/ops/function/other_func.py +4 -5
- mindspore/ops/function/parameter_func.py +6 -92
- mindspore/ops/function/random_func.py +24 -80
- mindspore/ops/function/sparse_unary_func.py +11 -18
- mindspore/ops/function/spectral_func.py +1 -1
- mindspore/ops/function/vmap_func.py +15 -14
- mindspore/ops/functional.py +56 -62
- mindspore/ops/op_info_register.py +22 -19
- mindspore/ops/operations/__init__.py +19 -19
- mindspore/ops/operations/_embedding_cache_ops.py +1 -1
- mindspore/ops/operations/_grad_ops.py +20 -723
- mindspore/ops/operations/_inner_ops.py +233 -286
- mindspore/ops/operations/_quant_ops.py +4 -4
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +5 -480
- mindspore/ops/operations/_sequence_ops.py +4 -34
- mindspore/ops/operations/array_ops.py +100 -2481
- mindspore/ops/operations/comm_ops.py +38 -46
- mindspore/ops/operations/custom_ops.py +9 -9
- mindspore/ops/operations/debug_ops.py +101 -32
- mindspore/ops/operations/image_ops.py +3 -219
- mindspore/ops/operations/inner_ops.py +52 -38
- mindspore/ops/operations/linalg_ops.py +1 -49
- mindspore/{rewrite/ast_transformers → ops/operations/manually_defined}/__init__.py +11 -4
- mindspore/ops/operations/manually_defined/_inner.py +61 -0
- mindspore/ops/operations/manually_defined/ops_def.py +1391 -0
- mindspore/ops/operations/math_ops.py +752 -4588
- mindspore/ops/operations/nn_ops.py +380 -1750
- mindspore/ops/operations/other_ops.py +50 -42
- mindspore/ops/operations/random_ops.py +3 -50
- mindspore/ops/operations/sparse_ops.py +4 -4
- mindspore/ops/primitive.py +196 -96
- mindspore/ops/silent_check.py +162 -0
- mindspore/ops_generate/__init__.py +27 -0
- mindspore/ops_generate/arg_dtype_cast.py +248 -0
- mindspore/ops_generate/arg_handler.py +147 -0
- mindspore/ops_generate/gen_aclnn_implement.py +266 -0
- mindspore/ops_generate/gen_ops.py +1062 -0
- mindspore/ops_generate/gen_ops_inner_prim.py +129 -0
- mindspore/ops_generate/gen_pyboost_func.py +932 -0
- mindspore/ops_generate/gen_utils.py +188 -0
- mindspore/ops_generate/op_proto.py +138 -0
- mindspore/ops_generate/pyboost_utils.py +364 -0
- mindspore/ops_generate/template.py +238 -0
- mindspore/parallel/__init__.py +6 -4
- mindspore/parallel/_auto_parallel_context.py +28 -4
- mindspore/parallel/_cell_wrapper.py +16 -9
- mindspore/parallel/_cost_model_context.py +1 -1
- mindspore/parallel/_dp_allreduce_fusion.py +159 -159
- mindspore/parallel/_parallel_serialization.py +28 -12
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +22 -8
- mindspore/parallel/_transformer/__init__.py +1 -1
- mindspore/parallel/_transformer/layers.py +1 -1
- mindspore/parallel/_transformer/loss.py +1 -1
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/op_parallel_config.py +1 -1
- mindspore/parallel/_transformer/transformer.py +9 -9
- mindspore/parallel/_utils.py +131 -6
- mindspore/parallel/algo_parameter_config.py +6 -6
- mindspore/parallel/checkpoint_transform.py +156 -26
- mindspore/parallel/cluster/__init__.py +15 -0
- mindspore/parallel/cluster/process_entity/__init__.py +18 -0
- mindspore/parallel/cluster/process_entity/_api.py +345 -0
- mindspore/parallel/cluster/process_entity/_utils.py +116 -0
- mindspore/parallel/cluster/run.py +139 -0
- mindspore/parallel/mpi/__init__.py +1 -1
- mindspore/parallel/mpi/_mpi_config.py +1 -1
- mindspore/parallel/parameter_broadcast.py +152 -0
- mindspore/parallel/shard.py +99 -2
- mindspore/profiler/common/util.py +20 -0
- mindspore/profiler/envprofiling.py +1 -1
- mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +66 -0
- mindspore/profiler/parser/ascend_analysis/file_manager.py +77 -0
- mindspore/profiler/parser/ascend_analysis/function_event.py +146 -0
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +108 -0
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +80 -0
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +52 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +104 -0
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +59 -0
- mindspore/profiler/parser/ascend_cluster_generator.py +116 -0
- mindspore/profiler/parser/ascend_communicate_generator.py +314 -0
- mindspore/profiler/parser/ascend_flops_generator.py +27 -5
- mindspore/profiler/parser/ascend_fpbp_generator.py +8 -2
- mindspore/profiler/parser/ascend_hccl_generator.py +27 -279
- mindspore/profiler/parser/ascend_msprof_exporter.py +122 -118
- mindspore/profiler/parser/ascend_msprof_generator.py +67 -273
- mindspore/profiler/parser/ascend_op_generator.py +68 -27
- mindspore/profiler/parser/ascend_timeline_generator.py +292 -131
- mindspore/profiler/parser/base_timeline_generator.py +17 -3
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +2 -1
- mindspore/profiler/parser/framework_parser.py +11 -4
- mindspore/profiler/parser/integrator.py +3 -1
- mindspore/profiler/parser/memory_usage_parser.py +8 -2
- mindspore/profiler/parser/minddata_analyzer.py +8 -2
- mindspore/profiler/parser/minddata_parser.py +1 -1
- mindspore/profiler/parser/msadvisor_analyzer.py +5 -3
- mindspore/profiler/parser/msadvisor_parser.py +10 -4
- mindspore/profiler/parser/profiler_info.py +5 -0
- mindspore/profiler/profiling.py +373 -171
- mindspore/rewrite/__init__.py +2 -13
- mindspore/rewrite/api/node.py +122 -36
- mindspore/rewrite/api/pattern_engine.py +2 -3
- mindspore/rewrite/api/scoped_value.py +16 -15
- mindspore/rewrite/api/symbol_tree.py +46 -30
- mindspore/rewrite/ast_helpers/__init__.py +3 -6
- mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
- mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
- mindspore/rewrite/common/__init__.py +1 -2
- mindspore/rewrite/common/config.py +24 -0
- mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
- mindspore/rewrite/{namer.py → common/namer.py} +63 -18
- mindspore/rewrite/common/namespace.py +118 -0
- mindspore/rewrite/node/__init__.py +5 -5
- mindspore/rewrite/node/call_function.py +23 -7
- mindspore/rewrite/node/cell_container.py +7 -3
- mindspore/rewrite/node/control_flow.py +53 -28
- mindspore/rewrite/node/node.py +212 -196
- mindspore/rewrite/node/node_manager.py +51 -22
- mindspore/rewrite/node/node_topological_manager.py +3 -23
- mindspore/rewrite/parsers/__init__.py +12 -0
- mindspore/rewrite/parsers/arguments_parser.py +8 -9
- mindspore/rewrite/parsers/assign_parser.py +635 -413
- mindspore/rewrite/parsers/attribute_parser.py +3 -4
- mindspore/rewrite/parsers/class_def_parser.py +107 -144
- mindspore/rewrite/parsers/constant_parser.py +5 -5
- mindspore/rewrite/parsers/container_parser.py +4 -6
- mindspore/rewrite/parsers/expr_parser.py +55 -0
- mindspore/rewrite/parsers/for_parser.py +31 -98
- mindspore/rewrite/parsers/function_def_parser.py +13 -5
- mindspore/rewrite/parsers/if_parser.py +28 -10
- mindspore/rewrite/parsers/module_parser.py +8 -182
- mindspore/rewrite/parsers/parser.py +1 -5
- mindspore/rewrite/parsers/parser_register.py +1 -1
- mindspore/rewrite/parsers/return_parser.py +5 -10
- mindspore/rewrite/parsers/while_parser.py +59 -0
- mindspore/rewrite/sparsify/utils.py +1 -1
- mindspore/rewrite/symbol_tree/__init__.py +20 -0
- mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +704 -185
- mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
- mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
- mindspore/run_check/_check_version.py +6 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/safeguard/rewrite_obfuscation.py +9 -19
- mindspore/scipy/__init__.py +2 -1
- mindspore/scipy/fft.py +133 -0
- mindspore/scipy/linalg.py +140 -55
- mindspore/scipy/ops.py +15 -71
- mindspore/scipy/ops_grad.py +5 -34
- mindspore/scipy/optimize/line_search.py +2 -2
- mindspore/scipy/optimize/minimize.py +1 -1
- mindspore/train/__init__.py +3 -2
- mindspore/train/_utils.py +178 -4
- mindspore/train/amp.py +167 -245
- mindspore/train/callback/_backup_and_restore.py +4 -4
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +47 -21
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_landscape.py +15 -10
- mindspore/train/callback/_loss_monitor.py +2 -2
- mindspore/train/callback/_on_request_exit.py +2 -2
- mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
- mindspore/train/callback/_summary_collector.py +13 -14
- mindspore/train/callback/_time_monitor.py +2 -2
- mindspore/train/data_sink.py +1 -1
- mindspore/train/dataset_helper.py +19 -4
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/accuracy.py +7 -7
- mindspore/train/metrics/confusion_matrix.py +8 -6
- mindspore/train/metrics/cosine_similarity.py +6 -4
- mindspore/train/metrics/error.py +2 -2
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/metrics/perplexity.py +2 -1
- mindspore/train/metrics/topk.py +2 -2
- mindspore/train/mind_ir_pb2.py +75 -6
- mindspore/train/model.py +41 -27
- mindspore/train/serialization.py +262 -133
- mindspore/train/summary/_writer_pool.py +1 -1
- mindspore/train/summary/summary_record.py +56 -34
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/version.py +1 -1
- {mindspore-2.2.11.dist-info → mindspore-2.3.0rc1.dist-info}/METADATA +2 -2
- {mindspore-2.2.11.dist-info → mindspore-2.3.0rc1.dist-info}/RECORD +532 -1075
- {mindspore-2.2.11.dist-info → mindspore-2.3.0rc1.dist-info}/entry_points.txt +1 -0
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
- mindspore/config/super_bar_config.json +0 -544
- mindspore/gen_ops.py +0 -273
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
- mindspore/nn/layer/flash_attention.py +0 -189
- mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
- mindspore/ops/_op_impl/tbe/__init__.py +0 -47
- mindspore/ops/_op_impl/tbe/abs.py +0 -38
- mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/acos.py +0 -37
- mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/acosh.py +0 -37
- mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
- mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
- mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
- mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
- mindspore/ops/_op_impl/tbe/add.py +0 -42
- mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/add_n.py +0 -39
- mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
- mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
- mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
- mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
- mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
- mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
- mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
- mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
- mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
- mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/asin.py +0 -37
- mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/asinh.py +0 -37
- mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/assign.py +0 -79
- mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
- mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
- mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/atan.py +0 -37
- mindspore/ops/_op_impl/tbe/atan2.py +0 -38
- mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/atanh.py +0 -37
- mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
- mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
- mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
- mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
- mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
- mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
- mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
- mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
- mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
- mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
- mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
- mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
- mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
- mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
- mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cast.py +0 -55
- mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/cdist.py +0 -38
- mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/ceil.py +0 -37
- mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/celu.py +0 -39
- mindspore/ops/_op_impl/tbe/centralization.py +0 -39
- mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
- mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/concat.py +0 -40
- mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
- mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
- mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
- mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
- mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
- mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/cos.py +0 -37
- mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/cosh.py +0 -37
- mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
- mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
- mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cummin.py +0 -41
- mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
- mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
- mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
- mindspore/ops/_op_impl/tbe/diag.py +0 -38
- mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
- mindspore/ops/_op_impl/tbe/dilation.py +0 -40
- mindspore/ops/_op_impl/tbe/div.py +0 -41
- mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
- mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
- mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
- mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
- mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
- mindspore/ops/_op_impl/tbe/elu.py +0 -38
- mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/equal.py +0 -42
- mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/erf.py +0 -37
- mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfc.py +0 -37
- mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
- mindspore/ops/_op_impl/tbe/exp.py +0 -40
- mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
- mindspore/ops/_op_impl/tbe/expm1.py +0 -37
- mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
- mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/fill.py +0 -56
- mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/flatten.py +0 -48
- mindspore/ops/_op_impl/tbe/floor.py +0 -37
- mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
- mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
- mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
- mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
- mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
- mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
- mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
- mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/ger.py +0 -43
- mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/greater.py +0 -43
- mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
- mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
- mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
- mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
- mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
- mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
- mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
- mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/im2col.py +0 -42
- mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
- mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
- mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/inv.py +0 -38
- mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/invert.py +0 -37
- mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/iou.py +0 -38
- mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/is_close.py +0 -40
- mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
- mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
- mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
- mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
- mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
- mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
- mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/lerp.py +0 -38
- mindspore/ops/_op_impl/tbe/less.py +0 -41
- mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/log.py +0 -40
- mindspore/ops/_op_impl/tbe/log1p.py +0 -37
- mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
- mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
- mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
- mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
- mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/matmul.py +0 -53
- mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
- mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
- mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
- mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum.py +0 -39
- mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
- mindspore/ops/_op_impl/tbe/minimum.py +0 -40
- mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mish.py +0 -37
- mindspore/ops/_op_impl/tbe/mod.py +0 -41
- mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/mul.py +0 -37
- mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
- mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
- mindspore/ops/_op_impl/tbe/neg.py +0 -39
- mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
- mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
- mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
- mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
- mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
- mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
- mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/pack.py +0 -58
- mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
- mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
- mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/pdist.py +0 -36
- mindspore/ops/_op_impl/tbe/pooling.py +0 -46
- mindspore/ops/_op_impl/tbe/population_count.py +0 -38
- mindspore/ops/_op_impl/tbe/pow.py +0 -41
- mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/prelu.py +0 -37
- mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/range.py +0 -39
- mindspore/ops/_op_impl/tbe/real_div.py +0 -38
- mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
- mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
- mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
- mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
- mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6.py +0 -38
- mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/renorm.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
- mindspore/ops/_op_impl/tbe/rint.py +0 -37
- mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roll.py +0 -42
- mindspore/ops/_op_impl/tbe/round.py +0 -38
- mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
- mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
- mindspore/ops/_op_impl/tbe/select.py +0 -38
- mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/selu.py +0 -39
- mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sgd.py +0 -62
- mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sign.py +0 -38
- mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/sin.py +0 -37
- mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sinh.py +0 -37
- mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/slice.py +0 -58
- mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
- mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax.py +0 -37
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
- mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/softplus.py +0 -37
- mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softsign.py +0 -37
- mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sort.py +0 -38
- mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/split_d.py +0 -38
- mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/split_v.py +0 -39
- mindspore/ops/_op_impl/tbe/splitv.py +0 -39
- mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/square.py +0 -38
- mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
- mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
- mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
- mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
- mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
- mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
- mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
- mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
- mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
- mindspore/ops/_op_impl/tbe/sub.py +0 -39
- mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tan.py +0 -38
- mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh.py +0 -37
- mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
- mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
- mindspore/ops/_op_impl/tbe/tile.py +0 -37
- mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
- mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
- mindspore/ops/_op_impl/tbe/transpose.py +0 -60
- mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
- mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
- mindspore/ops/_op_impl/tbe/trunc.py +0 -39
- mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/unpack.py +0 -38
- mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
- mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
- mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
- mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
- mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
- mindspore/ops/_tracefunc.py +0 -241
- mindspore/ops/arg_dtype_cast.py +0 -54
- mindspore/rewrite/api/tree_node_helper.py +0 -60
- mindspore/rewrite/ast_creator_register.py +0 -37
- mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
- mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
- mindspore/rewrite/namespace.py +0 -53
- {mindspore-2.2.11.dist-info → mindspore-2.3.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.2.11.dist-info → mindspore-2.3.0rc1.dist-info}/top_level.txt +0 -0
mindspore/nn/optim/adam.py
CHANGED
@@ -17,7 +17,6 @@ from __future__ import absolute_import, division
 
 import numpy as np
 
-from mindspore import context
 from mindspore.common import dtype as mstype
 from mindspore.common.initializer import initializer
 from mindspore.common.api import jit
@@ -39,12 +38,12 @@ _scaler_one = Tensor(1, mstype.int32)
 _scaler_ten = Tensor(10, mstype.float32)
 
 
-@_lazy_adam_opt.register("Function", "Function", "
-                         "Tensor", "Tensor", "Tensor", "Tensor",
-                         "
-def _run_lazy_opt_with_sparse_dist(opt, sparse_opt,
-                                   beta2_power, beta1, beta2, eps, lr, gradient, params, m, v,
-
+@_lazy_adam_opt.register("Function", "Function", "Bool", "Bool", "Bool", "Tensor", "Tensor",
+                         "Tensor", "Tensor", "Tensor", "Tensor",
+                         "RowTensor", "Tensor", "Tensor", "Tensor", "Function", "Bool", "Function", "Bool")
+def _run_lazy_opt_with_sparse_dist(opt, sparse_opt, use_locking, use_nesterov, target, beta1_power,
+                                   beta2_power, beta1, beta2, eps, lr, gradient, params, m, v,
+                                   distributed_opt, use_flag, distributed_sparse_opt, use_sparse_flag):
     """Apply sparse lazy adam optimizer to the weight parameter when the gradient is sparse."""
     success = True
     indices = gradient.indices
@@ -53,14 +52,6 @@ def _run_lazy_opt_with_sparse_dist(opt, sparse_opt, push, pull, use_locking, use
         success = F.depend(success, distributed_sparse_opt(params, m, v, beta1_power, beta2_power, lr, beta1, beta2,
                                                            eps, values, indices))
         return success
-    if ps_parameter and not cache_enable:
-        op_shape = P.Shape()
-        shapes = (op_shape(params), op_shape(m), op_shape(v),
-                  op_shape(beta1_power), op_shape(beta2_power), op_shape(lr), op_shape(beta1),
-                  op_shape(beta2), op_shape(eps), op_shape(values), op_shape(indices))
-        success = F.depend(success, pull(push((beta1_power, beta2_power, lr, beta1, beta2,
-                                               eps, values, indices), shapes), params))
-        return success
 
     if not target:
         success = F.depend(success, sparse_opt(params, m, v, beta1_power, beta2_power, lr, beta1, beta2,
@@ -92,12 +83,12 @@ def _run_lazy_opt_with_sparse_dist(opt, sparse_opt, push, pull, use_locking, use
     return success
 
 
-@_lazy_adam_opt.register("Function", "Function", "
+@_lazy_adam_opt.register("Function", "Function", "Bool", "Bool", "Bool", "Tensor", "Tensor",
                          "Tensor", "Tensor", "Tensor", "Tensor", "MapTensor", "MapTensor", "MapTensor", "MapTensor",
-                         "
-def _run_map_tensor_lazy_opt_with_sparse_dist(opt, sparse_opt,
+                         "Function", "Bool", "Function", "Bool")
+def _run_map_tensor_lazy_opt_with_sparse_dist(opt, sparse_opt, use_locking, use_nesterov, target,
                                               beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, params, m, v,
-
+                                              distributed_opt, use_flag,
                                               distributed_sparse_opt, use_sparse_flag):
     """Apply sparse lazy adam optimizer to the weight parameter when the gradient is sparse."""
     success = True
@@ -132,46 +123,32 @@ def _run_map_tensor_lazy_opt_with_sparse_dist(opt, sparse_opt, push, pull, use_l
     return success
 
 
-@_lazy_adam_opt.register("Function", "Function", "
-                         "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor",
+@_lazy_adam_opt.register("Function", "Function", "Bool", "Bool", "Bool", "Tensor", "Tensor",
+                         "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor",
                          "Function", "Bool", "Function", "Bool")
-def _run_lazy_opt_with_one_number_dist(opt, sparse_opt,
+def _run_lazy_opt_with_one_number_dist(opt, sparse_opt, use_locking, use_nesterov, target,
                                        beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, params, moment1,
-                                       moment2,
+                                       moment2, distributed_opt, use_flag,
                                        distributed_sparse_opt, use_sparse_flag):
     """Apply lazy adam optimizer to the weight parameter using Tensor."""
     success = True
     if use_flag:
         success = F.depend(success, distributed_opt(params, moment1, moment2, beta1_power, beta2_power, lr, beta1,
                                                     beta2, eps, gradient))
-    elif ps_parameter and not cache_enable:
-        op_shape = P.Shape()
-        success = F.depend(success, pull(push((beta1_power, beta2_power, lr, beta1, beta2, eps, gradient),
-                                              (op_shape(params), op_shape(moment1), op_shape(moment2))), params))
     else:
         success = F.depend(success, opt(params, moment1, moment2, beta1_power, beta2_power, lr, beta1, beta2,
                                         eps, gradient))
     return success
 
 
-@_lazy_adam_opt.register("Function", "Function", "
-                         "Tensor", "Tensor", "Tensor", "Tensor", "RowTensor", "Tensor", "Tensor", "Tensor"
-
-
-                              beta1, beta2, eps, lr, gradient, params, m, v, ps_parameter, cache_enable):
+@_lazy_adam_opt.register("Function", "Function", "Bool", "Bool", "Bool", "Tensor", "Tensor",
+                         "Tensor", "Tensor", "Tensor", "Tensor", "RowTensor", "Tensor", "Tensor", "Tensor")
+def _run_lazy_opt_with_sparse(opt, sparse_opt, use_locking, use_nesterov, target, beta1_power, beta2_power,
+                              beta1, beta2, eps, lr, gradient, params, m, v):
     """Apply sparse lazy adam optimizer to the weight parameter when the gradient is sparse."""
     success = True
     indices = gradient.indices
     values = gradient.values
-    if ps_parameter and not cache_enable:
-        op_shape = P.Shape()
-        shapes = (op_shape(params), op_shape(m), op_shape(v),
-                  op_shape(beta1_power), op_shape(beta2_power), op_shape(lr), op_shape(beta1),
-                  op_shape(beta2), op_shape(eps), op_shape(values), op_shape(indices))
-        success = F.depend(success, pull(push((beta1_power, beta2_power, lr, beta1, beta2,
-                                               eps, values, indices), shapes), params))
-        return success
-
     if not target:
         success = F.depend(success, sparse_opt(params, m, v, beta1_power, beta2_power, lr, beta1, beta2,
                                                eps, values, indices))
@@ -202,12 +179,10 @@ def _run_lazy_opt_with_sparse(opt, sparse_opt, push, pull, use_locking, use_nest
     return success
 
 
-@_lazy_adam_opt.register("Function", "Function", "
-                         "Tensor", "Tensor", "Tensor", "Tensor", "MapTensor", "MapTensor", "MapTensor", "MapTensor"
-
-
-                                         beta2_power, beta1, beta2, eps, lr, gradient, params, m, v, ps_parameter,
-                                         cache_enable):
+@_lazy_adam_opt.register("Function", "Function", "Bool", "Bool", "Bool", "Tensor", "Tensor",
+                         "Tensor", "Tensor", "Tensor", "Tensor", "MapTensor", "MapTensor", "MapTensor", "MapTensor")
+def _run_map_tensor_lazy_opt_with_sparse(opt, sparse_opt, use_locking, use_nesterov, target, beta1_power,
+                                         beta2_power, beta1, beta2, eps, lr, gradient, params, m, v):
     """Apply sparse lazy adam optimizer to the weight parameter when the gradient is sparse(MapTensor)."""
     success = True
     indices, values = gradient.get_data()
@@ -236,20 +211,14 @@ def _run_map_tensor_lazy_opt_with_sparse(opt, sparse_opt, push, pull, use_lockin
     return success
 
 
-@_lazy_adam_opt.register("Function", "Function", "
-                         "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor"
-def _run_lazy_opt_with_one_number(opt, sparse_opt,
-                                  beta2_power, beta1, beta2, eps, lr, gradient, params, moment1, moment2
-                                  cache_enable):
+@_lazy_adam_opt.register("Function", "Function", "Bool", "Bool", "Bool", "Tensor", "Tensor",
+                         "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor")
+def _run_lazy_opt_with_one_number(opt, sparse_opt, use_locking, use_nesterov, target, beta1_power,
+                                  beta2_power, beta1, beta2, eps, lr, gradient, params, moment1, moment2):
     """Apply lazy adam optimizer to the weight parameter using Tensor."""
     success = True
-
-
-        success = F.depend(success, pull(push((beta1_power, beta2_power, lr, beta1, beta2, eps, gradient),
-                                              (op_shape(params), op_shape(moment1), op_shape(moment2))), params))
-    else:
-        success = F.depend(success, opt(params, moment1, moment2, beta1_power, beta2_power, lr, beta1, beta2,
-                                        eps, gradient))
+    success = F.depend(success, opt(params, moment1, moment2, beta1_power, beta2_power, lr, beta1, beta2,
+                                    eps, gradient))
     return success
 
 
@@ -309,11 +278,11 @@ def _update_run_op(beta1, beta2, eps, lr, weight_decay, param, m, v, gradient, d
     return op_cast(gradient, F.dtype(param))
 
 
-@_adam_opt.register("Function", "Function", "
-                    "Tensor", "Tensor", "Tensor", "Tensor", "RowTensor", "Tensor", "Tensor", "Tensor",
+@_adam_opt.register("Function", "Function", "Bool", "Bool", "Bool", "Tensor", "Tensor",
+                    "Tensor", "Tensor", "Tensor", "Tensor", "RowTensor", "Tensor", "Tensor", "Tensor",
                     "Function", "Bool", "Function", "Bool")
-def _run_opt_with_sparse_dist(opt, sparse_opt,
-                              beta2_power, beta1, beta2, eps, lr, gradient, param, m, v,
+def _run_opt_with_sparse_dist(opt, sparse_opt, use_locking, use_nesterov, target, beta1_power,
+                              beta2_power, beta1, beta2, eps, lr, gradient, param, m, v,
                               distributed_opt, use_flag, distributed_sparse_opt, use_sparse_flag):
     """Apply sparse adam optimizer to the weight parameter when the gradient is sparse."""
     success = True
@@ -323,14 +292,6 @@ def _run_opt_with_sparse_dist(opt, sparse_opt, push, pull, use_locking, use_nest
         success = F.depend(success, distributed_sparse_opt(param, m, v, beta1_power, beta2_power, lr, beta1, beta2,
                                                            eps, values, indices))
         return success
-    if ps_parameter and not cache_enable:
-        op_shape = P.Shape()
-        shapes = (op_shape(param), op_shape(m), op_shape(v),
-                  op_shape(beta1_power), op_shape(beta2_power), op_shape(lr), op_shape(beta1),
-                  op_shape(beta2), op_shape(eps), op_shape(values), op_shape(indices))
-        success = F.depend(success, pull(push((beta1_power, beta2_power, lr, beta1, beta2,
-                                               eps, values, indices), shapes), param))
-        return success
 
     if not target:
         success = F.depend(success, sparse_opt(param, m, v, beta1_power, beta2_power, lr, beta1, beta2,
@@ -376,49 +337,33 @@ def _run_opt_with_sparse_dist(opt, sparse_opt, push, pull, use_locking, use_nest
     return success
 
 
-@_adam_opt.register("Function", "Function", "
-                    "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor",
+@_adam_opt.register("Function", "Function", "Bool", "Bool", "Bool", "Tensor", "Tensor",
+                    "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor",
                     "Function", "Bool", "Function", "Bool")
-def _run_opt_with_one_number_dist(opt, sparse_opt,
-                                  beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param,
-                                  moment1, moment2, ps_parameter, cache_enable,
+def _run_opt_with_one_number_dist(opt, sparse_opt, use_locking, use_nesterov, target,
+                                  beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, moment1, moment2,
                                   distributed_opt, use_flag, distributed_sparse_opt, use_sparse_flag):
     """Apply adam optimizer to the weight parameter using Tensor."""
     success = True
     if use_flag:
         success = F.depend(success, distributed_opt(param, moment1, moment2, beta1_power, beta2_power, lr, beta1, beta2,
                                                     eps, gradient))
-    elif ps_parameter and not cache_enable:
-        op_shape = P.Shape()
-        success = F.depend(success, pull(push((beta1_power, beta2_power, lr, beta1, beta2, eps, gradient),
-                                              (op_shape(param), op_shape(moment1), op_shape(moment2))), param))
     else:
         success = F.depend(success, opt(param, moment1, moment2, beta1_power, beta2_power, lr, beta1, beta2,
                                         eps, gradient))
     return success
 
 
-@_adam_opt.register("Function", "Function", "
-                    "Bool", "Bool", "Bool",
+@_adam_opt.register("Function", "Function", "Bool", "Bool", "Bool",
                     "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor",
-                    "RowTensor", "Tensor", "Tensor", "Tensor"
-def _run_opt_with_sparse(opt, sparse_opt,
-                         use_locking, use_nesterov, target,
+                    "RowTensor", "Tensor", "Tensor", "Tensor")
+def _run_opt_with_sparse(opt, sparse_opt, use_locking, use_nesterov, target,
                          beta1_power, beta2_power, beta1, beta2, eps, lr,
-                         gradient, param, m, v
+                         gradient, param, m, v):
     """Apply sparse adam optimizer to the weight parameter when the gradient is sparse."""
     success = True
     indices = gradient.indices
     values = gradient.values
-    if ps_parameter and not cache_enable:
-        op_shape = P.Shape()
-        shapes = (op_shape(param), op_shape(m), op_shape(v),
-                  op_shape(beta1_power), op_shape(beta2_power), op_shape(lr), op_shape(beta1),
-                  op_shape(beta2), op_shape(eps), op_shape(values), op_shape(indices))
-        success = F.depend(success, pull(push((beta1_power, beta2_power, lr, beta1, beta2,
-                                               eps, values, indices), shapes), param))
-        return success
-
     if not target:
         success = F.depend(success, sparse_opt(param, m, v, beta1_power, beta2_power, lr, beta1, beta2,
                                                eps, values, indices))
@@ -463,44 +408,30 @@ def _run_opt_with_sparse(opt, sparse_opt, push, pull,
     return success
 
 
-@_adam_opt.register("Function", "Function", "
-                    "Bool", "Bool", "Bool",
+@_adam_opt.register("Function", "Function", "Bool", "Bool", "Bool",
                     "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor",
-                    "Tensor", "Tensor", "Tensor", "Tensor"
-def _run_opt_with_one_number(opt, sparse_opt,
-                             use_locking, use_nesterov, target,
+                    "Tensor", "Tensor", "Tensor", "Tensor")
+def _run_opt_with_one_number(opt, sparse_opt, use_locking, use_nesterov, target,
                              beta1_power, beta2_power, beta1, beta2, eps, lr,
-                             gradient, param, moment1, moment2
+                             gradient, param, moment1, moment2):
    """Apply adam optimizer to the weight parameter using Tensor."""
     success = True
-
-
-        success = F.depend(success, pull(push((beta1_power, beta2_power, lr, beta1, beta2, eps, gradient),
-                                              (op_shape(param), op_shape(moment1), op_shape(moment2))), param))
-    else:
-        success = F.depend(success, opt(param, moment1, moment2, beta1_power, beta2_power, lr, beta1, beta2,
-                                        eps, gradient))
+    success = F.depend(success, opt(param, moment1, moment2, beta1_power, beta2_power, lr, beta1, beta2,
+                                    eps, gradient))
     return success
 
 
-@_adam_opt.register("Function", "Function", "
-                    "Bool", "Bool", "Bool",
+@_adam_opt.register("Function", "Function", "Bool", "Bool", "Bool",
                     "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor",
-                    "Tensor", "Tensor", "Tensor", "Tensor", "Tensor"
-def _run_opt_with_one_number_use_amsgrad(opt, sparse_opt,
+                    "Tensor", "Tensor", "Tensor", "Tensor", "Tensor")
+def _run_opt_with_one_number_use_amsgrad(opt, sparse_opt,
                                          use_locking, use_nesterov, target,
                                          beta1_power, beta2_power, beta1, beta2, eps, lr,
-                                         gradient, param, moment1, moment2, vhat
+                                         gradient, param, moment1, moment2, vhat):
     """Apply adam optimizer to the weight parameter using Tensor and use amsgrad."""
     success = True
-
-
-        success = F.depend(success, pull(push((beta1_power, beta2_power, lr, gradient),
-                                              (op_shape(param), op_shape(moment1), op_shape(moment2),
-                                               op_shape(vhat))), param))
-    else:
-        success = F.depend(success, opt(param, moment1, moment2, vhat, beta1_power, beta2_power,
-                                        lr, beta1, beta2, eps, gradient))
+    success = F.depend(success, opt(param, moment1, moment2, vhat, beta1_power, beta2_power,
+                                    lr, beta1, beta2, eps, gradient))
     return success
 
 
@@ -647,7 +578,9 @@ class Adam(Optimizer):
           - Iterable: Learning rate is dynamic. The i-th step will take the i-th value as the learning rate.
 
           - LearningRateSchedule: Learning rate is dynamic. During training, the optimizer calls the instance of
-            LearningRateSchedule
+            `LearningRateSchedule
+            <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.nn.html#learningrateschedule-class>`_
+            with step as the input to get the learning rate of current step.
 
         beta1 (float): The exponential decay rate for the 1st moment estimations. Should be in range (0.0, 1.0).
             Default: ``0.9`` .
@@ -708,8 +641,8 @@ class Adam(Optimizer):
         ValueError: If `beta1`, `beta2` is not in range (0.0, 1.0).
         ValueError: If `weight_decay` is less than 0.
         ValueError: If `use_lazy` and `use_offload` are both ``true`` .
-        ValueError: If `use_amsgrad` is ``true
-        ValueError: If `use_amsgrad` while using distributed training.
+        ValueError: If `use_amsgrad` is ``true``, `use_lazy` or `use_offload` is ``true`` .
+        ValueError: If `use_amsgrad` is ``True`` while using distributed training.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -719,7 +652,7 @@ class Adam(Optimizer):
         >>> from mindspore import nn
         >>>
        >>> # Define the network structure of LeNet5. Refer to
-        >>> # https://gitee.com/mindspore/docs/blob/r2.
+        >>> # https://gitee.com/mindspore/docs/blob/r2.3.q1/docs/mindspore/code/lenet.py
         >>> net = LeNet5()
        >>> #1) All parameters use the same learning rate and weight decay
         >>> optim = nn.Adam(params=net.trainable_params())
@@ -790,9 +723,6 @@ class Adam(Optimizer):
            self.opt = P.Adam(use_locking, use_nesterov)
            self.sparse_opt = P.FusedSparseLazyAdam(use_locking, use_nesterov)
            self.sparse_opt.set_device("CPU")
-            self._ps_pull = P.Pull()
-            self._ps_push = P.Push("Adam", [0, 1, 2])
-            self._ps_push.add_prim_attr("use_nesterov", use_nesterov)
            self._init_distributed_opts(use_locking, use_nesterov)
 
        else:
@@ -803,13 +733,6 @@ class Adam(Optimizer):
            self.opt = P.Adam(use_locking, use_nesterov)
            self.sparse_opt = P.FusedSparseAdam(use_locking, use_nesterov)
            self.sparse_opt.set_device("CPU")
-            self._ps_pull = P.Pull()
-            if use_amsgrad:
-                self._ps_push = P.Push("ApplyAdamWithAmsgrad", [0, 1, 2, 3])
-            else:
-                self._ps_push = P.Push("Adam", [0, 1, 2])
-                self._ps_push.add_prim_attr("use_nesterov", use_nesterov)
-
            self._init_distributed_opts(use_locking, use_nesterov)
 
    def _apply_adam(self, params, beta1_power, beta2_power, moment1, moment2, lr, gradients):
@@ -830,79 +753,76 @@ class Adam(Optimizer):
            if self.is_group_lr:
                if self.use_lazy:
                    success = self.map_reverse(F.partial(_lazy_adam_opt, self.opt, self.sparse_opt,
-                                                         self.
-                                                         self.use_nesterov,
+                                                         self.use_locking, self.use_nesterov,
                                                         self._is_device, beta1_power, beta2_power,
                                                         self.beta1, self.beta2, self.eps),
                                               lr, gradients, self._parameters, self.moment1, self.moment2,
-                                               self.
+                                               self.dense_lazyadam_opts,
                                               self.use_dense_opt_flags, self.sparse_lazyadam_opts,
                                               self.use_sparse_opt_flags)
                # Normal Adam
                else:
-                    success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt, self.
-                                                  self.
-                                                  self.
-
-                                                  self.
-                                                  self.
+                    success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt, self.use_locking,
+                                                  self.use_nesterov, self._is_device, beta1_power, beta2_power,
+                                                  self.beta1, self.beta2, self.eps),
+                                        lr, gradients, params, moment1, moment2,
+                                        self.dense_adam_opts, self.use_dense_opt_flags,
+                                        self.sparse_adam_opts, self.use_sparse_opt_flags)
            else:
                if self.use_lazy:
-                    success = self.map_reverse(F.partial(_lazy_adam_opt, self.opt, self.sparse_opt,
-                                                         self.
+                    success = self.map_reverse(F.partial(_lazy_adam_opt, self.opt, self.sparse_opt,
+                                                         self.use_locking, self.use_nesterov,
                                                         self._is_device, beta1_power, beta2_power, self.beta1,
                                                         self.beta2, self.eps, lr), gradients, self._parameters,
-                                               self.moment1, self.moment2,
+                                               self.moment1, self.moment2,
                                               self.dense_lazyadam_opts, self.use_dense_opt_flags,
                                               self.sparse_lazyadam_opts, self.use_sparse_opt_flags)
                else:
-                    success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt,
-                                                  self.
+                    success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt,
+                                                  self.use_locking, self.use_nesterov,
                                                  self._is_device, beta1_power, beta2_power, self.beta1, self.beta2,
                                                  self.eps, lr), gradients, params, moment1, moment2,
-                                        self.
+                                        self.dense_adam_opts,
                                        self.use_dense_opt_flags, self.sparse_adam_opts, self.use_sparse_opt_flags)
        else:
            if self.is_group_lr:
                if self.use_lazy:
-                    success = self.map_(F.partial(_lazy_adam_opt, self.opt, self.sparse_opt,
-                                                  self.
+                    success = self.map_(F.partial(_lazy_adam_opt, self.opt, self.sparse_opt,
+                                                  self.use_locking, self.use_nesterov,
                                                  self._is_device, beta1_power, beta2_power, self.beta1, self.beta2,
-                                                  self.eps), lr, gradients, params, moment1, moment2
-                                        self.ps_parameters, self.cache_enable)
+                                                  self.eps), lr, gradients, params, moment1, moment2)
                else:
                    if self.use_amsgrad:
-                        success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt,
-                                                      self.
+                        success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt,
+                                                      self.use_locking, self.use_nesterov,
                                                      self._is_device, beta1_power, beta2_power,
                                                      self.beta1, self.beta2, self.eps), lr, gradients, params,
-                                            moment1, moment2, self.vhat
+                                            moment1, moment2, self.vhat)
                    else:
-                        success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt,
-                                                      self.
+                        success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt,
+                                                      self.use_locking, self.use_nesterov,
                                                      self._is_device, beta1_power, beta2_power,
                                                      self.beta1, self.beta2, self.eps), lr, gradients, params,
-                                            moment1, moment2
+                                            moment1, moment2)
            else:
                if self.use_lazy:
-                    success = self.map_(F.partial(_lazy_adam_opt, self.opt, self.sparse_opt,
-                                                  self.
+                    success = self.map_(F.partial(_lazy_adam_opt, self.opt, self.sparse_opt,
+                                                  self.use_locking, self.use_nesterov,
                                                  self._is_device, beta1_power, beta2_power, self.beta1, self.beta2,
-                                                  self.eps, lr), gradients, params, moment1, moment2
-                                        self.ps_parameters, self.cache_enable)
+                                                  self.eps, lr), gradients, params, moment1, moment2)
                else:
                    if self.use_amsgrad:
-                        success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt,
-                                                      self.
+                        success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt,
+                                                      self.use_locking, self.use_nesterov,
                                                      self._is_device, beta1_power, beta2_power,
                                                      self.beta1, self.beta2, self.eps, lr), gradients, params,
-                                            moment1, moment2, self.vhat
+                                            moment1, moment2, self.vhat)
                    else:
-                        success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt,
-                                                      self.
+                        success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt,
+                                                      self.use_locking, self.use_nesterov,
                                                      self._is_device, beta1_power, beta2_power,
                                                      self.beta1, self.beta2, self.eps, lr), gradients, params,
-                                            moment1, moment2
+                                            moment1, moment2)
 
        return success
 
@@ -967,8 +887,8 @@ class AdamWeightDecay(Optimizer):
            \boldsymbol{g}_{t} \\
            &\hspace{5mm}\boldsymbol{v}_{t} \leftarrow \beta_{2} \boldsymbol{v}_{t-1}+\left(1-\beta_{2}\right)
            \boldsymbol{g}_{t}^{2} \\
-            &\hspace{5mm}\boldsymbol{w}_{t} \leftarrow \boldsymbol{w}_{t-1}-\left(
-            /\left(\sqrt{
+            &\hspace{5mm}\boldsymbol{w}_{t} \leftarrow \boldsymbol{w}_{t-1}-\gamma\left({\boldsymbol{m}}_{t}
+            /\left(\sqrt{{\boldsymbol{v}}_{t}}+\epsilon\right)+\lambda \boldsymbol{w}_{t-1}\right) \\
            &\textbf{until}\text { stopping criterion is met } \\[-1.ex]
            &\newline
            &\hline \\[-1.ex]
@@ -986,7 +906,7 @@ class AdamWeightDecay(Optimizer):
        There is usually no connection between a optimizer and mixed precision. But when `FixedLossScaleManager` is used
        and `drop_overflow_update` in `FixedLossScaleManager` is set to False, optimizer needs to set the 'loss_scale'.
        As this optimizer has no argument of `loss_scale`, so `loss_scale` needs to be processed by other means, refer
-        document `LossScale <https://www.mindspore.cn/tutorials/en/r2.
+        document `LossScale <https://www.mindspore.cn/tutorials/en/r2.3.q1/advanced/mixed_precision.html>`_ to
        process `loss_scale` correctly.
 
        If parameters are not grouped, the `weight_decay` in optimizer will be applied on the network parameters without
@@ -1029,7 +949,9 @@ class AdamWeightDecay(Optimizer):
          - Iterable: Learning rate is dynamic. The i-th step will take the i-th value as the learning rate.
 
          - LearningRateSchedule: Learning rate is dynamic. During training, the optimizer calls the instance of
-            LearningRateSchedule
+            `LearningRateSchedule
+            <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.nn.html#learningrateschedule-class>`_
+            with step as the input to get the learning rate of current step.
 
        beta1 (float): The exponential decay rate for the 1st moment estimations. Default: ``0.9`` .
            Should be in range (0.0, 1.0).
@@ -1070,7 +992,7 @@ class AdamWeightDecay(Optimizer):
        >>> from mindspore import nn
        >>>
        >>> # Define the network structure of LeNet5. Refer to
-        >>> # https://gitee.com/mindspore/docs/blob/r2.
+        >>> # https://gitee.com/mindspore/docs/blob/r2.3.q1/docs/mindspore/code/lenet.py
        >>> net = LeNet5()
        >>> #1) All parameters use the same learning rate and weight decay
        >>> optim = nn.AdamWeightDecay(params=net.trainable_params())
@@ -1100,10 +1022,7 @@ class AdamWeightDecay(Optimizer):
            self.moments1 = self._parameters.clone(prefix="adam_m", init='zeros')
            self.moments2 = self._parameters.clone(prefix="adam_v", init='zeros')
            self.fused_opt = P.AdamWeightDecay()
-
-            self.use_fused_opt = False
-        else:
-            self.use_fused_opt = True
+            self.use_fused_opt = True
 
    @jit
    def construct(self, gradients):
@@ -1230,7 +1149,9 @@ class AdamOffload(Optimizer):
          - Iterable: Learning rate is dynamic. The i-th step will take the i-th value as the learning rate.
 
          - LearningRateSchedule: Learning rate is dynamic. During training, the optimizer calls the instance of
-            LearningRateSchedule
+            `LearningRateSchedule
+            <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.nn.html#learningrateschedule-class>`_
+            with step as the input to get the learning rate of current step.
 
        beta1 (float): The exponential decay rate for the 1st moment estimations. Should be in range (0.0, 1.0).
            Default: ``0.9`` .
@@ -1284,7 +1205,7 @@ class AdamOffload(Optimizer):
        >>> from mindspore import nn
        >>>
        >>> # Define the network structure of LeNet5. Refer to
-        >>> # https://gitee.com/mindspore/docs/blob/r2.
+        >>> # https://gitee.com/mindspore/docs/blob/r2.3.q1/docs/mindspore/code/lenet.py
        >>> net = LeNet5()
        >>> #1) All parameters use the same learning rate and weight decay
        >>> optim = nn.AdamOffload(params=net.trainable_params())
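The signature changes above remove the parameter-server arguments (the push/pull primitives plus `ps_parameter` and `cache_enable`) from the registered Adam helper functions; user-level construction of the optimizer is unchanged. A minimal sketch of that user-facing API, not taken from the diff itself (`nn.Dense` is a stand-in for the LeNet5 network used in the docstring examples):

# Sketch assuming mindspore 2.3.0rc1; nn.Dense stands in for a real network.
import mindspore.nn as nn

net = nn.Dense(16, 4)
# Construction matches the docstring example; the removed push/pull plumbing
# was internal and never appeared in this call.
optim = nn.Adam(params=net.trainable_params(), learning_rate=1e-3,
                beta1=0.9, beta2=0.999, eps=1e-8,
                use_locking=False, use_nesterov=False)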
mindspore/nn/optim/adamax.py
CHANGED
@@ -114,7 +114,9 @@ class AdaMax(Optimizer):
          - Iterable: Learning rate is dynamic. The i-th step will take the i-th value as the learning rate.
 
          - LearningRateSchedule: Learning rate is dynamic. During training, the optimizer calls the instance of
-            LearningRateSchedule
+            `LearningRateSchedule
+            <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.nn.html#learningrateschedule-class>`_
+            with step as the input to get the learning rate of current step.
 
        beta1 (float): The exponential decay rate for the 1st moment estimations. Should be in range (0.0, 1.0).
            Default: ``0.9`` .
@@ -161,7 +163,7 @@ class AdaMax(Optimizer):
        >>> from mindspore import nn
        >>>
        >>> # Define the network structure of LeNet5. Refer to
-        >>> # https://gitee.com/mindspore/docs/blob/r2.
+        >>> # https://gitee.com/mindspore/docs/blob/r2.3.q1/docs/mindspore/code/lenet.py
        >>> net = LeNet5()
        >>> #1) All parameters use the same learning rate and weight decay
        >>> optim = nn.AdaMax(params=net.trainable_params())
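The `LearningRateSchedule` link added to the docstrings above refers to the dynamic learning-rate option these optimizers share: any `nn.LearningRateSchedule` subclass can be passed as `learning_rate`, and the optimizer queries it with the current step. A hedged sketch of that usage (the schedule class and its arguments are standard mindspore.nn API, not part of this diff):

import mindspore.nn as nn

net = nn.Dense(16, 4)
# The optimizer calls the schedule with the global step to get each step's lr.
schedule = nn.ExponentialDecayLR(learning_rate=0.1, decay_rate=0.9, decay_steps=100)
optim = nn.AdaMax(params=net.trainable_params(), learning_rate=schedule)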
mindspore/nn/optim/adasum.py
CHANGED
@@ -445,7 +445,7 @@ class AdaSumByGradWrapCell(Cell):
        >>> import mindspore as ms
        >>> from mindspore import nn
        >>> # Define the network structure of LeNet5. Refer to
-        >>> # https://gitee.com/mindspore/docs/blob/r2.
+        >>> # https://gitee.com/mindspore/docs/blob/r2.3.q1/docs/mindspore/code/lenet.py
        >>> net = LeNet5()
        >>> optim = nn.AdaSumByGradWrapCell(nn.Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9))
        >>> loss = nn.SoftmaxCrossEntropyWithLogits()
@@ -514,7 +514,7 @@ class AdaSumByDeltaWeightWrapCell(Cell):
        >>> import mindspore as ms
        >>> from mindspore import nn
        >>> # Define the network structure of LeNet5. Refer to
-        >>> # https://gitee.com/mindspore/docs/blob/r2.
+        >>> # https://gitee.com/mindspore/docs/blob/r2.3.q1/docs/mindspore/code/lenet.py
        >>> net = LeNet5()
        >>> optim = nn.AdaSumByDeltaWeightWrapCell(nn.Momentum(params=net.trainable_params(),
        ...                                                    learning_rate=0.1, momentum=0.9))
mindspore/nn/optim/asgd.py
CHANGED
@@ -93,7 +93,9 @@ class ASGD(Optimizer):
          - Iterable: Learning rate is dynamic. The i-th step will take the i-th value as the learning rate.
 
          - LearningRateSchedule: Learning rate is dynamic. During training, the optimizer calls the instance of
-            LearningRateSchedule
+            `LearningRateSchedule
+            <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.nn.html#learningrateschedule-class>`_
+            with step as the input to get the learning rate of current step.
 
        lambd (float): The decay term. Default: ``1e-4`` .
        alpha (float): The power for :math:`\eta` update. Default: ``0.75`` .
@@ -128,7 +130,7 @@ class ASGD(Optimizer):
        >>> from mindspore import nn
        >>>
        >>> # Define the network structure of LeNet5. Refer to
-        >>> # https://gitee.com/mindspore/docs/blob/r2.
+        >>> # https://gitee.com/mindspore/docs/blob/r2.3.q1/docs/mindspore/code/lenet.py
        >>> net = LeNet5()
        >>> #1) All parameters use the same learning rate and weight decay
        >>> optim = nn.ASGD(params=net.trainable_params())
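As with the other optimizers in this diff, only the docstring changed for ASGD. A minimal sketch of its construction with the parameters documented above (`lambd` is the decay term, `alpha` the power for the eta update; values shown are the documented defaults):

import mindspore.nn as nn

net = nn.Dense(16, 4)
optim = nn.ASGD(params=net.trainable_params(), learning_rate=0.01,
                lambd=1e-4, alpha=0.75)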