mindspore-2.2.11-cp37-cp37m-manylinux1_x86_64.whl → mindspore-2.3.0rc1-cp37-cp37m-manylinux1_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic; see the registry's advisory page for more details.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +5 -4
- mindspore/_akg/akg/composite/build_module.py +155 -11
- mindspore/_akg/akg/config/repository.json +38 -0
- mindspore/_akg/akg/ms/info_version_adapt.py +29 -0
- mindspore/_akg/akg/topi/cpp/impl.py +1 -1
- mindspore/_akg/akg/tvm/_ffi/base.py +1 -1
- mindspore/_akg/akg/tvm/contrib/nvcc.py +4 -1
- mindspore/_akg/akg/utils/ascend_profilier/path_manager.py +2 -1
- mindspore/_akg/akg/utils/composite_op_helper.py +4 -2
- mindspore/_akg/akg/utils/dump_ascend_meta.py +2 -2
- mindspore/_akg/akg/utils/gen_random.py +14 -8
- mindspore/_akg/akg/utils/op_dsl.py +11 -0
- mindspore/_akg/akg/utils/tbe_codegen_utils.py +5 -5
- mindspore/_c_dataengine.cpython-37m-x86_64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-37m-x86_64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-37m-x86_64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +58 -0
- mindspore/_extends/builtin_operations.py +2 -1
- mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
- mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
- mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
- mindspore/_extends/parse/__init__.py +18 -14
- mindspore/_extends/parse/compile_config.py +229 -0
- mindspore/_extends/parse/parser.py +155 -59
- mindspore/_extends/parse/resources.py +40 -7
- mindspore/_extends/parse/standard_method.py +124 -204
- mindspore/_extends/remote/kernel_build_server.py +2 -0
- mindspore/_mindspore_offline_debug.cpython-37m-x86_64-linux-gnu.so +0 -0
- mindspore/_profiler.py +30 -0
- mindspore/amp.py +24 -18
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/boost_cell_wrapper.py +1 -1
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +3 -1
- mindspore/common/_jit_fallback_utils.py +2 -3
- mindspore/common/_register_for_adapter.py +7 -0
- mindspore/common/_stub_tensor.py +6 -1
- mindspore/common/_utils.py +5 -17
- mindspore/common/api.py +91 -48
- mindspore/common/auto_dynamic_shape.py +27 -14
- mindspore/common/dtype.py +5 -4
- mindspore/common/dump.py +5 -4
- mindspore/common/initializer.py +1 -1
- mindspore/common/jit_config.py +20 -11
- mindspore/common/lazy_inline.py +58 -17
- mindspore/common/mindir_util.py +12 -2
- mindspore/common/mutable.py +79 -14
- mindspore/common/parameter.py +19 -4
- mindspore/common/seed.py +9 -9
- mindspore/common/sparse_tensor.py +251 -18
- mindspore/common/symbol.py +122 -0
- mindspore/common/tensor.py +321 -435
- mindspore/communication/__init__.py +3 -3
- mindspore/communication/_comm_helper.py +5 -0
- mindspore/communication/management.py +56 -38
- mindspore/config/op_info.config +22 -54
- mindspore/context.py +192 -54
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +6 -6
- mindspore/dataset/audio/transforms.py +711 -158
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/engine/cache_client.py +2 -2
- mindspore/dataset/engine/datasets.py +95 -38
- mindspore/dataset/engine/datasets_audio.py +14 -14
- mindspore/dataset/engine/datasets_standard_format.py +33 -3
- mindspore/dataset/engine/datasets_text.py +38 -38
- mindspore/dataset/engine/datasets_user_defined.py +7 -7
- mindspore/dataset/engine/datasets_vision.py +75 -71
- mindspore/dataset/engine/offload.py +5 -7
- mindspore/dataset/engine/validators.py +1 -1
- mindspore/dataset/text/__init__.py +3 -3
- mindspore/dataset/text/transforms.py +408 -121
- mindspore/dataset/text/utils.py +9 -9
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/transforms.py +261 -76
- mindspore/dataset/utils/browse_dataset.py +9 -9
- mindspore/dataset/vision/__init__.py +3 -3
- mindspore/dataset/vision/c_transforms.py +5 -5
- mindspore/dataset/vision/py_transforms_util.py +2 -2
- mindspore/dataset/vision/transforms.py +2264 -514
- mindspore/dataset/vision/utils.py +40 -9
- mindspore/dataset/vision/validators.py +7 -1
- mindspore/experimental/optim/__init__.py +12 -2
- mindspore/experimental/optim/adadelta.py +161 -0
- mindspore/experimental/optim/adagrad.py +168 -0
- mindspore/experimental/optim/adam.py +35 -34
- mindspore/experimental/optim/adamax.py +170 -0
- mindspore/experimental/optim/adamw.py +40 -16
- mindspore/experimental/optim/asgd.py +153 -0
- mindspore/experimental/optim/lr_scheduler.py +65 -125
- mindspore/experimental/optim/nadam.py +157 -0
- mindspore/experimental/optim/optimizer.py +15 -8
- mindspore/experimental/optim/radam.py +194 -0
- mindspore/experimental/optim/rmsprop.py +154 -0
- mindspore/experimental/optim/rprop.py +164 -0
- mindspore/experimental/optim/sgd.py +28 -19
- mindspore/hal/__init__.py +34 -0
- mindspore/hal/_ascend.py +57 -0
- mindspore/hal/_base.py +57 -0
- mindspore/hal/_cpu.py +56 -0
- mindspore/hal/_gpu.py +57 -0
- mindspore/hal/device.py +356 -0
- mindspore/hal/event.py +179 -0
- mindspore/hal/stream.py +337 -0
- mindspore/include/api/data_type.h +2 -2
- mindspore/include/api/dual_abi_helper.h +16 -3
- mindspore/include/api/model.h +1 -3
- mindspore/include/api/status.h +14 -0
- mindspore/include/c_api/model_c.h +173 -0
- mindspore/include/c_api/ms/base/types.h +1 -0
- mindspore/include/c_api/types_c.h +19 -0
- mindspore/include/dataset/execute.h +1 -3
- mindspore/include/mindapi/base/format.h +125 -23
- mindspore/include/mindapi/base/types.h +7 -0
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libmpi_collective.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +2044 -154
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +2044 -33
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/build_tbe_kernel.py +529 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/compiler.py +56 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/custom.py +1109 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/get_file_path.py +36 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/ai_core/tbe/custom_aicore_ops_impl/tbe_topi.py +556 -0
- mindspore/lib/plugin/ascend/custom_aicore_ops/op_impl/vector_core/tbe/custom_aicore_ops_impl/kv_cache_mgr.py +0 -2
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6365 -1759
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_add_custom.h +49 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_decoder_kv_cache.h +59 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/include/aclnn_prompt_kv_cache.h +59 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_api/lib/libcust_opapi.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend310p/aic-ascend310p-ops-info.json +52 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910/aic-ascend910-ops-info.json +232 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/config/ascend910b/aic-ascend910b-ops-info.json +232 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.cpp +81 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/add_custom.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.cpp +192 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/decoder_kv_cache.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.cpp +274 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/custom_ascendc_ops_impl/dynamic/prompt_kv_cache.py +134 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/lib/linux/x86_64/libcust_opmaster_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_impl/ai_core/tbe/op_tiling/liboptiling.so +0 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/inc/op_proto.h +39 -0
- mindspore/lib/plugin/ascend/custom_ascendc_ops/op_proto/lib/linux/x86_64/libcust_opsproto_rt2.0.so +0 -0
- mindspore/lib/plugin/ascend/libakg.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/cpu/libakg.so +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
- mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu10.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.6/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
- mindspore/mindrecord/__init__.py +5 -1
- mindspore/mindrecord/config.py +809 -0
- mindspore/mindrecord/filereader.py +25 -0
- mindspore/mindrecord/filewriter.py +74 -56
- mindspore/mindrecord/mindpage.py +40 -6
- mindspore/mindrecord/shardutils.py +3 -2
- mindspore/mindrecord/shardwriter.py +7 -0
- mindspore/mindrecord/tools/cifar100_to_mr.py +53 -66
- mindspore/mindrecord/tools/cifar10_to_mr.py +48 -63
- mindspore/mindrecord/tools/csv_to_mr.py +7 -17
- mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
- mindspore/mindrecord/tools/mnist_to_mr.py +11 -21
- mindspore/mindrecord/tools/tfrecord_to_mr.py +2 -10
- mindspore/multiprocessing/__init__.py +68 -0
- mindspore/nn/cell.py +86 -133
- mindspore/nn/dynamic_lr.py +2 -2
- mindspore/nn/layer/activation.py +80 -91
- mindspore/nn/layer/basic.py +4 -80
- mindspore/nn/layer/channel_shuffle.py +3 -16
- mindspore/nn/layer/container.py +3 -3
- mindspore/nn/layer/conv.py +71 -71
- mindspore/nn/layer/embedding.py +107 -46
- mindspore/nn/layer/image.py +4 -7
- mindspore/nn/layer/normalization.py +46 -38
- mindspore/nn/layer/padding.py +26 -39
- mindspore/nn/layer/pooling.py +13 -9
- mindspore/nn/layer/rnn_cells.py +5 -15
- mindspore/nn/layer/rnns.py +6 -5
- mindspore/nn/layer/thor_layer.py +1 -2
- mindspore/nn/layer/timedistributed.py +1 -1
- mindspore/nn/layer/transformer.py +52 -50
- mindspore/nn/learning_rate_schedule.py +6 -5
- mindspore/nn/loss/loss.py +44 -65
- mindspore/nn/optim/ada_grad.py +6 -4
- mindspore/nn/optim/adadelta.py +3 -1
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +102 -181
- mindspore/nn/optim/adamax.py +4 -2
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +4 -2
- mindspore/nn/optim/ftrl.py +31 -61
- mindspore/nn/optim/lamb.py +5 -3
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +6 -4
- mindspore/nn/optim/momentum.py +13 -25
- mindspore/nn/optim/optimizer.py +6 -3
- mindspore/nn/optim/proximal_ada_grad.py +4 -2
- mindspore/nn/optim/rmsprop.py +9 -3
- mindspore/nn/optim/rprop.py +4 -2
- mindspore/nn/optim/sgd.py +4 -2
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
- mindspore/nn/probability/distribution/beta.py +2 -2
- mindspore/nn/probability/distribution/categorical.py +4 -6
- mindspore/nn/probability/distribution/cauchy.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/poisson.py +2 -2
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +13 -1
- mindspore/nn/wrap/__init__.py +2 -1
- mindspore/nn/wrap/cell_wrapper.py +33 -12
- mindspore/nn/wrap/grad_reducer.py +148 -8
- mindspore/nn/wrap/loss_scale.py +7 -7
- mindspore/numpy/__init__.py +2 -0
- mindspore/numpy/array_creations.py +2 -0
- mindspore/numpy/array_ops.py +1 -5
- mindspore/numpy/fft.py +431 -0
- mindspore/numpy/math_ops.py +53 -59
- mindspore/numpy/utils.py +3 -0
- mindspore/ops/__init__.py +7 -3
- mindspore/ops/_grad_experimental/grad_array_ops.py +4 -160
- mindspore/ops/_grad_experimental/grad_comm_ops.py +14 -18
- mindspore/ops/_grad_experimental/grad_inner_ops.py +8 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +92 -287
- mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
- mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
- mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
- mindspore/ops/_op_impl/__init__.py +0 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +1 -0
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
- mindspore/ops/_op_impl/{cpu/concat.py → aicpu/generate_eod_mask.py} +16 -17
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
- mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -3
- mindspore/ops/_op_impl/cpu/adam.py +2 -2
- mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
- mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
- mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
- mindspore/ops/_vmap/vmap_array_ops.py +137 -101
- mindspore/ops/_vmap/vmap_base.py +8 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +102 -56
- mindspore/ops/_vmap/vmap_image_ops.py +70 -13
- mindspore/ops/_vmap/vmap_math_ops.py +74 -49
- mindspore/ops/_vmap/vmap_nn_ops.py +164 -89
- mindspore/ops/_vmap/vmap_other_ops.py +1 -1
- mindspore/ops/auto_generate/__init__.py +31 -0
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +133 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +248 -0
- mindspore/ops/auto_generate/gen_arg_handler.py +147 -0
- mindspore/ops/auto_generate/gen_extend_func.py +130 -0
- mindspore/ops/auto_generate/gen_ops_def.py +4786 -0
- mindspore/ops/auto_generate/gen_ops_prim.py +8335 -0
- mindspore/ops/auto_generate/pyboost_inner_prim.py +77 -0
- mindspore/ops/composite/__init__.py +5 -2
- mindspore/ops/composite/base.py +118 -17
- mindspore/ops/composite/math_ops.py +9 -48
- mindspore/ops/composite/multitype_ops/_compile_utils.py +166 -601
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +15 -133
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
- mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
- mindspore/ops/deprecated.py +14 -3
- mindspore/ops/extend/__init__.py +46 -0
- mindspore/ops/extend/array_func.py +152 -0
- mindspore/ops/extend/math_func.py +76 -0
- mindspore/ops/{_op_impl/tbe/atomic_addr_clean.py → extend/nn_func.py} +5 -15
- mindspore/ops/function/__init__.py +19 -11
- mindspore/ops/function/array_func.py +248 -1436
- mindspore/ops/function/clip_func.py +12 -13
- mindspore/ops/function/debug_func.py +2 -5
- mindspore/ops/function/fft_func.py +31 -0
- mindspore/ops/function/grad/grad_func.py +24 -17
- mindspore/ops/function/image_func.py +27 -21
- mindspore/ops/function/linalg_func.py +30 -53
- mindspore/ops/function/math_func.py +450 -2356
- mindspore/ops/function/nn_func.py +470 -789
- mindspore/ops/function/other_func.py +4 -5
- mindspore/ops/function/parameter_func.py +6 -92
- mindspore/ops/function/random_func.py +24 -80
- mindspore/ops/function/sparse_unary_func.py +11 -18
- mindspore/ops/function/spectral_func.py +1 -1
- mindspore/ops/function/vmap_func.py +15 -14
- mindspore/ops/functional.py +56 -62
- mindspore/ops/op_info_register.py +22 -19
- mindspore/ops/operations/__init__.py +19 -19
- mindspore/ops/operations/_embedding_cache_ops.py +1 -1
- mindspore/ops/operations/_grad_ops.py +20 -723
- mindspore/ops/operations/_inner_ops.py +233 -286
- mindspore/ops/operations/_quant_ops.py +4 -4
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/_scalar_ops.py +5 -480
- mindspore/ops/operations/_sequence_ops.py +4 -34
- mindspore/ops/operations/array_ops.py +100 -2481
- mindspore/ops/operations/comm_ops.py +38 -46
- mindspore/ops/operations/custom_ops.py +9 -9
- mindspore/ops/operations/debug_ops.py +101 -32
- mindspore/ops/operations/image_ops.py +3 -219
- mindspore/ops/operations/inner_ops.py +52 -38
- mindspore/ops/operations/linalg_ops.py +1 -49
- mindspore/{rewrite/ast_transformers → ops/operations/manually_defined}/__init__.py +11 -4
- mindspore/ops/operations/manually_defined/_inner.py +61 -0
- mindspore/ops/operations/manually_defined/ops_def.py +1391 -0
- mindspore/ops/operations/math_ops.py +752 -4588
- mindspore/ops/operations/nn_ops.py +380 -1750
- mindspore/ops/operations/other_ops.py +50 -42
- mindspore/ops/operations/random_ops.py +3 -50
- mindspore/ops/operations/sparse_ops.py +4 -4
- mindspore/ops/primitive.py +196 -96
- mindspore/ops/silent_check.py +162 -0
- mindspore/ops_generate/__init__.py +27 -0
- mindspore/ops_generate/arg_dtype_cast.py +248 -0
- mindspore/ops_generate/arg_handler.py +147 -0
- mindspore/ops_generate/gen_aclnn_implement.py +266 -0
- mindspore/ops_generate/gen_ops.py +1062 -0
- mindspore/ops_generate/gen_ops_inner_prim.py +129 -0
- mindspore/ops_generate/gen_pyboost_func.py +932 -0
- mindspore/ops_generate/gen_utils.py +188 -0
- mindspore/ops_generate/op_proto.py +138 -0
- mindspore/ops_generate/pyboost_utils.py +364 -0
- mindspore/ops_generate/template.py +238 -0
- mindspore/parallel/__init__.py +6 -4
- mindspore/parallel/_auto_parallel_context.py +28 -4
- mindspore/parallel/_cell_wrapper.py +16 -9
- mindspore/parallel/_cost_model_context.py +1 -1
- mindspore/parallel/_dp_allreduce_fusion.py +159 -159
- mindspore/parallel/_parallel_serialization.py +28 -12
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +22 -8
- mindspore/parallel/_transformer/__init__.py +1 -1
- mindspore/parallel/_transformer/layers.py +1 -1
- mindspore/parallel/_transformer/loss.py +1 -1
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/op_parallel_config.py +1 -1
- mindspore/parallel/_transformer/transformer.py +9 -9
- mindspore/parallel/_utils.py +131 -6
- mindspore/parallel/algo_parameter_config.py +6 -6
- mindspore/parallel/checkpoint_transform.py +156 -26
- mindspore/parallel/cluster/__init__.py +15 -0
- mindspore/parallel/cluster/process_entity/__init__.py +18 -0
- mindspore/parallel/cluster/process_entity/_api.py +345 -0
- mindspore/parallel/cluster/process_entity/_utils.py +116 -0
- mindspore/parallel/cluster/run.py +139 -0
- mindspore/parallel/mpi/__init__.py +1 -1
- mindspore/parallel/mpi/_mpi_config.py +1 -1
- mindspore/parallel/parameter_broadcast.py +152 -0
- mindspore/parallel/shard.py +99 -2
- mindspore/profiler/common/util.py +20 -0
- mindspore/profiler/envprofiling.py +1 -1
- mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +66 -0
- mindspore/profiler/parser/ascend_analysis/file_manager.py +77 -0
- mindspore/profiler/parser/ascend_analysis/function_event.py +146 -0
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +108 -0
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +80 -0
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +52 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +104 -0
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +59 -0
- mindspore/profiler/parser/ascend_cluster_generator.py +116 -0
- mindspore/profiler/parser/ascend_communicate_generator.py +314 -0
- mindspore/profiler/parser/ascend_flops_generator.py +27 -5
- mindspore/profiler/parser/ascend_fpbp_generator.py +8 -2
- mindspore/profiler/parser/ascend_hccl_generator.py +27 -279
- mindspore/profiler/parser/ascend_msprof_exporter.py +122 -118
- mindspore/profiler/parser/ascend_msprof_generator.py +67 -273
- mindspore/profiler/parser/ascend_op_generator.py +68 -27
- mindspore/profiler/parser/ascend_timeline_generator.py +292 -131
- mindspore/profiler/parser/base_timeline_generator.py +17 -3
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +2 -1
- mindspore/profiler/parser/framework_parser.py +11 -4
- mindspore/profiler/parser/integrator.py +3 -1
- mindspore/profiler/parser/memory_usage_parser.py +8 -2
- mindspore/profiler/parser/minddata_analyzer.py +8 -2
- mindspore/profiler/parser/minddata_parser.py +1 -1
- mindspore/profiler/parser/msadvisor_analyzer.py +5 -3
- mindspore/profiler/parser/msadvisor_parser.py +10 -4
- mindspore/profiler/parser/profiler_info.py +5 -0
- mindspore/profiler/profiling.py +373 -171
- mindspore/rewrite/__init__.py +2 -13
- mindspore/rewrite/api/node.py +122 -36
- mindspore/rewrite/api/pattern_engine.py +2 -3
- mindspore/rewrite/api/scoped_value.py +16 -15
- mindspore/rewrite/api/symbol_tree.py +46 -30
- mindspore/rewrite/ast_helpers/__init__.py +3 -6
- mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
- mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
- mindspore/rewrite/common/__init__.py +1 -2
- mindspore/rewrite/common/config.py +24 -0
- mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
- mindspore/rewrite/{namer.py → common/namer.py} +63 -18
- mindspore/rewrite/common/namespace.py +118 -0
- mindspore/rewrite/node/__init__.py +5 -5
- mindspore/rewrite/node/call_function.py +23 -7
- mindspore/rewrite/node/cell_container.py +7 -3
- mindspore/rewrite/node/control_flow.py +53 -28
- mindspore/rewrite/node/node.py +212 -196
- mindspore/rewrite/node/node_manager.py +51 -22
- mindspore/rewrite/node/node_topological_manager.py +3 -23
- mindspore/rewrite/parsers/__init__.py +12 -0
- mindspore/rewrite/parsers/arguments_parser.py +8 -9
- mindspore/rewrite/parsers/assign_parser.py +635 -413
- mindspore/rewrite/parsers/attribute_parser.py +3 -4
- mindspore/rewrite/parsers/class_def_parser.py +107 -144
- mindspore/rewrite/parsers/constant_parser.py +5 -5
- mindspore/rewrite/parsers/container_parser.py +4 -6
- mindspore/rewrite/parsers/expr_parser.py +55 -0
- mindspore/rewrite/parsers/for_parser.py +31 -98
- mindspore/rewrite/parsers/function_def_parser.py +13 -5
- mindspore/rewrite/parsers/if_parser.py +28 -10
- mindspore/rewrite/parsers/module_parser.py +8 -182
- mindspore/rewrite/parsers/parser.py +1 -5
- mindspore/rewrite/parsers/parser_register.py +1 -1
- mindspore/rewrite/parsers/return_parser.py +5 -10
- mindspore/rewrite/parsers/while_parser.py +59 -0
- mindspore/rewrite/sparsify/utils.py +1 -1
- mindspore/rewrite/symbol_tree/__init__.py +20 -0
- mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +704 -185
- mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
- mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
- mindspore/run_check/_check_version.py +6 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/safeguard/rewrite_obfuscation.py +9 -19
- mindspore/scipy/__init__.py +2 -1
- mindspore/scipy/fft.py +133 -0
- mindspore/scipy/linalg.py +140 -55
- mindspore/scipy/ops.py +15 -71
- mindspore/scipy/ops_grad.py +5 -34
- mindspore/scipy/optimize/line_search.py +2 -2
- mindspore/scipy/optimize/minimize.py +1 -1
- mindspore/train/__init__.py +3 -2
- mindspore/train/_utils.py +178 -4
- mindspore/train/amp.py +167 -245
- mindspore/train/callback/_backup_and_restore.py +4 -4
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +47 -21
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_landscape.py +15 -10
- mindspore/train/callback/_loss_monitor.py +2 -2
- mindspore/train/callback/_on_request_exit.py +2 -2
- mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
- mindspore/train/callback/_summary_collector.py +13 -14
- mindspore/train/callback/_time_monitor.py +2 -2
- mindspore/train/data_sink.py +1 -1
- mindspore/train/dataset_helper.py +19 -4
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/accuracy.py +7 -7
- mindspore/train/metrics/confusion_matrix.py +8 -6
- mindspore/train/metrics/cosine_similarity.py +6 -4
- mindspore/train/metrics/error.py +2 -2
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/metrics/perplexity.py +2 -1
- mindspore/train/metrics/topk.py +2 -2
- mindspore/train/mind_ir_pb2.py +75 -6
- mindspore/train/model.py +41 -27
- mindspore/train/serialization.py +262 -133
- mindspore/train/summary/_writer_pool.py +1 -1
- mindspore/train/summary/summary_record.py +56 -34
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/version.py +1 -1
- {mindspore-2.2.11.dist-info → mindspore-2.3.0rc1.dist-info}/METADATA +2 -2
- {mindspore-2.2.11.dist-info → mindspore-2.3.0rc1.dist-info}/RECORD +532 -1075
- {mindspore-2.2.11.dist-info → mindspore-2.3.0rc1.dist-info}/entry_points.txt +1 -0
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
- mindspore/config/super_bar_config.json +0 -544
- mindspore/gen_ops.py +0 -273
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
- mindspore/nn/layer/flash_attention.py +0 -189
- mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
- mindspore/ops/_op_impl/tbe/__init__.py +0 -47
- mindspore/ops/_op_impl/tbe/abs.py +0 -38
- mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/acos.py +0 -37
- mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/acosh.py +0 -37
- mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
- mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
- mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
- mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
- mindspore/ops/_op_impl/tbe/add.py +0 -42
- mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/add_n.py +0 -39
- mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
- mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
- mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
- mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
- mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
- mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
- mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
- mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
- mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
- mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/asin.py +0 -37
- mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/asinh.py +0 -37
- mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/assign.py +0 -79
- mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
- mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
- mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/atan.py +0 -37
- mindspore/ops/_op_impl/tbe/atan2.py +0 -38
- mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/atanh.py +0 -37
- mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
- mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
- mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
- mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
- mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
- mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
- mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
- mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
- mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
- mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
- mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
- mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
- mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
- mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
- mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cast.py +0 -55
- mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/cdist.py +0 -38
- mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/ceil.py +0 -37
- mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/celu.py +0 -39
- mindspore/ops/_op_impl/tbe/centralization.py +0 -39
- mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
- mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/concat.py +0 -40
- mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
- mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
- mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
- mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
- mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
- mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/cos.py +0 -37
- mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/cosh.py +0 -37
- mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
- mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
- mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cummin.py +0 -41
- mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
- mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
- mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
- mindspore/ops/_op_impl/tbe/diag.py +0 -38
- mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
- mindspore/ops/_op_impl/tbe/dilation.py +0 -40
- mindspore/ops/_op_impl/tbe/div.py +0 -41
- mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
- mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
- mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
- mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
- mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
- mindspore/ops/_op_impl/tbe/elu.py +0 -38
- mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/equal.py +0 -42
- mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/erf.py +0 -37
- mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfc.py +0 -37
- mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
- mindspore/ops/_op_impl/tbe/exp.py +0 -40
- mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
- mindspore/ops/_op_impl/tbe/expm1.py +0 -37
- mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
- mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/fill.py +0 -56
- mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/flatten.py +0 -48
- mindspore/ops/_op_impl/tbe/floor.py +0 -37
- mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
- mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
- mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
- mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
- mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
- mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
- mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
- mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/ger.py +0 -43
- mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/greater.py +0 -43
- mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
- mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
- mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
- mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
- mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
- mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
- mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
- mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/im2col.py +0 -42
- mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
- mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
- mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/inv.py +0 -38
- mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/invert.py +0 -37
- mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/iou.py +0 -38
- mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/is_close.py +0 -40
- mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
- mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
- mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
- mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
- mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
- mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
- mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/lerp.py +0 -38
- mindspore/ops/_op_impl/tbe/less.py +0 -41
- mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/log.py +0 -40
- mindspore/ops/_op_impl/tbe/log1p.py +0 -37
- mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
- mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
- mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
- mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
- mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/matmul.py +0 -53
- mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
- mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
- mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
- mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum.py +0 -39
- mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
- mindspore/ops/_op_impl/tbe/minimum.py +0 -40
- mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mish.py +0 -37
- mindspore/ops/_op_impl/tbe/mod.py +0 -41
- mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/mul.py +0 -37
- mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
- mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
- mindspore/ops/_op_impl/tbe/neg.py +0 -39
- mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
- mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
- mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
- mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
- mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
- mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
- mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/pack.py +0 -58
- mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
- mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
- mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/pdist.py +0 -36
- mindspore/ops/_op_impl/tbe/pooling.py +0 -46
- mindspore/ops/_op_impl/tbe/population_count.py +0 -38
- mindspore/ops/_op_impl/tbe/pow.py +0 -41
- mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/prelu.py +0 -37
- mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/range.py +0 -39
- mindspore/ops/_op_impl/tbe/real_div.py +0 -38
- mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
- mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
- mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
- mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
- mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6.py +0 -38
- mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/renorm.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
- mindspore/ops/_op_impl/tbe/rint.py +0 -37
- mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roll.py +0 -42
- mindspore/ops/_op_impl/tbe/round.py +0 -38
- mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
- mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
- mindspore/ops/_op_impl/tbe/select.py +0 -38
- mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/selu.py +0 -39
- mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sgd.py +0 -62
- mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sign.py +0 -38
- mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/sin.py +0 -37
- mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sinh.py +0 -37
- mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/slice.py +0 -58
- mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
- mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax.py +0 -37
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
- mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/softplus.py +0 -37
- mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softsign.py +0 -37
- mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sort.py +0 -38
- mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/split_d.py +0 -38
- mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/split_v.py +0 -39
- mindspore/ops/_op_impl/tbe/splitv.py +0 -39
- mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/square.py +0 -38
- mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
- mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
- mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
- mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
- mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
- mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
- mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
- mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
- mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
- mindspore/ops/_op_impl/tbe/sub.py +0 -39
- mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tan.py +0 -38
- mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh.py +0 -37
- mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
- mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
- mindspore/ops/_op_impl/tbe/tile.py +0 -37
- mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
- mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
- mindspore/ops/_op_impl/tbe/transpose.py +0 -60
- mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
- mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
- mindspore/ops/_op_impl/tbe/trunc.py +0 -39
- mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/unpack.py +0 -38
- mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
- mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
- mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
- mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
- mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
- mindspore/ops/_tracefunc.py +0 -241
- mindspore/ops/arg_dtype_cast.py +0 -54
- mindspore/rewrite/api/tree_node_helper.py +0 -60
- mindspore/rewrite/ast_creator_register.py +0 -37
- mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
- mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
- mindspore/rewrite/namespace.py +0 -53
- {mindspore-2.2.11.dist-info → mindspore-2.3.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.2.11.dist-info → mindspore-2.3.0rc1.dist-info}/top_level.txt +0 -0
|
@@ -25,7 +25,7 @@ from mindspore.ops import operations as P
|
|
|
25
25
|
from mindspore import _checkparam as Validator
|
|
26
26
|
|
|
27
27
|
|
|
28
|
-
__all__ = ['StepLR', 'LinearLR', 'LRScheduler', 'ExponentialLR', 'PolynomialLR',
|
|
28
|
+
__all__ = ['StepLR', 'LinearLR', 'LRScheduler', 'ExponentialLR', 'PolynomialLR',
|
|
29
29
|
'MultiplicativeLR', 'ConstantLR', 'MultiStepLR', 'LambdaLR', 'SequentialLR', 'ReduceLROnPlateau',
|
|
30
30
|
'CyclicLR', 'CosineAnnealingWarmRestarts', 'CosineAnnealingLR']
|
|
31
31
|
|
|
@@ -38,7 +38,7 @@ class LRScheduler:
|
|
|
38
38
|
.. warning::
|
|
39
39
|
This is an experimental lr scheduler module that is subject to change.
|
|
40
40
|
This module must be used with optimizers in `Experimental Optimizer
|
|
41
|
-
<https://www.mindspore.cn/docs/en/r2.
|
|
41
|
+
<https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.experimental.html#experimental-optimizer>`_ .
|
|
42
42
|
|
|
43
43
|
Args:
|
|
44
44
|
optimizer (:class:`mindspore.experimental.optim.Optimizer`): The optimizer instance.
|
|
@@ -64,12 +64,11 @@ class LRScheduler:
|
|
|
64
64
|
... super(ConstantLR, self).__init__(optimizer, last_epoch)
|
|
65
65
|
...
|
|
66
66
|
... def get_lr(self):
|
|
67
|
-
... lrs = [lr.value() for lr in self._last_lr]
|
|
68
67
|
... if self.last_epoch == 0:
|
|
69
|
-
... return [lr * self.factor for lr in
|
|
68
|
+
... return [lr * self.factor for lr in self._last_lr]
|
|
70
69
|
... if self.last_epoch != self.total_iters:
|
|
71
|
-
... return
|
|
72
|
-
... return
|
|
70
|
+
... return [lr * 1. for lr in self._last_lr]
|
|
71
|
+
... return [lr / self.factor for lr in self._last_lr]
|
|
73
72
|
>>>
|
|
74
73
|
>>> net = nn.Dense(8, 2)
|
|
75
74
|
>>> optimizer = optim.SGD(net.trainable_params(), 0.01)
|
|
@@ -100,7 +99,7 @@ class LRScheduler:
|
|
|
100
99
|
f"in param_groups[{i}] when resuming an optimizer")
|
|
101
100
|
self.base_lrs = [group['initial_lr'] for group in optimizer.param_groups]
|
|
102
101
|
self.optimizer = optimizer
|
|
103
|
-
self._last_lr = [
|
|
102
|
+
self._last_lr = [lr for lr in optimizer.lrs]
|
|
104
103
|
self.groups_num = len(optimizer.param_groups)
|
|
105
104
|
self.last_epoch = Parameter(Tensor(last_epoch, dtype=mstype.float32),
|
|
106
105
|
name='last_epoch_' + self.__class__.__name__)
|
|
@@ -136,7 +135,7 @@ class LRScheduler:
|
|
|
136
135
|
|
|
137
136
|
for i in range(self.groups_num):
|
|
138
137
|
lr = values[i]
|
|
139
|
-
ops.assign(self.
|
|
138
|
+
ops.assign(self._last_lr[i], lr)
|
|
140
139
|
|
|
141
140
|
return True
|
|
142
141
|
|
|
@@ -150,7 +149,7 @@ class StepLR(LRScheduler):
|
|
|
150
149
|
.. warning::
|
|
151
150
|
This is an experimental lr scheduler module that is subject to change.
|
|
152
151
|
This module must be used with optimizers in `Experimental Optimizer
|
|
153
|
-
<https://www.mindspore.cn/docs/en/r2.
|
|
152
|
+
<https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.experimental.html#experimental-optimizer>`_ .
|
|
154
153
|
|
|
155
154
|
Args:
|
|
156
155
|
optimizer (:class:`mindspore.experimental.optim.Optimizer`): Wrapped optimizer.
|
|
@@ -167,7 +166,7 @@ class StepLR(LRScheduler):
|
|
|
167
166
|
>>> from mindspore import nn
|
|
168
167
|
>>> from mindspore.experimental import optim
|
|
169
168
|
>>> # Define the network structure of LeNet5. Refer to
|
|
170
|
-
>>> # https://gitee.com/mindspore/docs/blob/r2.
|
|
169
|
+
>>> # https://gitee.com/mindspore/docs/blob/r2.3.q1/docs/mindspore/code/lenet.py
|
|
171
170
|
>>> net = LeNet5()
|
|
172
171
|
>>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
|
|
173
172
|
>>> optimizer = optim.Adam(net.trainable_params(), lr=0.05)
|
|
@@ -187,22 +186,25 @@ class StepLR(LRScheduler):
|
|
|
187
186
|
... return loss
|
|
188
187
|
>>> for epoch in range(6):
|
|
189
188
|
... # Create the dataset taking MNIST as an example. Refer to
|
|
190
|
-
... # https://gitee.com/mindspore/docs/blob/r2.
|
|
189
|
+
... # https://gitee.com/mindspore/docs/blob/r2.3.q1/docs/mindspore/code/mnist.py
|
|
191
190
|
... for data, label in create_dataset():
|
|
192
191
|
... train_step(data, label)
|
|
193
192
|
... scheduler.step()
|
|
194
193
|
... current_lr = scheduler.get_last_lr()
|
|
195
194
|
"""
|
|
196
195
|
def __init__(self, optimizer, step_size, gamma=0.1, last_epoch=-1):
|
|
196
|
+
if not isinstance(step_size, int) and not isinstance(step_size, bool):
|
|
197
|
+
raise TypeError(f"For 'StepLR', the 'step_size' must be int, but got {type(step_size)}.")
|
|
198
|
+
if not isinstance(gamma, float):
|
|
199
|
+
raise TypeError(f"For 'StepLR', the 'gamma' must be float, but got {type(gamma)}.")
|
|
197
200
|
self.step_size = step_size
|
|
198
201
|
self.gamma = gamma
|
|
199
202
|
super(StepLR, self).__init__(optimizer, last_epoch)
|
|
200
203
|
|
|
201
204
|
def get_lr(self):
|
|
202
|
-
lrs = [lr.value() for lr in self._last_lr]
|
|
203
205
|
if self.last_epoch == 0 or self.last_epoch % self.step_size != 0:
|
|
204
|
-
return
|
|
205
|
-
return [lr * self.gamma for lr in
|
|
206
|
+
return [lr * 1. for lr in self._last_lr]
|
|
207
|
+
return [lr * self.gamma for lr in self._last_lr]
|
|
206
208
|
|
|
207
209
|
def _get_closed_form_lr(self):
|
|
208
210
|
return [base_lr * self.gamma ** (self.last_epoch // self.step_size)
|
|
@@ -219,7 +221,7 @@ class LinearLR(LRScheduler):
|
|
|
219
221
|
.. warning::
|
|
220
222
|
This is an experimental lr scheduler module that is subject to change.
|
|
221
223
|
This module must be used with optimizers in `Experimental Optimizer
|
|
222
|
-
<https://www.mindspore.cn/docs/en/r2.
|
|
224
|
+
<https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.experimental.html#experimental-optimizer>`_ .
|
|
223
225
|
|
|
224
226
|
Args:
|
|
225
227
|
optimizer (:class:`mindspore.experimental.optim.Optimizer`): Wrapped optimizer.
|
|
@@ -244,7 +246,7 @@ class LinearLR(LRScheduler):
|
|
|
244
246
|
>>> from mindspore import nn
|
|
245
247
|
>>> from mindspore.experimental import optim
|
|
246
248
|
>>> # Define the network structure of LeNet5. Refer to
|
|
247
|
-
>>> # https://gitee.com/mindspore/docs/blob/r2.
|
|
249
|
+
>>> # https://gitee.com/mindspore/docs/blob/r2.3.q1/docs/mindspore/code/lenet.py
|
|
248
250
|
>>> net = LeNet5()
|
|
249
251
|
>>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
|
|
250
252
|
>>> optimizer = optim.Adam(net.trainable_params(), lr=0.05)
|
|
@@ -266,7 +268,7 @@ class LinearLR(LRScheduler):
|
|
|
266
268
|
... return loss
|
|
267
269
|
>>> for epoch in range(5):
|
|
268
270
|
... # Create the dataset taking MNIST as an example. Refer to
|
|
269
|
-
... # https://gitee.com/mindspore/docs/blob/r2.
|
|
271
|
+
... # https://gitee.com/mindspore/docs/blob/r2.3.q1/docs/mindspore/code/mnist.py
|
|
270
272
|
... for data, label in create_dataset():
|
|
271
273
|
... train_step(data, label)
|
|
272
274
|
... scheduler.step()
|
|
@@ -287,17 +289,16 @@ class LinearLR(LRScheduler):
|
|
|
287
289
|
super(LinearLR, self).__init__(optimizer, last_epoch)
|
|
288
290
|
|
|
289
291
|
def get_lr(self):
|
|
290
|
-
lrs = [lr.value() for lr in self._last_lr]
|
|
291
292
|
|
|
292
293
|
if self.last_epoch == 0:
|
|
293
|
-
return [lr * self.start_factor for lr in
|
|
294
|
+
return [lr * self.start_factor for lr in self._last_lr]
|
|
294
295
|
|
|
295
296
|
if self.last_epoch > self.total_iters:
|
|
296
|
-
return
|
|
297
|
+
return [lr * 1. for lr in self._last_lr]
|
|
297
298
|
|
|
298
299
|
factor = 1. + (self.end_factor - self.start_factor) / (
|
|
299
300
|
self.total_iters * self.start_factor + (self.last_epoch - 1) * (self.end_factor - self.start_factor))
|
|
300
|
-
return [lr * factor for lr in
|
|
301
|
+
return [lr * factor for lr in self._last_lr]
|
|
301
302
|
|
|
302
303
|
def _get_closed_form_lr(self):
|
|
303
304
|
return [base_lr * (self.start_factor +
|
|
@@ -315,7 +316,7 @@ class ExponentialLR(LRScheduler):
|
|
|
315
316
|
.. warning::
|
|
316
317
|
This is an experimental lr scheduler module that is subject to change.
|
|
317
318
|
This module must be used with optimizers in `Experimental Optimizer
|
|
318
|
-
<https://www.mindspore.cn/docs/en/r2.
|
|
319
|
+
<https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.experimental.html#experimental-optimizer>`_ .
|
|
319
320
|
|
|
320
321
|
Args:
|
|
321
322
|
optimizer (:class:`mindspore.experimental.optim.Optimizer`): Wrapped optimizer.
|
|
@@ -347,14 +348,15 @@ class ExponentialLR(LRScheduler):
|
|
|
347
348
|
"""
|
|
348
349
|
|
|
349
350
|
def __init__(self, optimizer, gamma, last_epoch=-1):
|
|
351
|
+
if not isinstance(gamma, float):
|
|
352
|
+
raise TypeError(f"For 'ExponentialLR', the 'gamma' must be float, but got {type(gamma)}.")
|
|
350
353
|
self.gamma = gamma
|
|
351
354
|
super(ExponentialLR, self).__init__(optimizer, last_epoch)
|
|
352
355
|
|
|
353
356
|
def get_lr(self):
|
|
354
|
-
lrs = [lr.value() for lr in self._last_lr]
|
|
355
357
|
if self.last_epoch == 0:
|
|
356
|
-
return
|
|
357
|
-
return [lr * self.gamma for lr in
|
|
358
|
+
return [lr * 1. for lr in self._last_lr]
|
|
359
|
+
return [lr * self.gamma for lr in self._last_lr]
|
|
358
360
|
|
|
359
361
|
def _get_closed_form_lr(self):
|
|
360
362
|
return [base_lr * self.gamma ** self.last_epoch
|
|
@@ -381,7 +383,7 @@ class PolynomialLR(LRScheduler):
|
|
|
381
383
|
.. warning::
|
|
382
384
|
This is an experimental lr scheduler module that is subject to change.
|
|
383
385
|
This module must be used with optimizers in `Experimental Optimizer
|
|
384
|
-
<https://www.mindspore.cn/docs/en/r2.
|
|
386
|
+
<https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.experimental.html#experimental-optimizer>`_ .
|
|
385
387
|
|
|
386
388
|
Args:
|
|
387
389
|
optimizer (:class:`mindspore.experimental.optim.Optimizer`): Wrapped optimizer.
|
|
@@ -417,6 +419,10 @@ class PolynomialLR(LRScheduler):
|
|
|
417
419
|
[Tensor(shape=[], dtype=Float32, value= 0)]
|
|
418
420
|
"""
|
|
419
421
|
def __init__(self, optimizer, total_iters=5, power=1.0, last_epoch=-1):
|
|
422
|
+
if not isinstance(power, float):
|
|
423
|
+
raise TypeError(f"For 'PolynomialLR', the 'power' must be float, but got {type(power)}.")
|
|
424
|
+
if power < 0:
|
|
425
|
+
raise ValueError(f"For 'PolynomialLR', the 'power' must be >= 0, but got {power}.")
|
|
420
426
|
self.total_iters = total_iters
|
|
421
427
|
self.power = power
|
|
422
428
|
self.min = P.Minimum()
|
|
@@ -424,13 +430,11 @@ class PolynomialLR(LRScheduler):
|
|
|
424
430
|
super(PolynomialLR, self).__init__(optimizer, last_epoch)
|
|
425
431
|
|
|
426
432
|
def get_lr(self):
|
|
427
|
-
lrs = [lr.value() for lr in self._last_lr]
|
|
428
|
-
|
|
429
433
|
if self.last_epoch == 0 or self.last_epoch > self.total_iters:
|
|
430
|
-
return
|
|
434
|
+
return [lr * 1. for lr in self._last_lr]
|
|
431
435
|
factor = ((1.0 - self.last_epoch / self.total_iters) / (
|
|
432
436
|
1.0 - (self.last_epoch - 1) / self.total_iters)) ** self.power
|
|
433
|
-
return [lr * factor for lr in
|
|
437
|
+
return [lr * factor for lr in self._last_lr]
|
|
434
438
|
|
|
435
439
|
def _get_closed_form_lr(self):
|
|
436
440
|
return [
|
|
@@ -438,68 +442,6 @@ class PolynomialLR(LRScheduler):
|
|
|
438
442
|
for base_lr in self.base_lrs]
|
|
439
443
|
|
|
440
444
|
|
|
441
|
-
@jit_class
|
|
442
|
-
class ChainedScheduler:
|
|
443
|
-
r"""
|
|
444
|
-
Save the learning rate scheduler chain list of multiple learning rate schedulers,
|
|
445
|
-
and call the step() function to execute the step() function of each learning rate scheduler.
|
|
446
|
-
|
|
447
|
-
.. warning::
|
|
448
|
-
This is an experimental lr scheduler module that is subject to change.
|
|
449
|
-
This module must be used with optimizers in `Experimental Optimizer
|
|
450
|
-
<https://www.mindspore.cn/docs/en/r2.2/api_python/mindspore.experimental.html#experimental-optimizer>`_ .
|
|
451
|
-
|
|
452
|
-
Args:
|
|
453
|
-
schedulers (list[:class:`mindspore.experimental.optim.lr_scheduler.LRScheduler`]):
|
|
454
|
-
List of learning rate schedulers.
|
|
455
|
-
|
|
456
|
-
Supported Platforms:
|
|
457
|
-
``Ascend`` ``GPU`` ``CPU``
|
|
458
|
-
|
|
459
|
-
Examples:
|
|
460
|
-
>>> from mindspore import nn
|
|
461
|
-
>>> from mindspore.experimental import optim
|
|
462
|
-
>>> class Net(nn.Cell):
|
|
463
|
-
... def __init__(self):
|
|
464
|
-
... super(Net, self).__init__()
|
|
465
|
-
... self.fc = nn.Dense(16 * 5 * 5, 120)
|
|
466
|
-
... def construct(self, x):
|
|
467
|
-
... return self.fc(x)
|
|
468
|
-
>>> net = Net()
|
|
469
|
-
>>> optimizer = optim.Adam(net.trainable_params(), 0.01)
|
|
470
|
-
>>> scheduler1 = optim.lr_scheduler.PolynomialLR(optimizer)
|
|
471
|
-
>>> scheduler2 = optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.5)
|
|
472
|
-
>>> scheduler = optim.lr_scheduler.ChainedScheduler([scheduler1, scheduler2])
|
|
473
|
-
>>> for i in range(6):
|
|
474
|
-
... scheduler.step()
|
|
475
|
-
... current_lr = scheduler.get_last_lr()
|
|
476
|
-
... print(current_lr)
|
|
477
|
-
[Tensor(shape=[], dtype=Float32, value= 0.004)]
|
|
478
|
-
[Tensor(shape=[], dtype=Float32, value= 0.0015)]
|
|
479
|
-
[Tensor(shape=[], dtype=Float32, value= 0.0005)]
|
|
480
|
-
[Tensor(shape=[], dtype=Float32, value= 0.000125)]
|
|
481
|
-
[Tensor(shape=[], dtype=Float32, value= 0)]
|
|
482
|
-
[Tensor(shape=[], dtype=Float32, value= 0)]
|
|
483
|
-
"""
|
|
484
|
-
def __init__(self, schedulers):
|
|
485
|
-
self._schedulers = list(schedulers)
|
|
486
|
-
self.optimizer = schedulers[0].optimizer
|
|
487
|
-
self._last_lr = [lr for lr in self._schedulers[-1]._last_lr] # pylint: disable=W0212
|
|
488
|
-
|
|
489
|
-
def step(self):
|
|
490
|
-
"""
|
|
491
|
-
Sequential execution of the saved learning rate scheduler's step() function.
|
|
492
|
-
"""
|
|
493
|
-
for scheduler in self._schedulers:
|
|
494
|
-
scheduler.step()
|
|
495
|
-
|
|
496
|
-
def get_last_lr(self):
|
|
497
|
-
"""
|
|
498
|
-
Return last computed learning rate by current scheduler.
|
|
499
|
-
"""
|
|
500
|
-
return [lr.value() for lr in self._last_lr]
|
|
501
|
-
|
|
502
|
-
|
|
503
445
|
@jit_class
|
|
504
446
|
class LambdaLR(LRScheduler):
|
|
505
447
|
"""Sets the learning rate of each parameter group to the initial lr
|
|
@@ -508,7 +450,7 @@ class LambdaLR(LRScheduler):
|
|
|
508
450
|
.. warning::
|
|
509
451
|
This is an experimental lr scheduler module that is subject to change.
|
|
510
452
|
This module must be used with optimizers in `Experimental Optimizer
|
|
511
|
-
<https://www.mindspore.cn/docs/en/r2.
|
|
453
|
+
<https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.experimental.html#experimental-optimizer>`_ .
|
|
512
454
|
|
|
513
455
|
Args:
|
|
514
456
|
optimizer (:class:`mindspore.experimental.optim.Optimizer`): Wrapped optimizer.
|
|
@@ -561,7 +503,7 @@ class MultiplicativeLR(LRScheduler):
|
|
|
561
503
|
.. warning::
|
|
562
504
|
This is an experimental lr scheduler module that is subject to change.
|
|
563
505
|
This module must be used with optimizers in `Experimental Optimizer
|
|
564
|
-
<https://www.mindspore.cn/docs/en/r2.
|
|
506
|
+
<https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.experimental.html#experimental-optimizer>`_ .
|
|
565
507
|
|
|
566
508
|
Args:
|
|
567
509
|
optimizer (:class:`mindspore.experimental.optim.Optimizer`): Wrapped optimizer.
|
|
@@ -599,11 +541,10 @@ class MultiplicativeLR(LRScheduler):
|
|
|
599
541
|
super(MultiplicativeLR, self).__init__(optimizer, last_epoch)
|
|
600
542
|
|
|
601
543
|
def get_lr(self):
|
|
602
|
-
lrs = [lr.value() for lr in self._last_lr]
|
|
603
544
|
if self.last_epoch > 0:
|
|
604
545
|
return [lr * lmbda(self.last_epoch)
|
|
605
|
-
for lmbda, lr in zip(self.lr_lambdas,
|
|
606
|
-
return
|
|
546
|
+
for lmbda, lr in zip(self.lr_lambdas, self._last_lr)]
|
|
547
|
+
return [lr * 1. for lr in self._last_lr]
|
|
607
548
|
|
|
608
549
|
|
|
609
550
|
@jit_class
|
|
@@ -616,7 +557,7 @@ class MultiStepLR(LRScheduler):
|
|
|
616
557
|
.. warning::
|
|
617
558
|
This is an experimental lr scheduler module that is subject to change.
|
|
618
559
|
This module must be used with optimizers in `Experimental Optimizer
|
|
619
|
-
<https://www.mindspore.cn/docs/en/r2.
|
|
560
|
+
<https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.experimental.html#experimental-optimizer>`_ .
|
|
620
561
|
|
|
621
562
|
Args:
|
|
622
563
|
optimizer (:class:`mindspore.experimental.optim.Optimizer`): Wrapped optimizer.
|
|
@@ -655,11 +596,10 @@ class MultiStepLR(LRScheduler):
|
|
|
655
596
|
[Tensor(shape=[], dtype=Float32, value= 0.0005)]
|
|
656
597
|
[Tensor(shape=[], dtype=Float32, value= 0.0005)]
|
|
657
598
|
"""
|
|
658
|
-
|
|
659
599
|
def __init__(self, optimizer, milestones, gamma=0.1, last_epoch=-1):
|
|
660
600
|
Validator.check_value_type('milestones', milestones, [list])
|
|
661
601
|
for milestone in milestones:
|
|
662
|
-
if not isinstance(milestone, int):
|
|
602
|
+
if not isinstance(milestone, int) and not isinstance(milestone, bool):
|
|
663
603
|
raise TypeError(f"For 'MultiStepLR', elements of the 'milestones' must be type of int, "
|
|
664
604
|
f"but got one element of 'milestones' type: {type(milestone)}.")
|
|
665
605
|
Validator.check_value_type('gamma', gamma, [float, int])
|
|
@@ -693,7 +633,7 @@ class ConstantLR(LRScheduler):
|
|
|
693
633
|
.. warning::
|
|
694
634
|
This is an experimental lr scheduler module that is subject to change.
|
|
695
635
|
This module must be used with optimizers in `Experimental Optimizer
|
|
696
|
-
<https://www.mindspore.cn/docs/en/r2.
|
|
636
|
+
<https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.experimental.html#experimental-optimizer>`_ .
|
|
697
637
|
|
|
698
638
|
Args:
|
|
699
639
|
optimizer (:class:`mindspore.experimental.optim.Optimizer`): Wrapped optimizer.
|
|
@@ -734,12 +674,11 @@ class ConstantLR(LRScheduler):
|
|
|
734
674
|
super(ConstantLR, self).__init__(optimizer, last_epoch)
|
|
735
675
|
|
|
736
676
|
def get_lr(self):
|
|
737
|
-
lrs = [lr.value() for lr in self._last_lr]
|
|
738
677
|
if self.last_epoch == 0:
|
|
739
|
-
return [lr * self.factor for lr in
|
|
678
|
+
return [lr * self.factor for lr in self._last_lr]
|
|
740
679
|
if self.last_epoch != self.total_iters:
|
|
741
|
-
return
|
|
742
|
-
return [lr / self.factor for lr in
|
|
680
|
+
return [lr * 1. for lr in self._last_lr]
|
|
681
|
+
return [lr / self.factor for lr in self._last_lr]
|
|
743
682
|
|
|
744
683
|
def _get_closed_form_lr(self):
|
|
745
684
|
return [base_lr * (self.factor + (self.last_epoch >= self.total_iters) * (1 - self.factor))
|
|
@@ -756,7 +695,7 @@ class SequentialLR:
|
|
|
756
695
|
.. warning::
|
|
757
696
|
This is an experimental lr scheduler module that is subject to change.
|
|
758
697
|
This module must be used with optimizers in `Experimental Optimizer
|
|
759
|
-
<https://www.mindspore.cn/docs/en/r2.
|
|
698
|
+
<https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.experimental.html#experimental-optimizer>`_ .
|
|
760
699
|
|
|
761
700
|
Args:
|
|
762
701
|
optimizer (:class:`mindspore.experimental.optim.Optimizer`): Wrapped optimizer.
|
|
@@ -813,7 +752,7 @@ class SequentialLR:
|
|
|
813
752
|
self._schedulers = schedulers
|
|
814
753
|
self.milestones = milestones
|
|
815
754
|
self.milestones_len = len(milestones)
|
|
816
|
-
self.last_epoch = Parameter(Tensor(last_epoch+1, dtype=mstype.float32),
|
|
755
|
+
self.last_epoch = Parameter(Tensor(last_epoch + 1, dtype=mstype.float32),
|
|
817
756
|
name='last_epoch_' + self.__class__.__name__)
|
|
818
757
|
self.increase_tensor = Tensor(1, mstype.int32)
|
|
819
758
|
|
|
@@ -827,7 +766,6 @@ class SequentialLR:
|
|
|
827
766
|
self._schedulers[0].step()
|
|
828
767
|
self._last_lr = schedulers[0]._last_lr # pylint: disable=W0212
|
|
829
768
|
|
|
830
|
-
|
|
831
769
|
def step(self):
|
|
832
770
|
"""
|
|
833
771
|
Get the current learning rate and change the learning rate.
|
|
@@ -861,7 +799,7 @@ class ReduceLROnPlateau:
|
|
|
861
799
|
.. warning::
|
|
862
800
|
This is an experimental lr scheduler module that is subject to change.
|
|
863
801
|
This module must be used with optimizers in `Experimental Optimizer
|
|
864
|
-
<https://www.mindspore.cn/docs/en/r2.
|
|
802
|
+
<https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.experimental.html#experimental-optimizer>`_ .
|
|
865
803
|
|
|
866
804
|
Args:
|
|
867
805
|
optimizer (:class:`mindspore.experimental.optim.Optimizer`): Wrapped optimizer.
|
|
@@ -913,7 +851,7 @@ class ReduceLROnPlateau:
|
|
|
913
851
|
>>> metrics = [1, 1.5, 1.8, 0.4, 0.5]
|
|
914
852
|
>>> for i in range(5):
|
|
915
853
|
... scheduler.step(metrics[i])
|
|
916
|
-
... current_lr = scheduler.
|
|
854
|
+
... current_lr = scheduler.get_last_lr()
|
|
917
855
|
... print(current_lr)
|
|
918
856
|
[Tensor(shape=[], dtype=Float32, value= 0.1)]
|
|
919
857
|
[Tensor(shape=[], dtype=Float32, value= 0.01)]
|
|
@@ -987,7 +925,7 @@ class ReduceLROnPlateau:
|
|
|
987
925
|
else:
|
|
988
926
|
ops.assign_add(self.wait, self.increase_tensor)
|
|
989
927
|
|
|
990
|
-
if self.in_cooldown:
|
|
928
|
+
if self.in_cooldown():
|
|
991
929
|
ops.assign_sub(self.cooldown_counter, self.increase_tensor)
|
|
992
930
|
ops.assign(self.wait, 0)
|
|
993
931
|
|
|
@@ -1006,7 +944,6 @@ class ReduceLROnPlateau:
|
|
|
1006
944
|
ops.assign(lr, new_lr)
|
|
1007
945
|
return True
|
|
1008
946
|
|
|
1009
|
-
@property
|
|
1010
947
|
def in_cooldown(self):
|
|
1011
948
|
""" Whether in cooldown period. """
|
|
1012
949
|
return self.cooldown_counter > 0
|
|
@@ -1057,7 +994,7 @@ class CyclicLR(LRScheduler):
|
|
|
1057
994
|
.. warning::
|
|
1058
995
|
This is an experimental lr scheduler module that is subject to change.
|
|
1059
996
|
This module must be used with optimizers in `Experimental Optimizer
|
|
1060
|
-
<https://www.mindspore.cn/docs/en/r2.
|
|
997
|
+
<https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.experimental.html#experimental-optimizer>`_ .
|
|
1061
998
|
|
|
1062
999
|
Args:
|
|
1063
1000
|
optimizer (:class:`mindspore.experimental.optim.Optimizer`): Wrapped optimizer.
|
|
@@ -1128,7 +1065,7 @@ class CyclicLR(LRScheduler):
|
|
|
1128
1065
|
|
|
1129
1066
|
if last_epoch == -1:
|
|
1130
1067
|
for lr, group in zip(base_lrs, optimizer.param_groups):
|
|
1131
|
-
group['lr']
|
|
1068
|
+
ops.assign(group['lr'], Parameter(lr))
|
|
1132
1069
|
|
|
1133
1070
|
self.max_lrs = self._preprocess_input_param(optimizer, max_lr, 'max_lr')
|
|
1134
1071
|
self.max_lrs = [Tensor(lr) for lr in self.max_lrs]
|
|
@@ -1234,7 +1171,7 @@ class CosineAnnealingWarmRestarts(LRScheduler):
|
|
|
1234
1171
|
.. warning::
|
|
1235
1172
|
This is an experimental lr scheduler module that is subject to change.
|
|
1236
1173
|
This module must be used with optimizers in `Experimental Optimizer
|
|
1237
|
-
<https://www.mindspore.cn/docs/en/r2.
|
|
1174
|
+
<https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.experimental.html#experimental-optimizer>`_ .
|
|
1238
1175
|
|
|
1239
1176
|
Args:
|
|
1240
1177
|
optimizer (:class:`mindspore.experimental.optim.Optimizer`): Wrapped optimizer.
|
|
@@ -1258,7 +1195,7 @@ class CosineAnnealingWarmRestarts(LRScheduler):
|
|
|
1258
1195
|
>>> optimizer = optim.SGD(net.trainable_params(), lr=0.1, momentum=0.9)
|
|
1259
1196
|
>>> scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, 2)
|
|
1260
1197
|
>>> iters = 3
|
|
1261
|
-
>>> for epoch in range(
|
|
1198
|
+
>>> for epoch in range(2):
|
|
1262
1199
|
... for i in range(iters):
|
|
1263
1200
|
... scheduler.step(epoch + i / iters)
|
|
1264
1201
|
... current_lr = scheduler.get_last_lr()
|
|
@@ -1347,7 +1284,7 @@ class CosineAnnealingLR(LRScheduler):
|
|
|
1347
1284
|
r"""
|
|
1348
1285
|
Set the learning rate of each parameter group using a cosine annealing lr
|
|
1349
1286
|
schedule. Where :math:`\eta_{max}` is set to the initial lr, :math:`\eta_{min}` is the minimum value
|
|
1350
|
-
for learning rate, :math:`\eta_{t}` is the current learning rate, :math
|
|
1287
|
+
for learning rate, :math:`\eta_{t}` is the current learning rate, :math:`T_{max}` is iteration number of cosine
|
|
1351
1288
|
function, and :math:`T_{cur}` is the number of epochs since the last restart in SGDR.
|
|
1352
1289
|
|
|
1353
1290
|
.. math::
|
|
@@ -1361,17 +1298,17 @@ class CosineAnnealingLR(LRScheduler):
|
|
|
1361
1298
|
\end{aligned}
|
|
1362
1299
|
|
|
1363
1300
|
For more details, please refer to: `SGDR: Stochastic Gradient Descent with Warm Restarts
|
|
1364
|
-
<https://arxiv.org/abs/1608.03983>`_
|
|
1301
|
+
<https://arxiv.org/abs/1608.03983>`_ .
|
|
1365
1302
|
|
|
1366
1303
|
.. warning::
|
|
1367
1304
|
This is an experimental lr scheduler module that is subject to change.
|
|
1368
1305
|
This module must be used with optimizers in `Experimental Optimizer
|
|
1369
|
-
<https://www.mindspore.cn/docs/en/r2.
|
|
1306
|
+
<https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.experimental.html#experimental-optimizer>`_ .
|
|
1370
1307
|
|
|
1371
1308
|
Args:
|
|
1372
1309
|
optimizer (:class:`mindspore.experimental.optim.Optimizer`): Wrapped optimizer.
|
|
1373
1310
|
T_max (int): Maximum number of iterations.
|
|
1374
|
-
eta_min (float, optional): Minimum learning rate. Default: ``0``.
|
|
1311
|
+
eta_min (float, optional): Minimum learning rate. Default: ``0.0``.
|
|
1375
1312
|
last_epoch (int, optional): The index of the last epoch. Default: ``-1``.
|
|
1376
1313
|
|
|
1377
1314
|
Supported Platforms:
|
|
@@ -1395,7 +1332,11 @@ class CosineAnnealingLR(LRScheduler):
|
|
|
1395
1332
|
[Tensor(shape=[], dtype=Float32, value= 0.05)]
|
|
1396
1333
|
[Tensor(shape=[], dtype=Float32, value= 0)]
|
|
1397
1334
|
"""
|
|
1398
|
-
def __init__(self, optimizer, T_max, eta_min=0, last_epoch=-1):
|
|
1335
|
+
def __init__(self, optimizer, T_max, eta_min=0.0, last_epoch=-1):
|
|
1336
|
+
if not isinstance(eta_min, (float, int)):
|
|
1337
|
+
raise TypeError(f"For 'CosineAnnealingLR', the 'eta_min' must be float or int, but got {type(eta_min)}.")
|
|
1338
|
+
if not isinstance(T_max, int) and not isinstance(T_max, bool):
|
|
1339
|
+
raise TypeError(f"For 'CosineAnnealingLR', the 'T_max' must be int, but got {type(eta_min)}.")
|
|
1399
1340
|
self.T_max = T_max
|
|
1400
1341
|
self.eta_min = eta_min
|
|
1401
1342
|
self.math_pi = math.pi
|
|
@@ -1404,22 +1345,21 @@ class CosineAnnealingLR(LRScheduler):
|
|
|
1404
1345
|
super(CosineAnnealingLR, self).__init__(optimizer, last_epoch)
|
|
1405
1346
|
|
|
1406
1347
|
def get_lr(self):
|
|
1407
|
-
lrs = [lr.value() for lr in self._last_lr]
|
|
1408
1348
|
|
|
1409
1349
|
if self.last_epoch == 0:
|
|
1410
|
-
return
|
|
1350
|
+
return [lr * 1. for lr in self._last_lr]
|
|
1411
1351
|
|
|
1412
1352
|
if (self.last_epoch - 1 - self.T_max) % (2 * self.T_max) == 0:
|
|
1413
1353
|
pct_pi = self.cast(self.math_pi / self.T_max, mstype.float32)
|
|
1414
1354
|
return [lr + (base_lr - self.eta_min) *
|
|
1415
1355
|
(1 - self.cos(pct_pi)) / 2
|
|
1416
1356
|
for base_lr, lr in
|
|
1417
|
-
zip(self.base_lrs,
|
|
1357
|
+
zip(self.base_lrs, self._last_lr)]
|
|
1418
1358
|
|
|
1419
1359
|
return [(1 + self.cos(self.math_pi * self.last_epoch / self.T_max)) /
|
|
1420
1360
|
(1 + self.cos(self.math_pi * (self.last_epoch - 1) / self.T_max)) *
|
|
1421
1361
|
(lr - self.eta_min) + self.eta_min
|
|
1422
|
-
for lr in
|
|
1362
|
+
for lr in self._last_lr]
|
|
1423
1363
|
|
|
1424
1364
|
def _get_closed_form_lr(self):
|
|
1425
1365
|
return [self.eta_min + (base_lr - self.eta_min) *
|
|
@@ -0,0 +1,157 @@
|
|
|
1
|
+
# Copyright 2023 Huawei Technologies Co., Ltd
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
# ============================================================================
|
|
15
|
+
"""nadam"""
|
|
16
|
+
from __future__ import absolute_import
|
|
17
|
+
|
|
18
|
+
from mindspore.ops import functional as F, composite as C, operations as P
|
|
19
|
+
from mindspore.common import Parameter, Tensor
|
|
20
|
+
import mindspore.common.dtype as mstype
|
|
21
|
+
from mindspore import _checkparam as validator
|
|
22
|
+
from mindspore.experimental.optim.optimizer import Optimizer, check_not_less_than, check_not_less_than_without_equal
|
|
23
|
+
from mindspore import jit
|
|
24
|
+
|
|
25
|
+
_nadam_opt = C.MultitypeFuncGraph("nadam_opt")
|
|
26
|
+
|
|
27
|
+
op_sqrt = P.Sqrt()
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
@_nadam_opt.register("Number", "Number", "Number", "Number", "Tensor", "Tensor", "Tensor",
|
|
31
|
+
"Tensor", "Tensor", "Tensor", "Tensor")
|
|
32
|
+
def _tensor_run_opt(beta1, beta2, momentum_decay, eps, step_t, lr, param, grad, exp_avg, exp_avg_sq, mu_product):
|
|
33
|
+
"""Apply nadam optimizer to the weight parameter."""
|
|
34
|
+
bias_correction2 = 1 - beta2 ** step_t
|
|
35
|
+
mu = beta1 * (1. - 0.5 * (0.96 ** (step_t * momentum_decay)))
|
|
36
|
+
mu_next = beta1 * (1. - 0.5 * (0.96 ** ((step_t + 1) * momentum_decay)))
|
|
37
|
+
F.assign(mu_product, mu_product * mu)
|
|
38
|
+
F.assign(exp_avg, exp_avg * beta1 + grad * (1 - beta1))
|
|
39
|
+
F.assign(exp_avg_sq, exp_avg_sq * beta2 + grad * grad * (1 - beta2))
|
|
40
|
+
|
|
41
|
+
denom = op_sqrt(exp_avg_sq / bias_correction2) + eps
|
|
42
|
+
|
|
43
|
+
mu_product_next = mu_product * mu_next
|
|
44
|
+
F.assign(param, param - lr * (1. - mu) / (1. - mu_product) * grad / denom)
|
|
45
|
+
F.assign(param, param - (lr * mu_next) / (1. - mu_product_next) * exp_avg / denom)
|
|
46
|
+
|
|
47
|
+
return True
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
class NAdam(Optimizer):
|
|
51
|
+
r"""
|
|
52
|
+
Implements NAdam algorithm.
|
|
53
|
+
|
|
54
|
+
.. _Incorporating Nesterov Momentum into Adam:
|
|
55
|
+
https://openreview.net/forum?id=OM0jvwB8jIp57ZJjtNEZ
|
|
56
|
+
|
|
57
|
+
.. warning::
|
|
58
|
+
This is an experimental optimizer API that is subject to change.
|
|
59
|
+
This module must be used with lr scheduler module in `LRScheduler Class
|
|
60
|
+
<https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore.experimental.html#lrscheduler-class>`_ .
|
|
61
|
+
|
|
62
|
+
Args:
|
|
63
|
+
params (Union[list(Parameter), list(dict)]): list of parameters to optimize or dicts defining
|
|
64
|
+
parameter groups.
|
|
65
|
+
lr (Union[int, float, Tensor], optional): learning rate. Default: ``2e-3``.
|
|
66
|
+
betas (Tuple[float, float], optional): coefficients used for computing
|
|
67
|
+
running averages of gradient and its square. Default: ``(0.9, 0.999)``.
|
|
68
|
+
eps (float, optional): term added to the denominator to improve
|
|
69
|
+
numerical stability. Default: ``1e-8``.
|
|
70
|
+
weight_decay (float, optional): weight decay (L2 penalty). Default: ``0.``.
|
|
71
|
+
momentum_decay (float, optional): momentum momentum_decay. Default: ``4e-3``.
|
|
72
|
+
|
|
73
|
+
Inputs:
|
|
74
|
+
- **gradients** (tuple[Tensor]) - The gradients of `params`.
|
|
75
|
+
|
|
76
|
+
Raises:
|
|
77
|
+
ValueError: If the learning rate is not int, float or Tensor.
|
|
78
|
+
ValueError: If the learning rate is less than 0.
|
|
79
|
+
ValueError: If the `eps` is less than 0.0.
|
|
80
|
+
ValueError: If the `weight_decay` is less than 0.
|
|
81
|
+
ValueError: If the `momentum_decay` is less than 0.
|
|
82
|
+
ValueError: If elements of `betas` not in the range of [0, 1).
|
|
83
|
+
|
|
84
|
+
Supported Platforms:
|
|
85
|
+
``Ascend`` ``GPU`` ``CPU``
|
|
86
|
+
|
|
87
|
+
Examples:
|
|
88
|
+
>>> import mindspore
|
|
89
|
+
>>> from mindspore import nn
|
|
90
|
+
>>> from mindspore.experimental import optim
|
|
91
|
+
>>> # Define the network structure of LeNet5. Refer to
|
|
92
|
+
>>> # https://gitee.com/mindspore/docs/blob/r2.3.q1/docs/mindspore/code/lenet.py
|
|
93
|
+
>>> net = LeNet5()
|
|
94
|
+
>>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True)
|
|
95
|
+
>>> optimizer = optim.NAdam(net.trainable_params(), lr=0.1)
|
|
96
|
+
>>> def forward_fn(data, label):
|
|
97
|
+
... logits = net(data)
|
|
98
|
+
... loss = loss_fn(logits, label)
|
|
99
|
+
... return loss, logits
|
|
100
|
+
>>> grad_fn = mindspore.value_and_grad(forward_fn, None, optimizer.parameters, has_aux=True)
|
|
101
|
+
>>> def train_step(data, label):
|
|
102
|
+
... (loss, _), grads = grad_fn(data, label)
|
|
103
|
+
... optimizer(grads)
|
|
104
|
+
... return loss
|
|
105
|
+
"""
|
|
106
|
+
|
|
107
|
+
def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0.0, momentum_decay=4e-3):
    """Validate the NAdam hyper-parameters and build the per-parameter optimizer state."""
    # Reject invalid hyper-parameters up front: lr and eps must be strictly
    # positive; the decay factors may be zero but not negative.
    check_not_less_than_without_equal(lr, "lr", self.cls_name)
    check_not_less_than_without_equal(eps, "eps", self.cls_name)
    check_not_less_than(weight_decay, "weight_decay", self.cls_name)
    check_not_less_than(momentum_decay, "momentum_decay", self.cls_name)

    # Both running-average coefficients must lie in the half-open range [0, 1).
    validator.check_float_range(betas[0], 0., 1., validator.INC_LEFT, "betas[0]", self.cls_name)
    validator.check_float_range(betas[1], 0., 1., validator.INC_LEFT, "betas[1]", self.cls_name)

    # Per-group default hyper-parameters handed to the base optimizer.
    group_defaults = {"lr": lr, "betas": betas, "eps": eps,
                      "weight_decay": weight_decay, "momentum_decay": momentum_decay}
    super().__init__(params, group_defaults)

    # Global step counter shared by every parameter group.
    self.step_t = Parameter(Tensor(0, mstype.int32), "step_t")
    # First and second moment estimates, one zero-initialized clone per parameter.
    self.exp_avg = self.parameters.clone(prefix="exp_avg", init='zeros')
    self.exp_avg_sq = self.parameters.clone(prefix="exp_avg_sq", init='zeros')
    # Running product of the momentum schedule, one scalar per parameter, starting at 1.
    self.mu_product = [Parameter(Tensor(1.), "mu_product_" + p.name) for p in self.parameters]

    # Constant increment and the ops used each step.
    self.increase_tensor = Tensor(1, mstype.int32)
    self.assignadd = P.AssignAdd()
    self.op_cast = P.Cast()
@jit
def implementation(self, lr, beta1, beta2, weight_decay, momentum_decay, eps, start_id, end_id, gradients):
    """Run the shared NAdam update for one parameter group.

    Compiled with ``@jit`` so the common computing part is accelerated in
    graph mode. ``start_id``/``end_id`` select this group's slice of the
    flat parameter/state lists; the remaining arguments are the group's
    hyper-parameters.
    """
    # Slice out this group's parameters, gradients and optimizer state.
    params = self.parameters[start_id: end_id]
    grads = gradients[start_id: end_id]
    # Apply L2 weight decay to the gradients before the moment updates.
    grads = self._decay_weight(weight_decay, params, grads)
    exp_avg = self.exp_avg[start_id: end_id]
    exp_avg_sq = self.exp_avg_sq[start_id: end_id]
    mu_product = self.mu_product[start_id: end_id]

    # Apply _nadam_opt element-wise across the group's tensors; the scalar
    # hyper-parameters are bound once via F.partial.
    self.hyper_map(F.partial(_nadam_opt, beta1, beta2, momentum_decay, eps, self.step_t, lr),
                   params, grads, exp_avg, exp_avg_sq, mu_product)
    return True
def construct(self, gradients):
    """Perform one NAdam optimization step over all parameter groups.

    ``gradients`` is the tuple of gradients matching ``self.parameters``;
    each group's slice is dispatched to the jit-compiled ``implementation``.
    """
    # Advance the shared step counter once per optimizer call.
    self.assignadd(self.step_t, self.increase_tensor)
    for group_id, group in enumerate(self.param_groups):

        # Group learning rate: a plain float overrides the stored lr and is
        # cast to a float32 scalar for the graph-mode update.
        lr = self.lrs[group_id]
        if isinstance(group.get("lr"), float):
            lr = self.op_cast(group.get("lr"), mstype.float32)

        # Gather this group's hyper-parameters and its [start, end) slice of
        # the flat parameter list.
        beta1, beta2 = group["betas"]
        start_id = self.group_start_id[group_id]
        end_id = self.group_start_id[group_id + 1]
        weight_decay = group["weight_decay"]
        momentum_decay = group["momentum_decay"]
        eps = group["eps"]
        self.implementation(lr, beta1, beta2, weight_decay, momentum_decay, eps, start_id, end_id, gradients)
    return True