mindspore 1.10.0__cp39-cp39-win_amd64.whl → 2.0.0rc1__cp39-cp39-win_amd64.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of mindspore might be problematic; see the registry page for details.
- mindspore/.commit_id +1 -1
- mindspore/ConcurrencyCheck.dll +0 -0
- mindspore/CppBuildInsights.dll +0 -0
- mindspore/CppCoreCheck.dll +0 -0
- mindspore/EnumIndex.dll +0 -0
- mindspore/EspXEngine.dll +0 -0
- mindspore/HResultCheck.dll +0 -0
- mindspore/KernelTraceControl.dll +0 -0
- mindspore/LocalESPC.dll +0 -0
- mindspore/Microsoft.Diagnostics.Tracing.EventSource.dll +0 -0
- mindspore/Microsoft.VisualStudio.RemoteControl.dll +0 -0
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Microsoft.VisualStudio.Utilities.Internal.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/System.Runtime.CompilerServices.Unsafe.dll +0 -0
- mindspore/VariantClear.dll +0 -0
- mindspore/__init__.py +9 -4
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_check_jit_forbidden_api.py +102 -0
- mindspore/_checkparam.py +1066 -1001
- mindspore/_extends/builtin_operations.py +32 -4
- mindspore/_extends/graph_kernel/model/graph_split.py +66 -222
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +12 -9
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +119 -26
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +50 -50
- mindspore/_extends/parallel_compile/akg_compiler/util.py +9 -6
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +4 -25
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +9 -4
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -27
- mindspore/_extends/parse/__init__.py +5 -3
- mindspore/_extends/parse/namespace.py +17 -2
- mindspore/_extends/parse/parser.py +193 -34
- mindspore/_extends/parse/resources.py +7 -8
- mindspore/_extends/parse/standard_method.py +1780 -435
- mindspore/_extends/parse/trope.py +3 -1
- mindspore/amp.py +53 -58
- mindspore/atlprov.dll +0 -0
- mindspore/boost/adasum.py +3 -2
- mindspore/boost/boost.py +2 -2
- mindspore/boost/boost_cell_wrapper.py +46 -26
- mindspore/boost/dim_reduce.py +6 -5
- mindspore/boost/grad_accumulation.py +2 -1
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/cfgpersist.dll +0 -0
- mindspore/clang_rt.asan_dbg_dynamic-x86_64.dll +0 -0
- mindspore/clang_rt.asan_dynamic-x86_64.dll +0 -0
- mindspore/common/__init__.py +11 -10
- mindspore/common/_decorator.py +2 -0
- mindspore/common/_register_for_adapter.py +55 -0
- mindspore/common/_stub_tensor.py +201 -0
- mindspore/common/_utils.py +57 -0
- mindspore/common/api.py +582 -297
- mindspore/common/dtype.py +66 -18
- mindspore/common/dump.py +2 -2
- mindspore/common/initializer.py +38 -1
- mindspore/common/jit_config.py +25 -13
- mindspore/common/mutable.py +53 -24
- mindspore/common/parameter.py +60 -37
- mindspore/common/seed.py +8 -24
- mindspore/common/sparse_tensor.py +927 -0
- mindspore/common/tensor.py +1627 -3900
- mindspore/communication/__init__.py +10 -5
- mindspore/communication/_comm_helper.py +78 -214
- mindspore/communication/_hccl_management.py +2 -1
- mindspore/communication/management.py +136 -47
- mindspore/config/op_info.config +501 -1008
- mindspore/context.py +291 -56
- mindspore/d3dcompiler_47.dll +0 -0
- mindspore/dataset/__init__.py +12 -8
- mindspore/dataset/audio/__init__.py +9 -9
- mindspore/dataset/audio/transforms.py +1090 -228
- mindspore/dataset/audio/utils.py +87 -39
- mindspore/dataset/audio/validators.py +223 -1
- mindspore/dataset/callback/ds_callback.py +17 -15
- mindspore/dataset/core/config.py +246 -17
- mindspore/dataset/core/py_util_helpers.py +4 -3
- mindspore/dataset/core/validator_helpers.py +10 -10
- mindspore/{parallel/nn/layers.py → dataset/debug/__init__.py} +7 -8
- mindspore/dataset/debug/debug_hook.py +65 -0
- mindspore/dataset/debug/pre_defined_hook.py +67 -0
- mindspore/dataset/engine/__init__.py +7 -3
- mindspore/dataset/engine/cache_client.py +9 -9
- mindspore/dataset/engine/datasets.py +648 -477
- mindspore/dataset/engine/datasets_audio.py +165 -167
- mindspore/dataset/engine/datasets_standard_format.py +93 -67
- mindspore/dataset/engine/datasets_text.py +492 -342
- mindspore/dataset/engine/datasets_user_defined.py +85 -50
- mindspore/dataset/engine/datasets_vision.py +1224 -699
- mindspore/dataset/engine/graphdata.py +134 -69
- mindspore/dataset/engine/iterators.py +50 -9
- mindspore/dataset/engine/offload.py +52 -31
- mindspore/dataset/engine/samplers.py +27 -24
- mindspore/dataset/engine/serializer_deserializer.py +14 -15
- mindspore/dataset/engine/validators.py +213 -52
- mindspore/dataset/text/__init__.py +10 -8
- mindspore/dataset/text/transforms.py +152 -57
- mindspore/dataset/text/utils.py +98 -49
- mindspore/dataset/text/validators.py +25 -0
- mindspore/dataset/transforms/__init__.py +4 -2
- mindspore/dataset/transforms/c_transforms.py +11 -13
- mindspore/dataset/transforms/py_transforms.py +2 -2
- mindspore/dataset/transforms/py_transforms_util.py +10 -0
- mindspore/dataset/transforms/transforms.py +13 -15
- mindspore/dataset/transforms/validators.py +7 -7
- mindspore/dataset/utils/__init__.py +2 -1
- mindspore/dataset/utils/browse_dataset.py +13 -13
- mindspore/dataset/utils/line_reader.py +121 -0
- mindspore/dataset/vision/__init__.py +8 -7
- mindspore/dataset/vision/c_transforms.py +125 -126
- mindspore/dataset/vision/py_transforms.py +37 -37
- mindspore/dataset/vision/py_transforms_util.py +23 -20
- mindspore/dataset/vision/transforms.py +316 -315
- mindspore/dataset/vision/utils.py +313 -17
- mindspore/dataset/vision/validators.py +6 -6
- mindspore/default_config.py +0 -1
- mindspore/dpcmi.dll +0 -0
- mindspore/{compression → experimental}/__init__.py +6 -5
- mindspore/experimental/map_parameter.py +275 -0
- mindspore/include/OWNERS +0 -1
- mindspore/include/api/callback/callback.h +9 -13
- mindspore/include/api/callback/ckpt_saver.h +2 -2
- mindspore/include/api/callback/loss_monitor.h +2 -2
- mindspore/include/api/callback/lr_scheduler.h +5 -5
- mindspore/include/api/callback/time_monitor.h +2 -2
- mindspore/include/api/callback/train_accuracy.h +4 -6
- mindspore/include/api/cfg.h +19 -6
- mindspore/include/api/context.h +70 -9
- mindspore/include/api/delegate.h +8 -1
- mindspore/include/api/dual_abi_helper.h +8 -24
- mindspore/include/api/metrics/accuracy.h +2 -2
- mindspore/include/api/metrics/metrics.h +4 -3
- mindspore/include/api/model.h +9 -4
- mindspore/include/api/model_group.h +68 -0
- mindspore/include/api/model_parallel_runner.h +17 -17
- mindspore/include/api/net.h +12 -11
- mindspore/include/api/serialization.h +20 -4
- mindspore/include/api/status.h +7 -1
- mindspore/include/api/types.h +25 -21
- mindspore/include/api/visible.h +4 -0
- mindspore/include/c_api/model_c.h +5 -0
- mindspore/include/c_api/status_c.h +1 -1
- mindspore/include/dataset/config.h +1 -1
- mindspore/include/dataset/constants.h +14 -0
- mindspore/include/dataset/text.h +59 -0
- mindspore/include/dataset/vision.h +56 -117
- mindspore/include/dataset/vision_lite.h +102 -0
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +28 -28
- mindspore/mindrecord/common/exceptions.py +2 -4
- mindspore/mindrecord/filereader.py +19 -1
- mindspore/mindrecord/filewriter.py +250 -88
- mindspore/mindrecord/mindpage.py +13 -13
- mindspore/mindrecord/shardheader.py +15 -15
- mindspore/mindrecord/shardreader.py +9 -0
- mindspore/mindrecord/shardwriter.py +29 -29
- mindspore/mindrecord/tools/cifar100_to_mr.py +9 -9
- mindspore/mindrecord/tools/cifar10_to_mr.py +9 -9
- mindspore/mindrecord/tools/csv_to_mr.py +4 -4
- mindspore/mindrecord/tools/imagenet_to_mr.py +70 -65
- mindspore/mindrecord/tools/mnist_to_mr.py +41 -41
- mindspore/mindrecord/tools/tfrecord_to_mr.py +6 -6
- mindspore/{libmindspore_backend.dll → mindspore_backend.dll} +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/__init__.py +1 -5
- mindspore/nn/cell.py +297 -234
- mindspore/nn/dynamic_lr.py +1 -1
- mindspore/nn/grad/cell_grad.py +17 -42
- mindspore/nn/layer/__init__.py +7 -4
- mindspore/nn/layer/activation.py +131 -88
- mindspore/nn/layer/basic.py +313 -613
- mindspore/nn/layer/channel_shuffle.py +103 -0
- mindspore/nn/layer/combined.py +1 -1
- mindspore/nn/layer/container.py +52 -6
- mindspore/nn/layer/conv.py +112 -43
- mindspore/nn/layer/dense.py +10 -9
- mindspore/nn/layer/embedding.py +36 -34
- mindspore/nn/layer/image.py +123 -27
- mindspore/nn/layer/math.py +108 -107
- mindspore/nn/layer/normalization.py +212 -366
- mindspore/nn/layer/padding.py +370 -42
- mindspore/nn/layer/pooling.py +1443 -219
- mindspore/nn/layer/rnn_cells.py +11 -16
- mindspore/nn/layer/rnns.py +38 -39
- mindspore/nn/layer/thor_layer.py +24 -25
- mindspore/nn/layer/timedistributed.py +5 -5
- mindspore/nn/layer/transformer.py +701 -0
- mindspore/nn/learning_rate_schedule.py +8 -8
- mindspore/nn/loss/__init__.py +9 -6
- mindspore/nn/loss/loss.py +678 -142
- mindspore/nn/metrics.py +53 -0
- mindspore/nn/optim/_dist_optimizer_registry.py +2 -2
- mindspore/nn/optim/ada_grad.py +8 -8
- mindspore/nn/optim/adadelta.py +2 -3
- mindspore/nn/optim/adafactor.py +18 -14
- mindspore/nn/optim/adam.py +429 -87
- mindspore/nn/optim/adamax.py +5 -6
- mindspore/nn/optim/adasum.py +10 -8
- mindspore/nn/optim/asgd.py +7 -7
- mindspore/nn/optim/ftrl.py +81 -11
- mindspore/nn/optim/lamb.py +7 -8
- mindspore/nn/optim/lars.py +4 -4
- mindspore/nn/optim/lazyadam.py +82 -7
- mindspore/nn/optim/momentum.py +8 -7
- mindspore/nn/optim/optimizer.py +19 -10
- mindspore/nn/optim/proximal_ada_grad.py +6 -5
- mindspore/nn/optim/rmsprop.py +3 -3
- mindspore/nn/optim/rprop.py +20 -16
- mindspore/nn/optim/sgd.py +21 -15
- mindspore/nn/optim/thor.py +23 -21
- mindspore/nn/probability/__init__.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +7 -6
- mindspore/nn/probability/bijector/invert.py +4 -2
- mindspore/nn/probability/bijector/softplus.py +2 -2
- mindspore/nn/probability/bnn_layers/dense_variational.py +1 -1
- mindspore/nn/probability/bnn_layers/layer_distribution.py +2 -2
- mindspore/nn/probability/distribution/__init__.py +6 -0
- mindspore/nn/probability/distribution/_utils/custom_ops.py +3 -2
- mindspore/nn/probability/distribution/_utils/utils.py +11 -17
- mindspore/nn/probability/distribution/bernoulli.py +6 -6
- mindspore/nn/probability/distribution/beta.py +1 -1
- mindspore/nn/probability/distribution/categorical.py +9 -9
- mindspore/nn/probability/distribution/cauchy.py +8 -8
- mindspore/nn/probability/distribution/distribution.py +12 -6
- mindspore/nn/probability/distribution/exponential.py +5 -5
- mindspore/nn/probability/distribution/gamma.py +3 -3
- mindspore/nn/probability/distribution/geometric.py +6 -5
- mindspore/nn/probability/distribution/gumbel.py +5 -5
- mindspore/nn/probability/distribution/half_normal.py +133 -0
- mindspore/nn/probability/distribution/laplace.py +128 -0
- mindspore/nn/probability/distribution/log_normal.py +0 -1
- mindspore/nn/probability/distribution/logistic.py +4 -5
- mindspore/nn/probability/distribution/normal.py +11 -15
- mindspore/nn/probability/distribution/poisson.py +6 -2
- mindspore/nn/probability/distribution/student_t.py +150 -0
- mindspore/nn/probability/distribution/transformed_distribution.py +4 -4
- mindspore/nn/probability/distribution/uniform.py +5 -5
- mindspore/nn/reinforcement/_tensors_queue.py +3 -3
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +8 -1
- mindspore/nn/wrap/cell_wrapper.py +55 -27
- mindspore/nn/wrap/grad_reducer.py +20 -11
- mindspore/nn/wrap/loss_scale.py +47 -30
- mindspore/numpy/array_creations.py +33 -22
- mindspore/numpy/array_ops.py +46 -42
- mindspore/numpy/logic_ops.py +6 -27
- mindspore/numpy/math_ops.py +26 -19
- mindspore/numpy/utils.py +1 -8
- mindspore/numpy/utils_const.py +112 -62
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +6 -3
- mindspore/ops/_constants.py +0 -6
- mindspore/ops/_grad/__init__.py +2 -1
- mindspore/ops/_grad/grad_array_ops.py +209 -152
- mindspore/ops/_grad/grad_base.py +55 -17
- mindspore/ops/_grad/grad_clip_ops.py +11 -3
- mindspore/ops/_grad/grad_comm_ops.py +58 -47
- mindspore/ops/_grad/grad_implementations.py +21 -61
- mindspore/ops/_grad/grad_inner_ops.py +48 -6
- mindspore/ops/_grad/grad_math_ops.py +306 -161
- mindspore/ops/_grad/grad_nn_ops.py +192 -181
- mindspore/ops/_grad/grad_other_ops.py +1 -1
- mindspore/ops/_grad/grad_quant_ops.py +5 -5
- mindspore/ops/_grad/grad_sequence_ops.py +296 -0
- mindspore/ops/_grad/grad_sparse.py +15 -9
- mindspore/ops/_grad_experimental/__init__.py +1 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +441 -55
- mindspore/ops/_grad_experimental/grad_image_ops.py +25 -7
- mindspore/ops/_grad_experimental/grad_inner_ops.py +3 -44
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +16 -21
- mindspore/ops/_grad_experimental/grad_math_ops.py +979 -49
- mindspore/ops/_grad_experimental/grad_nn_ops.py +78 -8
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +112 -0
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +197 -13
- mindspore/ops/_op_impl/__init__.py +3 -3
- mindspore/ops/_op_impl/_custom_op/__init__.py +0 -1
- mindspore/ops/_op_impl/_custom_op/_basic.py +0 -1
- mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +4 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +5 -5
- mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +3 -3
- mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/correction_mul.py +3 -3
- mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +4 -8
- mindspore/ops/_op_impl/_custom_op/dsd_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +2 -2
- mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/img2col_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +2 -2
- mindspore/ops/_op_impl/_custom_op/matmul_dds_grad_impl.py +0 -1
- mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +0 -1
- mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +2 -2
- mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +2 -2
- mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +1 -1
- mindspore/ops/_op_impl/aicpu/__init__.py +238 -3
- mindspore/ops/_op_impl/aicpu/abs.py +36 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d.py +34 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d.py +39 -0
- mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d.py +42 -0
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d_grad.py +152 -0
- mindspore/ops/_op_impl/aicpu/add.py +43 -0
- mindspore/ops/_op_impl/aicpu/addcdiv.py +0 -32
- mindspore/ops/_op_impl/aicpu/addcmul.py +0 -84
- mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/arg_max.py +75 -0
- mindspore/ops/_op_impl/aicpu/arg_min.py +75 -0
- mindspore/ops/_op_impl/aicpu/argmin_with_value.py +43 -0
- mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -0
- mindspore/ops/_op_impl/aicpu/batch_norm_grad_grad.py +49 -0
- mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
- mindspore/ops/_op_impl/aicpu/bessel_i0.py +31 -0
- mindspore/ops/_op_impl/aicpu/bias_add.py +44 -0
- mindspore/ops/_op_impl/aicpu/bias_add_grad.py +43 -0
- mindspore/ops/_op_impl/aicpu/bincount.py +33 -0
- mindspore/{nn/probability/infer/variational/__init__.py → ops/_op_impl/aicpu/cauchy.py} +17 -10
- mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
- mindspore/ops/_op_impl/aicpu/cholesky.py +1 -1
- mindspore/ops/_op_impl/{cpu/bias_add.py → aicpu/choleskygrad.py} +9 -7
- mindspore/ops/_op_impl/aicpu/combined_non_max_suppression.py +42 -0
- mindspore/ops/_op_impl/aicpu/concat_offset.py +42 -0
- mindspore/ops/_op_impl/aicpu/concat_offset_v1.py +31 -0
- mindspore/ops/_op_impl/aicpu/conj.py +11 -0
- mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_image.py +38 -0
- mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +36 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
- mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +2 -2
- mindspore/ops/_op_impl/aicpu/dense_to_sparse_set_operation.py +48 -0
- mindspore/ops/_op_impl/aicpu/diag.py +36 -0
- mindspore/ops/_op_impl/aicpu/diag_part.py +36 -0
- mindspore/ops/_op_impl/aicpu/diagonal.py +35 -0
- mindspore/ops/_op_impl/{cpu/bias_add_grad.py → aicpu/digamma.py} +9 -7
- mindspore/ops/_op_impl/aicpu/eig.py +35 -0
- mindspore/ops/_op_impl/aicpu/fft_with_size.py +41 -0
- mindspore/ops/_op_impl/aicpu/flatten.py +1 -0
- mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
- mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
- mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +1 -1
- mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
- mindspore/ops/_op_impl/aicpu/glu.py +33 -0
- mindspore/ops/_op_impl/aicpu/glu_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/greater.py +41 -0
- mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
- mindspore/ops/_op_impl/{tbe/scatter_add_ds.py → aicpu/inplace_index_add.py} +17 -21
- mindspore/ops/_op_impl/aicpu/instance_norm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/instance_norm_v2_grad.py +44 -0
- mindspore/ops/_op_impl/aicpu/layer_norm_grad_grad.py +47 -0
- mindspore/ops/_op_impl/aicpu/less.py +41 -0
- mindspore/ops/_op_impl/aicpu/less_equal.py +41 -0
- mindspore/ops/_op_impl/aicpu/lgamma.py +32 -0
- mindspore/ops/_op_impl/aicpu/log_normal_reverse.py +33 -0
- mindspore/ops/_op_impl/aicpu/logit.py +33 -0
- mindspore/ops/_op_impl/aicpu/logit_grad.py +34 -0
- mindspore/ops/_op_impl/aicpu/masked_fill.py +42 -0
- mindspore/ops/_op_impl/aicpu/masked_scatter.py +39 -0
- mindspore/ops/_op_impl/aicpu/matmul.py +39 -0
- mindspore/ops/_op_impl/aicpu/matrix_logarithm.py +31 -0
- mindspore/ops/_op_impl/aicpu/matrix_power.py +32 -0
- mindspore/ops/_op_impl/aicpu/matrix_solve_ls.py +36 -0
- mindspore/ops/_op_impl/aicpu/matrix_triangular_solve.py +36 -0
- mindspore/ops/_op_impl/aicpu/mirror_pad.py +2 -0
- mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +0 -4
- mindspore/ops/_op_impl/aicpu/mul.py +3 -1
- mindspore/ops/_op_impl/aicpu/multinomial.py +14 -6
- mindspore/ops/_op_impl/aicpu/multinomial_with_replacement.py +35 -0
- mindspore/ops/_op_impl/aicpu/nan_to_num.py +34 -0
- mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
- mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
- mindspore/ops/_op_impl/aicpu/ones_like.py +0 -2
- mindspore/ops/_op_impl/aicpu/polar.py +32 -0
- mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
- mindspore/ops/_op_impl/aicpu/qr.py +36 -0
- mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
- mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
- mindspore/ops/_op_impl/aicpu/ragged_tensor_to_tensor.py +74 -0
- mindspore/ops/_op_impl/aicpu/random_shuffle.py +3 -0
- mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
- mindspore/ops/_op_impl/aicpu/range.py +36 -0
- mindspore/ops/_op_impl/aicpu/reciprocal.py +34 -0
- mindspore/ops/_op_impl/aicpu/reciprocal_grad.py +35 -0
- mindspore/ops/_op_impl/aicpu/reduce_sum.py +57 -0
- mindspore/ops/_op_impl/aicpu/resize_bicubic.py +2 -8
- mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +1 -1
- mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
- mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
- mindspore/ops/_op_impl/aicpu/scatter_elements.py +4 -0
- mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +2 -0
- mindspore/ops/_op_impl/aicpu/search_sorted.py +12 -6
- mindspore/ops/_op_impl/aicpu/self_adjoint_eig.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
- mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
- mindspore/ops/_op_impl/aicpu/slice_grad.py +76 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
- mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
- mindspore/ops/_op_impl/aicpu/sort.py +39 -0
- mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +0 -24
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
- mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows.py +63 -0
- mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows_grad.py +45 -0
- mindspore/ops/_op_impl/aicpu/sparse_matrix_mat_mul.py +56 -0
- mindspore/ops/_op_impl/{tbe/slice_ds.py → aicpu/sparse_segment_sum.py} +16 -24
- mindspore/ops/_op_impl/aicpu/sparse_segment_sum_with_num_segments.py +68 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice.py +63 -0
- mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +61 -0
- mindspore/ops/_op_impl/aicpu/squared_difference.py +2 -0
- mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +93 -0
- mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +66 -0
- mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
- mindspore/ops/_op_impl/{tbe/gather_v2.py → aicpu/tile.py} +24 -24
- mindspore/ops/_op_impl/aicpu/tridiagonal_solve.py +35 -0
- mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/triu_indices.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform.py +34 -0
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +1 -0
- mindspore/ops/_op_impl/aicpu/unique_consecutive.py +10 -2
- mindspore/ops/_op_impl/cpu/__init__.py +1 -2
- mindspore/ops/_op_impl/cpu/dynamic_shape.py +5 -1
- mindspore/ops/_op_impl/cpu/maximum_grad.py +2 -0
- mindspore/{compression/common/__init__.py → ops/_op_impl/cpu/pyexecute.py} +13 -8
- mindspore/ops/_op_impl/cpu/reduce_sum.py +8 -0
- mindspore/ops/_op_impl/cpu/sparse_slice.py +62 -0
- mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +60 -0
- mindspore/ops/_op_impl/cpu/tensor_shape.py +5 -1
- mindspore/ops/_op_impl/tbe/__init__.py +27 -608
- mindspore/ops/_op_impl/tbe/addcdiv_ds.py +42 -0
- mindspore/ops/_op_impl/tbe/addcmul_ds.py +44 -0
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +1 -1
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -1
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +41 -0
- mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +1 -0
- mindspore/ops/_op_impl/tbe/bias_add_grad.py +2 -0
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +4 -2
- mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +40 -0
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -1
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -1
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +6 -4
- mindspore/ops/_op_impl/tbe/cast.py +0 -2
- mindspore/ops/_op_impl/tbe/cast_ds.py +3 -3
- mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -2
- mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -2
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +1 -0
- mindspore/ops/_op_impl/tbe/deformable_offsets.py +1 -0
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +1 -1
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_nd.py +1 -0
- mindspore/ops/_op_impl/tbe/greater.py +2 -0
- mindspore/ops/_op_impl/tbe/{index_add.py → inplace_index_add.py} +3 -6
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -1
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +35 -0
- mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -6
- mindspore/ops/_op_impl/tbe/{greater_ds.py → reduce_all_ds.py} +13 -16
- mindspore/ops/_op_impl/tbe/reduce_any_ds.py +39 -0
- mindspore/ops/_op_impl/tbe/roi_align_ds.py +44 -0
- mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +44 -0
- mindspore/ops/_op_impl/tbe/scatter_add.py +2 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +2 -2
- mindspore/ops/_op_impl/tbe/slice.py +26 -15
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +1 -1
- mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +1 -0
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +15 -5
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +1 -1
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +2 -0
- mindspore/ops/_primitive_cache.py +3 -2
- mindspore/ops/_register_for_op.py +11 -0
- mindspore/ops/_utils/__init__.py +1 -1
- mindspore/ops/_utils/utils.py +20 -41
- mindspore/ops/_vmap/__init__.py +2 -2
- mindspore/ops/_vmap/vmap_array_ops.py +170 -78
- mindspore/ops/_vmap/vmap_base.py +24 -10
- mindspore/ops/_vmap/vmap_convolution_ops.py +7 -10
- mindspore/ops/_vmap/vmap_grad_math_ops.py +4 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +41 -9
- mindspore/ops/_vmap/vmap_image_ops.py +52 -0
- mindspore/ops/_vmap/vmap_math_ops.py +77 -6
- mindspore/ops/_vmap/vmap_nn_ops.py +78 -29
- mindspore/ops/_vmap/vmap_other_ops.py +3 -1
- mindspore/ops/_vmap/vmap_random_ops.py +55 -3
- mindspore/ops/_vmap/vmap_sparse_ops.py +1 -0
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ApproximateEqual_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Argmax_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/Argmin_bprop.mindir +14 -13
- mindspore/ops/bprop_mindir/AssignSub_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/Assign_bprop.mindir +16 -16
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +150 -0
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchToSpaceND_bprop.mindir +28 -0
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +306 -0
- mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +12 -8
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Concat_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +240 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +247 -0
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +315 -0
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +278 -0
- mindspore/ops/bprop_mindir/DType_bprop.mindir +12 -12
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/Depend_bprop.mindir +12 -13
- mindspore/ops/bprop_mindir/DepthToSpace_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +138 -0
- mindspore/ops/bprop_mindir/DiagPart_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +22 -24
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +16 -14
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicShape_bprop.mindir +12 -12
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Equal_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +58 -0
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +54 -0
- mindspore/ops/bprop_mindir/FloorDiv_bprop.mindir +18 -15
- mindspore/ops/bprop_mindir/GatherD_bprop.mindir +26 -0
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +57 -0
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/GreaterEqual_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/Greater_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/IOU_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/IsFinite_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/IsInf_bprop.mindir +13 -10
- mindspore/ops/bprop_mindir/IsNan_bprop.mindir +14 -11
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +126 -0
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +15 -0
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +30 -0
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +43 -0
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LessEqual_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/Less_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/LinSpace_bprop.mindir +22 -19
- mindspore/ops/bprop_mindir/Load_bprop.mindir +12 -13
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/LogicalAnd_bprop.mindir +17 -18
- mindspore/ops/bprop_mindir/LogicalNot_bprop.mindir +14 -13
- mindspore/ops/bprop_mindir/MaskedSelect_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +74 -0
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +75 -0
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +65 -0
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Maximum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Minimum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +27 -0
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +35 -0
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NonZero_bprop.mindir +14 -0
- mindspore/ops/bprop_mindir/NotEqual_bprop.mindir +18 -19
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +25 -23
- mindspore/ops/bprop_mindir/OnesLike_bprop.mindir +13 -13
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +82 -0
- mindspore/ops/bprop_mindir/Range_bprop.mindir +21 -19
- mindspore/ops/bprop_mindir/Rank_bprop.mindir +11 -11
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReduceAll_bprop.mindir +18 -17
- mindspore/ops/bprop_mindir/ReduceAny_bprop.mindir +18 -17
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +19 -23
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +60 -0
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +89 -0
- mindspore/ops/bprop_mindir/ReverseSequence_bprop.mindir +52 -0
- mindspore/ops/bprop_mindir/ReverseV2_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/Round_bprop.mindir +14 -13
- mindspore/ops/bprop_mindir/ScatterMax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterMin_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterNdUpdate_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/ScatterNd_bprop.mindir +24 -0
- mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/ScatterUpdate_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/Select_bprop.mindir +30 -34
- mindspore/ops/bprop_mindir/Shape_bprop.mindir +12 -12
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +21 -0
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Sign_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/Slice_bprop.mindir +26 -0
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +36 -0
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +33 -0
- mindspore/ops/bprop_mindir/Sort_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SpaceToBatchND_bprop.mindir +28 -0
- mindspore/ops/bprop_mindir/SpaceToDepth_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Split_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +54 -0
- mindspore/ops/bprop_mindir/StridedSliceGrad_bprop.mindir +95 -0
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +98 -0
- mindspore/ops/bprop_mindir/Switch_bprop.mindir +28 -32
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +66 -0
- mindspore/ops/bprop_mindir/TensorScatterAdd_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/TensorScatterUpdate_bprop.mindir +29 -0
- mindspore/ops/bprop_mindir/TensorShape_bprop.mindir +14 -0
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TransShape_bprop.mindir +23 -0
- mindspore/ops/bprop_mindir/TruncateDiv_bprop.mindir +18 -15
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +11 -13
- mindspore/ops/bprop_mindir/Unique_bprop.mindir +16 -0
- mindspore/ops/bprop_mindir/Unstack_bprop.mindir +22 -0
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +32 -0
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +38 -0
- mindspore/ops/bprop_mindir/ZerosLike_bprop.mindir +13 -12
- mindspore/ops/bprop_mindir/__init__.py +1 -4
- mindspore/ops/bprop_mindir/generate_mindir.py +32 -20
- mindspore/ops/composite/__init__.py +12 -13
- mindspore/ops/composite/base.py +261 -254
- mindspore/ops/composite/env_ops.py +41 -0
- mindspore/ops/composite/math_ops.py +197 -156
- mindspore/ops/composite/multitype_ops/_compile_utils.py +428 -176
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +188 -87
- mindspore/ops/composite/multitype_ops/add_impl.py +23 -1
- mindspore/ops/composite/multitype_ops/div_impl.py +3 -3
- mindspore/ops/composite/multitype_ops/equal_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/getitem_impl.py +52 -5
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +31 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +15 -3
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +33 -2
- mindspore/ops/composite/multitype_ops/less_impl.py +33 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/mod_impl.py +1 -1
- mindspore/ops/composite/multitype_ops/mul_impl.py +21 -7
- mindspore/ops/composite/multitype_ops/not_in_impl.py +15 -3
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -4
- mindspore/ops/composite/multitype_ops/pow_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +62 -70
- mindspore/ops/composite/multitype_ops/sub_impl.py +3 -3
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +41 -4
- mindspore/ops/function/__init__.py +323 -8
- mindspore/ops/function/array_func.py +3511 -780
- mindspore/ops/function/clip_func.py +329 -0
- mindspore/ops/function/debug_func.py +6 -6
- mindspore/ops/function/grad/__init__.py +5 -1
- mindspore/ops/function/grad/grad_func.py +736 -65
- mindspore/ops/function/image_func.py +270 -0
- mindspore/ops/function/linalg_func.py +268 -8
- mindspore/ops/function/math_func.py +8032 -3164
- mindspore/ops/function/nn_func.py +5619 -1855
- mindspore/ops/function/other_func.py +115 -0
- mindspore/ops/function/parameter_func.py +11 -10
- mindspore/ops/function/random_func.py +939 -77
- mindspore/ops/function/sparse_func.py +249 -84
- mindspore/ops/function/sparse_unary_func.py +2303 -0
- mindspore/ops/function/spectral_func.py +146 -0
- mindspore/ops/function/vmap_func.py +114 -0
- mindspore/ops/functional.py +182 -254
- mindspore/ops/op_info_register.py +79 -34
- mindspore/ops/operations/__init__.py +210 -118
- mindspore/ops/operations/_csr_ops.py +7 -7
- mindspore/ops/operations/_embedding_cache_ops.py +25 -15
- mindspore/ops/operations/_grad_ops.py +447 -322
- mindspore/ops/operations/_inner_ops.py +547 -176
- mindspore/ops/operations/_map_tensor_ops.py +112 -0
- mindspore/ops/operations/_ms_kernel.py +29 -27
- mindspore/ops/operations/_ocr_ops.py +11 -11
- mindspore/ops/operations/_opaque_predicate_registry.py +41 -0
- mindspore/ops/operations/_quant_ops.py +186 -101
- mindspore/ops/operations/_rl_inner_ops.py +122 -61
- mindspore/ops/operations/_scalar_ops.py +466 -0
- mindspore/ops/operations/_sequence_ops.py +1047 -0
- mindspore/ops/operations/_tensor_array.py +10 -11
- mindspore/ops/operations/_thor_ops.py +4 -4
- mindspore/ops/operations/array_ops.py +1428 -1226
- mindspore/ops/operations/comm_ops.py +180 -117
- mindspore/ops/operations/control_ops.py +4 -2
- mindspore/ops/operations/custom_ops.py +185 -98
- mindspore/ops/operations/debug_ops.py +92 -54
- mindspore/ops/operations/image_ops.py +406 -211
- mindspore/ops/operations/inner_ops.py +42 -53
- mindspore/ops/operations/linalg_ops.py +32 -29
- mindspore/ops/operations/math_ops.py +2076 -897
- mindspore/ops/operations/nn_ops.py +1282 -1252
- mindspore/ops/operations/other_ops.py +124 -278
- mindspore/ops/operations/random_ops.py +345 -178
- mindspore/ops/operations/rl_ops.py +8 -9
- mindspore/ops/operations/sparse_ops.py +502 -157
- mindspore/ops/operations/spectral_ops.py +107 -0
- mindspore/ops/primitive.py +192 -15
- mindspore/ops/vm_impl_registry.py +23 -2
- mindspore/parallel/__init__.py +6 -1
- mindspore/parallel/_auto_parallel_context.py +199 -92
- mindspore/parallel/_cell_wrapper.py +4 -2
- mindspore/parallel/_cost_model_context.py +3 -0
- mindspore/parallel/_dp_allreduce_fusion.py +2 -1
- mindspore/parallel/_offload_context.py +185 -0
- mindspore/parallel/_parallel_serialization.py +167 -28
- mindspore/parallel/_ps_context.py +9 -5
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +9 -1
- mindspore/{nn/transformer → parallel/_transformer}/__init__.py +6 -6
- mindspore/{nn/transformer → parallel/_transformer}/layers.py +59 -37
- mindspore/{nn/transformer → parallel/_transformer}/loss.py +4 -7
- mindspore/{nn/transformer → parallel/_transformer}/moe.py +160 -35
- mindspore/{nn/transformer → parallel/_transformer}/op_parallel_config.py +3 -3
- mindspore/{nn/transformer → parallel/_transformer}/transformer.py +235 -196
- mindspore/parallel/_utils.py +47 -7
- mindspore/parallel/algo_parameter_config.py +5 -1
- mindspore/parallel/checkpoint_transform.py +329 -0
- mindspore/parallel/shard.py +229 -0
- mindspore/perf_msvcbuildinsights.dll +0 -0
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/common/util.py +4 -3
- mindspore/profiler/common/validator/validate_path.py +2 -2
- mindspore/profiler/envprofiling.py +249 -0
- mindspore/profiler/parser/aicpu_data_parser.py +38 -39
- mindspore/profiler/parser/ascend_timeline_generator.py +497 -0
- mindspore/profiler/parser/base_timeline_generator.py +471 -0
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +684 -0
- mindspore/profiler/parser/framework_parser.py +42 -16
- mindspore/profiler/parser/hccl_parser.py +158 -158
- mindspore/profiler/parser/hwts_log_parser.py +7 -6
- mindspore/profiler/parser/integrator.py +18 -1579
- mindspore/profiler/parser/minddata_analyzer.py +8 -8
- mindspore/profiler/parser/msadvisor_analyzer.py +14 -27
- mindspore/profiler/parser/msadvisor_parser.py +2 -4
- mindspore/profiler/parser/optime_parser.py +17 -18
- mindspore/profiler/parser/profiler_info.py +108 -0
- mindspore/profiler/parser/step_trace_parser.py +1 -1
- mindspore/profiler/profiling.py +396 -194
- mindspore/rewrite/__init__.py +6 -2
- mindspore/rewrite/api/node.py +51 -110
- mindspore/rewrite/api/node_type.py +10 -6
- mindspore/rewrite/api/pattern_engine.py +51 -7
- mindspore/rewrite/api/scoped_value.py +64 -53
- mindspore/rewrite/api/symbol_tree.py +108 -61
- mindspore/rewrite/api/tree_node_helper.py +2 -3
- mindspore/{compression/quant/__init__.py → rewrite/ast_creator_register.py} +20 -11
- mindspore/rewrite/ast_helpers/__init__.py +6 -3
- mindspore/rewrite/ast_helpers/ast_creator.py +115 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +99 -1
- mindspore/rewrite/ast_helpers/ast_modifier.py +17 -4
- mindspore/rewrite/ast_helpers/ast_replacer.py +1 -1
- mindspore/rewrite/ast_transformers/__init__.py +0 -1
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +46 -5
- mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +6 -3
- mindspore/rewrite/common/__init__.py +2 -0
- mindspore/rewrite/common/event.py +1 -1
- mindspore/rewrite/common/observable.py +1 -1
- mindspore/rewrite/common/observer.py +1 -1
- mindspore/rewrite/common/rewrite_elog.py +35 -0
- mindspore/rewrite/namer.py +2 -2
- mindspore/rewrite/namespace.py +14 -4
- mindspore/rewrite/node.py +161 -13
- mindspore/rewrite/parser.py +0 -1
- mindspore/rewrite/parser_register.py +0 -1
- mindspore/rewrite/parsers/arguments_parser.py +3 -2
- mindspore/rewrite/parsers/assign_parser.py +267 -67
- mindspore/rewrite/parsers/attribute_parser.py +56 -0
- mindspore/rewrite/parsers/class_def_parser.py +191 -108
- mindspore/rewrite/parsers/constant_parser.py +101 -0
- mindspore/rewrite/parsers/container_parser.py +88 -0
- mindspore/rewrite/parsers/for_parser.py +28 -15
- mindspore/rewrite/parsers/function_def_parser.py +21 -5
- mindspore/rewrite/parsers/if_parser.py +11 -28
- mindspore/rewrite/parsers/module_parser.py +9 -6
- mindspore/rewrite/parsers/return_parser.py +3 -2
- mindspore/rewrite/sparsify/__init__.py +0 -0
- mindspore/rewrite/sparsify/sparse_transformer.py +448 -0
- mindspore/rewrite/sparsify/sparsify.py +109 -0
- mindspore/rewrite/sparsify/utils.py +173 -0
- mindspore/rewrite/symbol_tree.py +322 -109
- mindspore/rewrite/symbol_tree_builder.py +45 -8
- mindspore/rewrite/symbol_tree_dumper.py +0 -1
- mindspore/rewrite/topological_manager.py +1 -2
- mindspore/run_check/_check_version.py +209 -112
- mindspore/run_check/run_check.py +2 -1
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +6 -4
- mindspore/train/_utils.py +28 -5
- mindspore/train/amp.py +321 -50
- mindspore/train/callback/__init__.py +3 -1
- mindspore/train/callback/_backup_and_restore.py +120 -0
- mindspore/train/callback/_callback.py +8 -8
- mindspore/train/callback/_checkpoint.py +12 -9
- mindspore/train/callback/_early_stop.py +13 -7
- mindspore/train/callback/_history.py +8 -8
- mindspore/train/callback/_lambda_callback.py +6 -6
- mindspore/train/callback/_landscape.py +36 -38
- mindspore/train/callback/_loss_monitor.py +12 -6
- mindspore/train/callback/_lr_scheduler_callback.py +2 -4
- mindspore/train/callback/_on_request_exit.py +212 -0
- mindspore/train/callback/_reduce_lr_on_plateau.py +13 -7
- mindspore/train/callback/_summary_collector.py +27 -19
- mindspore/train/callback/_time_monitor.py +13 -7
- mindspore/train/checkpoint_pb2.py +68 -8
- mindspore/train/data_sink.py +122 -33
- mindspore/train/dataset_helper.py +28 -87
- mindspore/train/loss_scale_manager.py +4 -7
- mindspore/{nn → train}/metrics/__init__.py +20 -20
- mindspore/{nn → train}/metrics/accuracy.py +12 -10
- mindspore/{nn → train}/metrics/auc.py +4 -4
- mindspore/{nn → train}/metrics/bleu_score.py +4 -4
- mindspore/{nn → train}/metrics/confusion_matrix.py +10 -8
- mindspore/{nn → train}/metrics/cosine_similarity.py +4 -4
- mindspore/{nn → train}/metrics/dice.py +6 -5
- mindspore/{nn → train}/metrics/error.py +7 -5
- mindspore/{nn → train}/metrics/fbeta.py +9 -7
- mindspore/{nn → train}/metrics/hausdorff_distance.py +8 -6
- mindspore/{nn → train}/metrics/loss.py +4 -3
- mindspore/{nn → train}/metrics/mean_surface_distance.py +6 -5
- mindspore/{nn → train}/metrics/metric.py +6 -5
- mindspore/{nn → train}/metrics/occlusion_sensitivity.py +4 -3
- mindspore/{nn → train}/metrics/perplexity.py +5 -4
- mindspore/{nn → train}/metrics/precision.py +5 -4
- mindspore/{nn → train}/metrics/recall.py +5 -4
- mindspore/{nn → train}/metrics/roc.py +7 -6
- mindspore/{nn → train}/metrics/root_mean_square_surface_distance.py +6 -5
- mindspore/{nn → train}/metrics/topk.py +7 -5
- mindspore/train/mind_ir_pb2.py +339 -32
- mindspore/train/model.py +113 -84
- mindspore/train/serialization.py +547 -167
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/summary_record.py +43 -12
- mindspore/train/train_thor/convert_utils.py +7 -1
- mindspore/train/train_thor/dataset_helper.py +3 -3
- mindspore/train/train_thor/model_thor.py +0 -4
- mindspore/turbojpeg.dll +0 -0
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/METADATA +4 -3
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/RECORD +901 -660
- mindspore/compression/common/constant.py +0 -124
- mindspore/compression/export/__init__.py +0 -19
- mindspore/compression/export/quant_export.py +0 -514
- mindspore/compression/quant/qat.py +0 -636
- mindspore/compression/quant/quant_utils.py +0 -462
- mindspore/compression/quant/quantizer.py +0 -68
- mindspore/libatomic-1.dll +0 -0
- mindspore/libgcc_s_seh-1.dll +0 -0
- mindspore/libgfortran-4.dll +0 -0
- mindspore/libgomp-1.dll +0 -0
- mindspore/libjpeg-62.dll +0 -0
- mindspore/libmindspore.dll +0 -0
- mindspore/libmindspore_common.dll +0 -0
- mindspore/libmindspore_core.dll +0 -0
- mindspore/libmindspore_glog.dll +0 -0
- mindspore/libnnacl.dll +0 -0
- mindspore/libopencv_core452.dll +0 -0
- mindspore/libopencv_imgcodecs452.dll +0 -0
- mindspore/libopencv_imgproc452.dll +0 -0
- mindspore/libquadmath-0.dll +0 -0
- mindspore/libsqlite3.dll +0 -0
- mindspore/libssp-0.dll +0 -0
- mindspore/libstdc++-6.dll +0 -0
- mindspore/libtinyxml2.dll +0 -0
- mindspore/libturbojpeg.dll +0 -0
- mindspore/libwinpthread-1.dll +0 -0
- mindspore/nn/layer/quant.py +0 -1868
- mindspore/nn/layer/rnn_utils.py +0 -90
- mindspore/nn/probability/dpn/__init__.py +0 -22
- mindspore/nn/probability/dpn/vae/__init__.py +0 -25
- mindspore/nn/probability/dpn/vae/cvae.py +0 -138
- mindspore/nn/probability/dpn/vae/vae.py +0 -122
- mindspore/nn/probability/infer/__init__.py +0 -22
- mindspore/nn/probability/infer/variational/elbo.py +0 -70
- mindspore/nn/probability/infer/variational/svi.py +0 -84
- mindspore/nn/probability/toolbox/__init__.py +0 -22
- mindspore/nn/probability/toolbox/anomaly_detection.py +0 -99
- mindspore/nn/probability/toolbox/uncertainty_evaluation.py +0 -363
- mindspore/nn/probability/transforms/__init__.py +0 -22
- mindspore/nn/probability/transforms/transform_bnn.py +0 -262
- mindspore/nn/probability/zhusuan/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/framework/bn.py +0 -95
- mindspore/nn/probability/zhusuan/variational/__init__.py +0 -18
- mindspore/nn/probability/zhusuan/variational/elbo.py +0 -46
- mindspore/ops/_op_impl/tbe/bias_add_grad_ds.py +0 -52
- mindspore/ops/_op_impl/tbe/scatter_nd_add_ds.py +0 -43
- mindspore/ops/bprop_mindir/AssignAdd_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/Identity_bprop.mindir +0 -9
- mindspore/ops/bprop_mindir/LogicalOr_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/ReLU_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/UpdateState_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/stop_gradient_bprop.mindir +0 -12
- mindspore/ops/composite/array_ops.py +0 -210
- mindspore/ops/composite/clip_ops.py +0 -238
- mindspore/ops/composite/random_ops.py +0 -426
- mindspore/ops/composite/vmap_ops.py +0 -38
- mindspore/ops/operations/sponge_ops.py +0 -3531
- mindspore/ops/operations/sponge_update_ops.py +0 -2546
- mindspore/parallel/nn/__init__.py +0 -42
- mindspore/parallel/nn/loss.py +0 -22
- mindspore/parallel/nn/moe.py +0 -21
- mindspore/parallel/nn/op_parallel_config.py +0 -22
- mindspore/parallel/nn/transformer.py +0 -31
- mindspore/run_check/_check_deps_version.py +0 -84
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-1.10.0.dist-info → mindspore-2.0.0rc1.dist-info}/top_level.txt +0 -0
mindspore/nn/optim/adam.py
CHANGED
|
@@ -13,31 +13,246 @@
|
|
|
13
13
|
# limitations under the License.
|
|
14
14
|
# ============================================================================
|
|
15
15
|
"""adam"""
|
|
16
|
-
from __future__ import absolute_import
|
|
16
|
+
from __future__ import absolute_import, division
|
|
17
17
|
|
|
18
18
|
import numpy as np
|
|
19
19
|
|
|
20
20
|
from mindspore import context
|
|
21
21
|
from mindspore.common import dtype as mstype
|
|
22
22
|
from mindspore.common.initializer import initializer
|
|
23
|
-
from mindspore.common.api import
|
|
23
|
+
from mindspore.common.api import jit
|
|
24
24
|
from mindspore.ops import operations as P
|
|
25
25
|
from mindspore.ops import composite as C
|
|
26
26
|
from mindspore.ops import functional as F
|
|
27
27
|
from mindspore.common.parameter import Parameter
|
|
28
28
|
from mindspore.common.tensor import Tensor
|
|
29
|
-
from mindspore
|
|
30
|
-
from mindspore._checkparam import Rel
|
|
29
|
+
from mindspore import _checkparam as validator
|
|
31
30
|
from mindspore.nn.optim.optimizer import Optimizer
|
|
32
31
|
from mindspore.nn.optim.optimizer import opt_init_args_register
|
|
33
32
|
from mindspore.nn.optim._dist_optimizer_registry import _register_dist_optimizer
|
|
33
|
+
from mindspore.common._decorator import deprecated
|
|
34
34
|
|
|
35
35
|
_adam_opt = C.MultitypeFuncGraph("adam_opt")
|
|
36
36
|
_fused_adam_weight_decay = C.MultitypeFuncGraph("fused_adam_weight_decay")
|
|
37
|
+
_lazy_adam_opt = C.MultitypeFuncGraph("lazy_adam_opt")
|
|
37
38
|
_scaler_one = Tensor(1, mstype.int32)
|
|
38
39
|
_scaler_ten = Tensor(10, mstype.float32)
|
|
39
40
|
|
|
40
41
|
|
|
42
|
+
@_lazy_adam_opt.register("Function", "Function", "Function", "Function", "Bool", "Bool", "Bool", "Tensor", "Tensor",
|
|
43
|
+
"Tensor", "Tensor", "Tensor", "Tensor", "RowTensor", "Tensor", "Tensor", "Tensor", "Bool",
|
|
44
|
+
"Bool", "Function", "Bool", "Function", "Bool")
|
|
45
|
+
def _run_lazy_opt_with_sparse_dist(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_power,
|
|
46
|
+
beta2_power, beta1, beta2, eps, lr, gradient, params, m, v, ps_parameter,
|
|
47
|
+
cache_enable, distributed_opt, use_flag, distributed_sparse_opt, use_sparse_flag):
|
|
48
|
+
"""Apply sparse lazy adam optimizer to the weight parameter when the gradient is sparse."""
|
|
49
|
+
success = True
|
|
50
|
+
indices = gradient.indices
|
|
51
|
+
values = gradient.values
|
|
52
|
+
if use_sparse_flag:
|
|
53
|
+
success = F.depend(success, distributed_sparse_opt(params, m, v, beta1_power, beta2_power, lr, beta1, beta2,
|
|
54
|
+
eps, values, indices))
|
|
55
|
+
return success
|
|
56
|
+
if ps_parameter and not cache_enable:
|
|
57
|
+
op_shape = P.Shape()
|
|
58
|
+
shapes = (op_shape(params), op_shape(m), op_shape(v),
|
|
59
|
+
op_shape(beta1_power), op_shape(beta2_power), op_shape(lr), op_shape(beta1),
|
|
60
|
+
op_shape(beta2), op_shape(eps), op_shape(values), op_shape(indices))
|
|
61
|
+
success = F.depend(success, pull(push((beta1_power, beta2_power, lr, beta1, beta2,
|
|
62
|
+
eps, values, indices), shapes), params))
|
|
63
|
+
return success
|
|
64
|
+
|
|
65
|
+
if not target:
|
|
66
|
+
success = F.depend(success, sparse_opt(params, m, v, beta1_power, beta2_power, lr, beta1, beta2,
|
|
67
|
+
eps, values, indices))
|
|
68
|
+
else:
|
|
69
|
+
op_gather = P.Gather()
|
|
70
|
+
op_sqrt = P.Sqrt()
|
|
71
|
+
scatter_add = P.ScatterAdd(use_locking)
|
|
72
|
+
scatter_update = P.ScatterUpdate(use_locking)
|
|
73
|
+
|
|
74
|
+
m_slice = op_gather(m, indices, 0)
|
|
75
|
+
v_slice = op_gather(v, indices, 0)
|
|
76
|
+
|
|
77
|
+
next_m = m_slice * beta1 + values * (1 - beta1)
|
|
78
|
+
next_v = v_slice * beta2 + values * values * (1 - beta2)
|
|
79
|
+
|
|
80
|
+
lr_t = lr * op_sqrt(1 - beta2_power) / (1 - beta1_power)
|
|
81
|
+
|
|
82
|
+
if use_nesterov:
|
|
83
|
+
m_temp = beta1 * next_m + values * (1 - beta1)
|
|
84
|
+
param_update = m_temp / (op_sqrt(next_v) + eps)
|
|
85
|
+
else:
|
|
86
|
+
param_update = next_m / (op_sqrt(next_v) + eps)
|
|
87
|
+
|
|
88
|
+
success = F.depend(success, scatter_add(params, indices, - lr_t * param_update))
|
|
89
|
+
success = F.depend(success, scatter_update(m, indices, next_m))
|
|
90
|
+
success = F.depend(success, scatter_update(v, indices, next_v))
|
|
91
|
+
|
|
92
|
+
return success
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
+@_lazy_adam_opt.register("Function", "Function", "Function", "Function", "Bool", "Bool", "Bool", "Tensor", "Tensor",
+                         "Tensor", "Tensor", "Tensor", "Tensor", "MapTensor", "MapTensor", "MapTensor", "MapTensor",
+                         "Bool", "Bool", "Function", "Bool", "Function", "Bool")
+def _run_map_tensor_lazy_opt_with_sparse_dist(opt, sparse_opt, push, pull, use_locking, use_nesterov, target,
+                                              beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, params, m, v,
+                                              ps_parameter, cache_enable, distributed_opt, use_flag,
+                                              distributed_sparse_opt, use_sparse_flag):
+    """Apply sparse lazy adam optimizer to the weight parameter when the gradient is sparse."""
+    success = True
+    indices, values = gradient.get_data()
+    if use_sparse_flag:
+        # PS Mode.
+        success = F.depend(success, distributed_sparse_opt(params, m, v, beta1_power, beta2_power, lr, beta1, beta2,
+                                                           eps, values, indices))
+    else:
+        # PS Cache mode.
+        op_sqrt = P.Sqrt()
+
+        m_slice = m.get(indices)
+        v_slice = v.get(indices)
+
+        next_m = m_slice * beta1 + values * (1 - beta1)
+        next_v = v_slice * beta2 + values * values * (1 - beta2)
+
+        lr_t = lr * op_sqrt(1 - beta2_power) / (1 - beta1_power)
+
+        if use_nesterov:
+            m_temp = beta1 * next_m + values * (1 - beta1)
+            param_update = m_temp / (op_sqrt(next_v) + eps)
+        else:
+            param_update = next_m / (op_sqrt(next_v) + eps)
+
+        params_need_update = params.get(indices)
+        params.put(indices, params_need_update - lr_t * param_update)
+        m.put(indices, next_m)
+        v.put(indices, next_v)
+
+    return success
+
+
+@_lazy_adam_opt.register("Function", "Function", "Function", "Function", "Bool", "Bool", "Bool", "Tensor", "Tensor",
+                         "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Bool", "Bool",
+                         "Function", "Bool", "Function", "Bool")
+def _run_lazy_opt_with_one_number_dist(opt, sparse_opt, push, pull, use_locking, use_nesterov, target,
+                                       beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, params, moment1,
+                                       moment2, ps_parameter, cache_enable, distributed_opt, use_flag,
+                                       distributed_sparse_opt, use_sparse_flag):
+    """Apply lazy adam optimizer to the weight parameter using Tensor."""
+    success = True
+    if use_flag:
+        success = F.depend(success, distributed_opt(params, moment1, moment2, beta1_power, beta2_power, lr, beta1,
+                                                    beta2, eps, gradient))
+    elif ps_parameter and not cache_enable:
+        op_shape = P.Shape()
+        success = F.depend(success, pull(push((beta1_power, beta2_power, lr, beta1, beta2, eps, gradient),
+                                              (op_shape(params), op_shape(moment1), op_shape(moment2))), params))
+    else:
+        success = F.depend(success, opt(params, moment1, moment2, beta1_power, beta2_power, lr, beta1, beta2,
+                                        eps, gradient))
+    return success
+
+
+@_lazy_adam_opt.register("Function", "Function", "Function", "Function", "Bool", "Bool", "Bool", "Tensor", "Tensor",
+                         "Tensor", "Tensor", "Tensor", "Tensor", "RowTensor", "Tensor", "Tensor", "Tensor", "Bool",
+                         "Bool")
+def _run_lazy_opt_with_sparse(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_power, beta2_power,
+                              beta1, beta2, eps, lr, gradient, params, m, v, ps_parameter, cache_enable):
+    """Apply sparse lazy adam optimizer to the weight parameter when the gradient is sparse."""
+    success = True
+    indices = gradient.indices
+    values = gradient.values
+    if ps_parameter and not cache_enable:
+        op_shape = P.Shape()
+        shapes = (op_shape(params), op_shape(m), op_shape(v),
+                  op_shape(beta1_power), op_shape(beta2_power), op_shape(lr), op_shape(beta1),
+                  op_shape(beta2), op_shape(eps), op_shape(values), op_shape(indices))
+        success = F.depend(success, pull(push((beta1_power, beta2_power, lr, beta1, beta2,
+                                               eps, values, indices), shapes), params))
+        return success
+
+    if not target:
+        success = F.depend(success, sparse_opt(params, m, v, beta1_power, beta2_power, lr, beta1, beta2,
+                                               eps, values, indices))
+    else:
+        op_gather = P.Gather()
+        op_sqrt = P.Sqrt()
+        scatter_add = P.ScatterAdd(use_locking)
+        scatter_update = P.ScatterUpdate(use_locking)
+
+        m_slice = op_gather(m, indices, 0)
+        v_slice = op_gather(v, indices, 0)
+
+        next_m = m_slice * beta1 + values * (1 - beta1)
+        next_v = v_slice * beta2 + values * values * (1 - beta2)
+
+        lr_t = lr * op_sqrt(1 - beta2_power) / (1 - beta1_power)
+
+        if use_nesterov:
+            m_temp = beta1 * next_m + values * (1 - beta1)
+            param_update = m_temp / (op_sqrt(next_v) + eps)
+        else:
+            param_update = next_m / (op_sqrt(next_v) + eps)
+
+        success = F.depend(success, scatter_add(params, indices, - lr_t * param_update))
+        success = F.depend(success, scatter_update(m, indices, next_m))
+        success = F.depend(success, scatter_update(v, indices, next_v))
+
+    return success
+
+
+@_lazy_adam_opt.register("Function", "Function", "Function", "Function", "Bool", "Bool", "Bool", "Tensor", "Tensor",
+                         "Tensor", "Tensor", "Tensor", "Tensor", "MapTensor", "MapTensor", "MapTensor", "MapTensor",
+                         "Bool", "Bool")
+def _run_map_tensor_lazy_opt_with_sparse(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_power,
+                                         beta2_power, beta1, beta2, eps, lr, gradient, params, m, v, ps_parameter,
+                                         cache_enable):
+    """Apply sparse lazy adam optimizer to the weight parameter when the gradient is sparse(MapTensor)."""
+    success = True
+    indices, values = gradient.get_data()
+
+    op_sqrt = P.Sqrt()
+
+    m_slice = m.get(indices)
+    v_slice = v.get(indices)
+
+    next_m = m_slice * beta1 + values * (1 - beta1)
+    next_v = v_slice * beta2 + values * values * (1 - beta2)
+
+    lr_t = lr * op_sqrt(1 - beta2_power) / (1 - beta1_power)
+
+    if use_nesterov:
+        m_temp = beta1 * next_m + values * (1 - beta1)
+        param_update = m_temp / (op_sqrt(next_v) + eps)
+    else:
+        param_update = next_m / (op_sqrt(next_v) + eps)
+
+    params_need_update = params.get(indices)
+    params.put(indices, params_need_update - lr_t * param_update)
+    m.put(indices, next_m)
+    v.put(indices, next_v)
+
+    return success
+
+
+@_lazy_adam_opt.register("Function", "Function", "Function", "Function", "Bool", "Bool", "Bool", "Tensor", "Tensor",
+                         "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Bool", "Bool")
+def _run_lazy_opt_with_one_number(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_power,
+                                  beta2_power, beta1, beta2, eps, lr, gradient, params, moment1, moment2, ps_parameter,
+                                  cache_enable):
+    """Apply lazy adam optimizer to the weight parameter using Tensor."""
+    success = True
+    if ps_parameter and not cache_enable:
+        op_shape = P.Shape()
+        success = F.depend(success, pull(push((beta1_power, beta2_power, lr, beta1, beta2, eps, gradient),
+                                              (op_shape(params), op_shape(moment1), op_shape(moment2))), params))
+    else:
+        success = F.depend(success, opt(params, moment1, moment2, beta1_power, beta2_power, lr, beta1, beta2,
+                                        eps, gradient))
+    return success
+
+
 @_adam_opt.register("Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor",
                     "Tensor", "Bool", "Bool")
 def _update_run_op(beta1, beta2, eps, lr, weight_decay, param, m, v, gradient, decay_flag, optim_filter):
@@ -184,18 +399,14 @@ def _run_opt_with_one_number_dist(opt, sparse_opt, push, pull, use_locking, use_


 @_adam_opt.register("Function", "Function", "Function", "Function",
-                    "Bool", "Bool", "Bool",
+                    "Bool", "Bool", "Bool",
                     "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor",
-                    "RowTensor", "Tensor", "Tensor", "Tensor", "
+                    "RowTensor", "Tensor", "Tensor", "Tensor", "Bool", "Bool")
 def _run_opt_with_sparse(opt, sparse_opt, push, pull,
-                         use_locking, use_nesterov,
+                         use_locking, use_nesterov, target,
                          beta1_power, beta2_power, beta1, beta2, eps, lr,
-                         gradient, param, m, v,
+                         gradient, param, m, v, ps_parameter, cache_enable):
     """Apply sparse adam optimizer to the weight parameter when the gradient is sparse."""
-    if use_amsgrad:
-        raise Exception("""Adam with amsgrad is currently not supported when the gradients are sparse!
-                        Please set use_amsgrad=False for sparse gradients.""")
-
     success = True
     indices = gradient.indices
     values = gradient.values
@@ -253,30 +464,42 @@ def _run_opt_with_sparse(opt, sparse_opt, push, pull,


 @_adam_opt.register("Function", "Function", "Function", "Function",
-                    "Bool", "Bool", "Bool",
+                    "Bool", "Bool", "Bool",
                     "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor",
-                    "Tensor", "Tensor", "Tensor", "Tensor", "
+                    "Tensor", "Tensor", "Tensor", "Tensor", "Bool", "Bool")
 def _run_opt_with_one_number(opt, sparse_opt, push, pull,
-                             use_locking, use_nesterov,
+                             use_locking, use_nesterov, target,
                              beta1_power, beta2_power, beta1, beta2, eps, lr,
-                             gradient, param, moment1, moment2,
+                             gradient, param, moment1, moment2, ps_parameter, cache_enable):
     """Apply adam optimizer to the weight parameter using Tensor."""
     success = True
     if ps_parameter and not cache_enable:
         op_shape = P.Shape()
-
-
-                                              (op_shape(param), op_shape(moment1), op_shape(moment2),
-                                               op_shape(vhat))), param))
-        else:
-            success = F.depend(success, pull(push((beta1_power, beta2_power, lr, beta1, beta2, eps, gradient),
-                                                  (op_shape(param), op_shape(moment1), op_shape(moment2))), param))
+        success = F.depend(success, pull(push((beta1_power, beta2_power, lr, beta1, beta2, eps, gradient),
+                                              (op_shape(param), op_shape(moment1), op_shape(moment2))), param))
     else:
-
-
-
-
-
+        success = F.depend(success, opt(param, moment1, moment2, beta1_power, beta2_power, lr, beta1, beta2,
+                                        eps, gradient))
+    return success
+
+
+@_adam_opt.register("Function", "Function", "Function", "Function",
+                    "Bool", "Bool", "Bool",
+                    "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor",
+                    "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Bool", "Bool")
+def _run_opt_with_one_number_use_amsgrad(opt, sparse_opt, push, pull,
+                                         use_locking, use_nesterov, target,
+                                         beta1_power, beta2_power, beta1, beta2, eps, lr,
+                                         gradient, param, moment1, moment2, vhat, ps_parameter, cache_enable):
+    """Apply adam optimizer to the weight parameter using Tensor and use amsgrad."""
+    success = True
+    if ps_parameter and not cache_enable:
+        op_shape = P.Shape()
+        success = F.depend(success, pull(push((beta1_power, beta2_power, lr, gradient),
+                                              (op_shape(param), op_shape(moment1), op_shape(moment2),
+                                               op_shape(vhat))), param))
+    else:
+        success = F.depend(success, opt(param, moment1, moment2, vhat, beta1_power, beta2_power, lr, gradient))
     return success


@@ -308,8 +531,8 @@ def _check_param_value(beta1, beta2, eps, prim_name):
     validator.check_value_type("beta1", beta1, [float], prim_name)
     validator.check_value_type("beta2", beta2, [float], prim_name)
     validator.check_value_type("eps", eps, [float], prim_name)
-    validator.check_float_range(beta1, 0.0, 1.0,
-    validator.check_float_range(beta2, 0.0, 1.0,
+    validator.check_float_range(beta1, 0.0, 1.0, validator.INC_NEITHER, "beta1", prim_name)
+    validator.check_float_range(beta2, 0.0, 1.0, validator.INC_NEITHER, "beta2", prim_name)
     validator.check_positive_float(eps, "eps", prim_name)

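The tightened `check_float_range` calls above make the exclusive bounds explicit: `beta1` and `beta2` must lie strictly inside (0.0, 1.0) and `eps` must be positive. A hedged illustration of what the constructor consequently accepts and rejects (it assumes a `net` Cell with trainable parameters already exists):

```python
import mindspore.nn as nn

params = net.trainable_params()   # `net` is assumed to be an existing Cell

# Boundary or non-positive values are rejected with ValueError at construction time.
try:
    nn.Adam(params, beta1=1.0)    # 1.0 is outside the open interval (0.0, 1.0)
except ValueError as err:
    print(err)

optim = nn.Adam(params, beta1=0.9, beta2=0.999, eps=1e-8)   # accepted defaults
```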
@@ -374,6 +597,16 @@ class Adam(Optimizer):
     parameters are grouped, each group can set `weight_decay`. If not, the `weight_decay` in optimizer will be
     applied.

+    When using Adam with use_lazy=True:
+
+    Please note, the optimizer only updates the current index position of the network parameters
+    when the gradient is sparse. The sparse behavior is not equivalent to the original Adam algorithm.
+    If you want to execute a sparse policy, target needs to be set to CPU.
+
+    When using Adam with use_offload=True:
+
+    This optimizer only supports `GRAPH_MODE`.
+
     Args:
         params (Union[list[Parameter], list[dict]]): Must be list of `Parameter` or list of `dict`. When the
             `params` is a list of `dict`, the string "params", "lr", "weight_decay", "grad_centralization" and
@@ -446,6 +679,17 @@ class Adam(Optimizer):
             `FixedLossScaleManager`. Refer to class :class:`mindspore.amp.FixedLossScaleManager` for more details.
             Default: 1.0.

+        kwargs:
+
+            - use_lazy (bool): Whether to use Lazy Adam algorithm. Default: False.
+              If true, apply lazy adam algorithm.
+              If false, apply normal adam algorithm.
+
+            - use_offload (bool): Whether to offload adam optimizer to host CPU and keep parameters being updated on
+              the device in order to minimize the memory cost. Default: False.
+              If true, apply offload adam.
+              If false, apply normal adam.
+
     Inputs:
         - **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`.

@@ -457,10 +701,13 @@ class Adam(Optimizer):
         TypeError: If element of `parameters` is neither Parameter nor dict.
         TypeError: If `beta1`, `beta2`, `eps` or `loss_scale` is not a float.
         TypeError: If `weight_decay` is neither float nor int.
-        TypeError: If `use_locking`, `use_nesterov` or `
+        TypeError: If `use_locking`, `use_nesterov`, `use_amsgrad`, `use_lazy` or `use_offload` is not a bool.
         ValueError: If `loss_scale` or `eps` is less than or equal to 0.
         ValueError: If `beta1`, `beta2` is not in range (0.0, 1.0).
         ValueError: If `weight_decay` is less than 0.
+        ValueError: If `use_lazy` and `use_offload` are both true.
+        ValueError: If `use_amsgrad` is true and (`use_lazy` or `use_offload` is true).
+        ValueError: If `use_amsgrad` while using distributed training.

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
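Taken together, the docstring additions above constrain how the new keyword arguments may be combined. A hedged usage sketch follows (again assuming an existing `net` Cell; the commented-out calls are the combinations documented under "Raises" to fail with ValueError):

```python
import mindspore.nn as nn

params = net.trainable_params()   # `net` is assumed to be an existing Cell

# Lazy Adam: only the rows addressed by a sparse gradient are updated.
optim_lazy = nn.Adam(params, learning_rate=1e-3, use_lazy=True)
optim_lazy.target = "CPU"         # run the sparse update on host, as the use_lazy note advises

# Offloaded Adam: the optimizer computation runs on the host CPU (GRAPH_MODE only).
optim_offload = nn.Adam(params, learning_rate=1e-3, use_offload=True)

# Invalid combinations fail at construction time:
# nn.Adam(params, use_lazy=True, use_offload=True)       # ValueError
# nn.Adam(params, use_amsgrad=True, use_offload=True)    # ValueError
```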
@@ -479,7 +726,7 @@ class Adam(Optimizer):
         >>> group_params = [{'params': conv_params, 'weight_decay': 0.01, 'grad_centralization':True},
         ...                 {'params': no_conv_params, 'lr': 0.01},
         ...                 {'order_params': net.trainable_params()}]
-        >>> optim = nn.Adam(group_params, learning_rate=0.1, weight_decay=0.0)
+        >>> optim = nn.Adam(group_params, learning_rate=0.1, weight_decay=0.0, use_lazy=False, use_offload=False)
         >>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01 and grad
         >>> # centralization of True.
         >>> # The no_conv_params's parameters will use learning rate of 0.01 and default weight decay of 0.0 and grad
@@ -492,50 +739,174 @@ class Adam(Optimizer):

     @opt_init_args_register
     def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False,
-                 use_nesterov=False, weight_decay=0.0, loss_scale=1.0, use_amsgrad=False):
+                 use_nesterov=False, weight_decay=0.0, loss_scale=1.0, use_amsgrad=False, **kwargs):
         super(Adam, self).__init__(learning_rate, params, weight_decay, loss_scale)
+        use_lazy = kwargs.get('use_lazy', False)
+        use_offload = kwargs.get('use_offload', False)
         _check_param_value(beta1, beta2, eps, self.cls_name)
         validator.check_value_type("use_locking", use_locking, [bool], self.cls_name)
         validator.check_value_type("use_nesterov", use_nesterov, [bool], self.cls_name)
         validator.check_value_type("use_amsgrad", use_amsgrad, [bool], self.cls_name)
+        validator.check_value_type("use_lazy", use_lazy, [bool], self.cls_name)
+        validator.check_value_type("use_offload", use_offload, [bool], self.cls_name)
+
+        if use_lazy and use_offload:
+            raise ValueError(f"For 'Adam', 'use_lazy' and 'use_offload' can not both be True."
+                             f"But got use_lazy={use_lazy}, use_offload={use_offload}.")
+
+        if use_amsgrad and (use_lazy or use_offload):
+            raise ValueError(f"For lazy Adam and Adam with offload, there is no parameter named 'use_amsgrad'."
+                             f"but got 'use_amsgrad'={use_amsgrad}.")

         self.beta1 = Tensor(beta1, mstype.float32)
         self.beta2 = Tensor(beta2, mstype.float32)
-        self.beta1_power = Parameter(initializer(1,
-        self.beta2_power = Parameter(initializer(1,
+        self.beta1_power = Parameter(initializer(1, (), mstype.float32), name="beta1_power")
+        self.beta2_power = Parameter(initializer(1, (), mstype.float32), name="beta2_power")
         self.eps = Tensor(eps, mstype.float32)
         self.use_nesterov = use_nesterov
         self.use_locking = use_locking
         self.use_amsgrad = use_amsgrad
+        self.use_lazy = use_lazy
+        self.use_offload = use_offload
         self.moment1 = self._parameters.clone(prefix="moment1", init='zeros')
         self.moment2 = self._parameters.clone(prefix="moment2", init='zeros')
-        self.vhat = self._parameters.clone(prefix="vhat", init='zeros')
-
-        self._is_device = True
         if use_amsgrad:
-            self.
-
+            self.vhat = self._parameters.clone(prefix="vhat", init='zeros')
+
+        if use_offload:
+            self.opt = P.AdamNoUpdateParam(use_locking, use_nesterov)
+            self.opt.set_device("CPU")
+
+        elif use_lazy:
+            self._is_device = True
             self.opt = P.Adam(use_locking, use_nesterov)
-
-
-
-        if use_amsgrad:
-            self._ps_push = P.Push("ApplyAdamWithAmsgrad", [0, 1, 2, 3])
-        else:
+            self.sparse_opt = P.FusedSparseLazyAdam(use_locking, use_nesterov)
+            self.sparse_opt.set_device("CPU")
+            self._ps_pull = P.Pull()
             self._ps_push = P.Push("Adam", [0, 1, 2])
             self._ps_push.add_prim_attr("use_nesterov", use_nesterov)
+            self._init_distributed_opts(use_locking, use_nesterov)

-
+        else:
+            self._is_device = True
+            if use_amsgrad:
+                self.opt = P.ApplyAdamWithAmsgrad(beta1, beta2, eps, use_locking)
+            else:
+                self.opt = P.Adam(use_locking, use_nesterov)
+            self.sparse_opt = P.FusedSparseAdam(use_locking, use_nesterov)
+            self.sparse_opt.set_device("CPU")
+            self._ps_pull = P.Pull()
+            if use_amsgrad:
+                self._ps_push = P.Push("ApplyAdamWithAmsgrad", [0, 1, 2, 3])
+            else:
+                self._ps_push = P.Push("Adam", [0, 1, 2])
+            self._ps_push.add_prim_attr("use_nesterov", use_nesterov)
+
+            self._init_distributed_opts(use_locking, use_nesterov)
+
+    def _apply_adam(self, params, beta1_power, beta2_power, moment1, moment2, lr, gradients):
+        """Execute Adam optimizer and its variants."""
+        if self.use_offload:
+            if self.is_group_lr:
+                success = self.map_reverse(F.partial(_adam_opt, self.opt, beta1_power, beta2_power, self.beta1,
+                                                     self.beta2, self.eps), lr, gradients, params, moment1, moment2)
+            else:
+                success = self.map_reverse(F.partial(_adam_opt, self.opt, beta1_power, beta2_power, self.beta1,
+                                                     self.beta2, self.eps, lr), gradients, params, moment1, moment2)
+        # Lazy adam or normal adam
+        else:
+            if self.use_dist_optimizer:
+                if self.use_dist_optimizer and self.use_amsgrad:
+                    raise ValueError(f"Adam with amsgrad is currently not supporting distributed training!"
+                                     f"Please set use_amsgrad=False for distributed training.")
+                if self.is_group_lr:
+                    if self.use_lazy:
+                        success = self.map_reverse(F.partial(_lazy_adam_opt, self.opt, self.sparse_opt,
+                                                             self._ps_push, self._ps_pull, self.use_locking,
+                                                             self.use_nesterov,
+                                                             self._is_device, beta1_power, beta2_power,
+                                                             self.beta1, self.beta2, self.eps),
+                                                   lr, gradients, self._parameters, self.moment1, self.moment2,
+                                                   self.ps_parameters, self.cache_enable, self.dense_lazyadam_opts,
+                                                   self.use_dense_opt_flags, self.sparse_lazyadam_opts,
+                                                   self.use_sparse_opt_flags)
+                    # Normal Adam
+                    else:
+                        success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt, self._ps_push,
+                                                      self._ps_pull, self.use_locking, self.use_nesterov,
+                                                      self._is_device, beta1_power, beta2_power, self.beta1, self.beta2,
+                                                      self.eps), lr, gradients, params, moment1, moment2,
+                                            self.ps_parameters, self.cache_enable, self.dense_adam_opts,
+                                            self.use_dense_opt_flags, self.sparse_adam_opts, self.use_sparse_opt_flags)
+                else:
+                    if self.use_lazy:
+                        success = self.map_reverse(F.partial(_lazy_adam_opt, self.opt, self.sparse_opt, self._ps_push,
+                                                             self._ps_pull, self.use_locking, self.use_nesterov,
+                                                             self._is_device, beta1_power, beta2_power, self.beta1,
+                                                             self.beta2, self.eps, lr), gradients, self._parameters,
+                                                   self.moment1, self.moment2, self.ps_parameters, self.cache_enable,
+                                                   self.dense_lazyadam_opts, self.use_dense_opt_flags,
+                                                   self.sparse_lazyadam_opts, self.use_sparse_opt_flags)
+                    else:
+                        success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt, self._ps_push,
+                                                      self._ps_pull, self.use_locking, self.use_nesterov,
+                                                      self._is_device, beta1_power, beta2_power, self.beta1, self.beta2,
+                                                      self.eps, lr), gradients, params, moment1, moment2,
+                                            self.ps_parameters, self.cache_enable, self.dense_adam_opts,
+                                            self.use_dense_opt_flags, self.sparse_adam_opts, self.use_sparse_opt_flags)
+            else:
+                if self.is_group_lr:
+                    if self.use_lazy:
+                        success = self.map_(F.partial(_lazy_adam_opt, self.opt, self.sparse_opt, self._ps_push,
+                                                      self._ps_pull, self.use_locking, self.use_nesterov,
+                                                      self._is_device, beta1_power, beta2_power, self.beta1, self.beta2,
+                                                      self.eps), lr, gradients, params, moment1, moment2,
+                                            self.ps_parameters, self.cache_enable)
+                    else:
+                        if self.use_amsgrad:
+                            success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt, self._ps_push,
+                                                          self._ps_pull, self.use_locking, self.use_nesterov,
+                                                          self._is_device, beta1_power, beta2_power,
+                                                          self.beta1, self.beta2, self.eps), lr, gradients, params,
+                                                moment1, moment2, self.vhat, self.ps_parameters, self.cache_enable)
+                        else:
+                            success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt, self._ps_push,
+                                                          self._ps_pull, self.use_locking, self.use_nesterov,
+                                                          self._is_device, beta1_power, beta2_power,
+                                                          self.beta1, self.beta2, self.eps), lr, gradients, params,
+                                                moment1, moment2, self.ps_parameters, self.cache_enable)
+                else:
+                    if self.use_lazy:
+                        success = self.map_(F.partial(_lazy_adam_opt, self.opt, self.sparse_opt, self._ps_push,
+                                                      self._ps_pull, self.use_locking, self.use_nesterov,
+                                                      self._is_device, beta1_power, beta2_power, self.beta1, self.beta2,
+                                                      self.eps, lr), gradients, params, moment1, moment2,
+                                            self.ps_parameters, self.cache_enable)
+                    else:
+                        if self.use_amsgrad:
+                            success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt, self._ps_push,
+                                                          self._ps_pull, self.use_locking, self.use_nesterov,
+                                                          self._is_device, beta1_power, beta2_power,
+                                                          self.beta1, self.beta2, self.eps, lr), gradients, params,
+                                                moment1, moment2, self.vhat, self.ps_parameters, self.cache_enable)
+                        else:
+                            success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt, self._ps_push,
+                                                          self._ps_pull, self.use_locking, self.use_nesterov,
+                                                          self._is_device, beta1_power, beta2_power,
+                                                          self.beta1, self.beta2, self.eps, lr), gradients, params,
+                                                moment1, moment2, self.ps_parameters, self.cache_enable)

-
+        return success
+
+    @jit
     def construct(self, gradients):
         params = self._parameters
         moment1 = self.moment1
         moment2 = self.moment2
-        vhat = self.vhat
         gradients = self.flatten_gradients(gradients)
         gradients = self.decay_weight(gradients)
-
+        if not self.use_offload:
+            gradients = self.gradients_centralization(gradients)
         gradients = self.scale_grad(gradients)
         gradients = self._grad_sparse_indices_deduplicate(gradients)
         lr = self.get_lr()
@@ -544,38 +915,8 @@ class Adam(Optimizer):
         self.beta1_power = beta1_power
         beta2_power = self.beta2_power * self.beta2
         self.beta2_power = beta2_power
-        if self.use_dist_optimizer:
-            if self.use_amsgrad:
-                raise Exception("""Adam with amsgrad is currently not supporting distributed training!
-                                Please set use_amsgrad=False for distributed training.""")
-            if self.is_group_lr:
-                success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt, self._ps_push, self._ps_pull,
-                                              self.use_locking, self.use_nesterov, self._is_device,
-                                              beta1_power, beta2_power, self.beta1, self.beta2, self.eps),
-                                    lr, gradients, params, moment1, moment2, self.ps_parameters, self.cache_enable,
-                                    self.dense_adam_opts, self.use_dense_opt_flags,
-                                    self.sparse_adam_opts, self.use_sparse_opt_flags)
-            else:
-                success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt, self._ps_push, self._ps_pull,
-                                              self.use_locking, self.use_nesterov, self._is_device,
-                                              beta1_power, beta2_power, self.beta1, self.beta2, self.eps, lr),
-                                    gradients, params, moment1, moment2, self.ps_parameters, self.cache_enable,
-                                    self.dense_adam_opts, self.use_dense_opt_flags,
-                                    self.sparse_adam_opts, self.use_sparse_opt_flags)
-        else:
-            if self.is_group_lr:
-                success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt, self._ps_push, self._ps_pull,
-                                              self.use_locking, self.use_nesterov, self.use_amsgrad, self._is_device,
-                                              beta1_power, beta2_power, self.beta1, self.beta2, self.eps),
-                                    lr, gradients, params, moment1, moment2, vhat,
-                                    self.ps_parameters, self.cache_enable)
-            else:
-                success = self.map_(F.partial(_adam_opt, self.opt, self.sparse_opt, self._ps_push, self._ps_pull,
-                                              self.use_locking, self.use_nesterov, self.use_amsgrad, self._is_device,
-                                              beta1_power, beta2_power, self.beta1, self.beta2, self.eps, lr),
-                                    gradients, params, moment1, moment2, vhat, self.ps_parameters, self.cache_enable)

-        return
+        return self._apply_adam(params, beta1_power, beta2_power, moment1, moment2, lr, gradients)

     @Optimizer.target.setter
     def target(self, value):
@@ -630,13 +971,13 @@ class AdamWeightDecay(Optimizer):
     :math:`m` represents the 1st moment vector `moment1`, :math:`v` represents the 2nd moment vector `moment2`,
     :math:`g` represents `gradients`, :math:`\gamma` represents `learning_rate`,
     :math:`\beta_1, \beta_2` represent `beta1` and `beta2`, :math:`t` represents the current step,
-    :math:`w` represents `params`, :math:`\
+    :math:`w` represents `params`, :math:`\lambda` represents `weight_decay`.

     Note:
         There is usually no connection between a optimizer and mixed precision. But when `FixedLossScaleManager` is used
         and `drop_overflow_update` in `FixedLossScaleManager` is set to False, optimizer needs to set the 'loss_scale'.
         As this optimizer has no argument of `loss_scale`, so `loss_scale` needs to be processed by other means, refer
-        document `LossScale <https://www.mindspore.cn/tutorials/
+        document `LossScale <https://www.mindspore.cn/tutorials/en/r2.0/advanced/mixed_precision.html>`_ to
         process `loss_scale` correctly.

     If parameters are not grouped, the `weight_decay` in optimizer will be applied on the network parameters without
@@ -753,7 +1094,7 @@ class AdamWeightDecay(Optimizer):
         else:
             self.use_fused_opt = False

-    @
+    @jit
     def construct(self, gradients):
         gradients = self.flatten_gradients(gradients)
         weight_decay = self.get_weight_decay()
@@ -804,7 +1145,7 @@ class AdamWeightDecay(Optimizer):
         """
         self._set_base_target(value)
         if value == 'CPU':
-            self.fused_opt.
+            self.fused_opt.set_device("CPU")
             self.use_fused_opt = True
         else:
             self.use_fused_opt = False
@@ -949,6 +1290,7 @@ class AdamOffload(Optimizer):
         >>> model = ms.Model(net, loss_fn=loss, optimizer=optim)
         """

+    @deprecated("2.0", "Adam", False)
     def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False,
                  use_nesterov=False, weight_decay=0.0, loss_scale=1.0):
         super(AdamOffload, self).__init__(learning_rate, params, weight_decay, loss_scale)
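The `@deprecated("2.0", "Adam", False)` marker added above points users of `AdamOffload` at the unified `Adam` entry point. A hedged migration sketch (a `params` list of trainable parameters is assumed to exist):

```python
import mindspore.nn as nn

# 1.x style: dedicated offload optimizer class (still present, but now marked deprecated).
optim_old = nn.AdamOffload(params, learning_rate=1e-3)

# 2.0 style: request the same offload behaviour through Adam's keyword argument.
optim_new = nn.Adam(params, learning_rate=1e-3, use_offload=True)
```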
@@ -965,9 +1307,9 @@ class AdamOffload(Optimizer):
         self.moment1 = self._parameters.clone(prefix="moment1", init='zeros')
         self.moment2 = self._parameters.clone(prefix="moment2", init='zeros')
         self.opt = P.AdamNoUpdateParam(use_locking, use_nesterov)
-        self.opt.
+        self.opt.set_device("CPU")

-    @
+    @jit
     def construct(self, gradients):
         params = self._parameters
         moment1 = self.moment1