mindspore-2.2.14-cp39-cp39-win_amd64.whl → mindspore-2.3.0-cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +6 -5
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +76 -18
- mindspore/_extends/builtin_operations.py +2 -1
- mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
- mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
- mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
- mindspore/_extends/parse/__init__.py +18 -14
- mindspore/_extends/parse/compile_config.py +258 -0
- mindspore/_extends/parse/namespace.py +2 -2
- mindspore/_extends/parse/parser.py +174 -62
- mindspore/_extends/parse/resources.py +45 -14
- mindspore/_extends/parse/standard_method.py +142 -240
- mindspore/{ops/_op_impl/tbe/atomic_addr_clean.py → _extends/pijit/__init__.py} +6 -16
- mindspore/_extends/pijit/pijit_func_white_list.py +343 -0
- mindspore/_extends/remote/kernel_build_server.py +2 -0
- mindspore/_profiler.py +30 -0
- mindspore/amp.py +51 -24
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/base.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +2 -2
- mindspore/boost/grad_freeze.py +2 -2
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/boost/less_batch_normalization.py +9 -6
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +15 -4
- mindspore/common/_jit_fallback_utils.py +2 -3
- mindspore/common/_register_for_adapter.py +7 -0
- mindspore/common/_register_for_recompute.py +48 -0
- mindspore/common/_register_for_tensor.py +8 -9
- mindspore/common/_stub_tensor.py +7 -1
- mindspore/common/_utils.py +5 -17
- mindspore/common/api.py +411 -106
- mindspore/common/auto_dynamic_shape.py +27 -14
- mindspore/common/dtype.py +17 -10
- mindspore/common/dump.py +6 -8
- mindspore/common/file_system.py +48 -0
- mindspore/common/generator.py +260 -0
- mindspore/common/hook_handle.py +51 -4
- mindspore/common/initializer.py +1 -1
- mindspore/common/jit_config.py +34 -14
- mindspore/common/lazy_inline.py +72 -19
- mindspore/common/mindir_util.py +12 -2
- mindspore/common/mutable.py +79 -14
- mindspore/common/no_inline.py +54 -0
- mindspore/common/np_dtype.py +25 -0
- mindspore/common/parameter.py +30 -11
- mindspore/common/recompute.py +262 -0
- mindspore/common/seed.py +9 -9
- mindspore/common/sparse_tensor.py +272 -24
- mindspore/common/symbol.py +122 -0
- mindspore/common/tensor.py +468 -494
- mindspore/communication/__init__.py +6 -11
- mindspore/communication/_comm_helper.py +5 -0
- mindspore/communication/comm_func.py +1140 -0
- mindspore/communication/management.py +115 -102
- mindspore/config/op_info.config +22 -54
- mindspore/context.py +346 -63
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +6 -6
- mindspore/dataset/audio/transforms.py +711 -158
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/engine/cache_client.py +2 -2
- mindspore/dataset/engine/datasets.py +140 -83
- mindspore/dataset/engine/datasets_audio.py +14 -14
- mindspore/dataset/engine/datasets_standard_format.py +33 -3
- mindspore/dataset/engine/datasets_text.py +38 -38
- mindspore/dataset/engine/datasets_user_defined.py +78 -59
- mindspore/dataset/engine/datasets_vision.py +77 -73
- mindspore/dataset/engine/offload.py +5 -7
- mindspore/dataset/engine/queue.py +56 -38
- mindspore/dataset/engine/validators.py +11 -5
- mindspore/dataset/text/__init__.py +3 -3
- mindspore/dataset/text/transforms.py +408 -121
- mindspore/dataset/text/utils.py +9 -9
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/transforms.py +261 -76
- mindspore/dataset/utils/browse_dataset.py +9 -9
- mindspore/dataset/vision/__init__.py +8 -8
- mindspore/dataset/vision/c_transforms.py +10 -10
- mindspore/dataset/vision/py_transforms_util.py +1 -1
- mindspore/dataset/vision/transforms.py +2844 -549
- mindspore/dataset/vision/utils.py +161 -10
- mindspore/dataset/vision/validators.py +14 -2
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/optim/__init__.py +12 -2
- mindspore/experimental/optim/adadelta.py +161 -0
- mindspore/experimental/optim/adagrad.py +168 -0
- mindspore/experimental/optim/adam.py +35 -34
- mindspore/experimental/optim/adamax.py +170 -0
- mindspore/experimental/optim/adamw.py +40 -16
- mindspore/experimental/optim/asgd.py +153 -0
- mindspore/experimental/optim/lr_scheduler.py +66 -121
- mindspore/experimental/optim/nadam.py +157 -0
- mindspore/experimental/optim/optimizer.py +15 -8
- mindspore/experimental/optim/radam.py +194 -0
- mindspore/experimental/optim/rmsprop.py +154 -0
- mindspore/experimental/optim/rprop.py +164 -0
- mindspore/experimental/optim/sgd.py +28 -19
- mindspore/hal/__init__.py +40 -0
- mindspore/hal/_ascend.py +57 -0
- mindspore/hal/_base.py +57 -0
- mindspore/hal/_cpu.py +56 -0
- mindspore/hal/_gpu.py +57 -0
- mindspore/hal/device.py +356 -0
- mindspore/hal/event.py +179 -0
- mindspore/hal/memory.py +326 -0
- mindspore/hal/stream.py +339 -0
- mindspore/include/api/data_type.h +2 -2
- mindspore/include/api/dual_abi_helper.h +16 -3
- mindspore/include/api/model.h +4 -3
- mindspore/include/api/status.h +14 -0
- mindspore/include/c_api/model_c.h +173 -0
- mindspore/include/c_api/ms/base/types.h +1 -0
- mindspore/include/c_api/types_c.h +19 -0
- mindspore/include/dataset/execute.h +1 -3
- mindspore/include/dataset/vision.h +54 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +2 -2
- mindspore/mindrecord/__init__.py +5 -1
- mindspore/mindrecord/config.py +809 -0
- mindspore/mindrecord/filereader.py +25 -0
- mindspore/mindrecord/filewriter.py +76 -58
- mindspore/mindrecord/mindpage.py +40 -6
- mindspore/mindrecord/shardutils.py +3 -2
- mindspore/mindrecord/shardwriter.py +7 -0
- mindspore/mindrecord/tools/cifar100_to_mr.py +8 -13
- mindspore/mindrecord/tools/cifar10_to_mr.py +9 -15
- mindspore/mindrecord/tools/csv_to_mr.py +4 -9
- mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
- mindspore/mindrecord/tools/mnist_to_mr.py +7 -12
- mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -6
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_np_dtype.dll +0 -0
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/mint/__init__.py +1137 -0
- mindspore/{rewrite/ast_transformers → mint/linalg}/__init__.py +9 -4
- mindspore/mint/nn/__init__.py +512 -0
- mindspore/mint/nn/functional.py +573 -0
- mindspore/mint/optim/__init__.py +24 -0
- mindspore/mint/optim/adamw.py +185 -0
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/multiprocessing/__init__.py +72 -0
- mindspore/nn/__init__.py +1 -0
- mindspore/nn/cell.py +213 -257
- mindspore/nn/dynamic_lr.py +2 -2
- mindspore/nn/extend/__init__.py +29 -0
- mindspore/nn/extend/basic.py +140 -0
- mindspore/nn/extend/embedding.py +143 -0
- mindspore/{rewrite/ast_creator_register.py → nn/extend/layer/__init__.py} +9 -19
- mindspore/nn/extend/layer/normalization.py +109 -0
- mindspore/nn/extend/pooling.py +117 -0
- mindspore/nn/layer/activation.py +83 -93
- mindspore/nn/layer/basic.py +177 -82
- mindspore/nn/layer/channel_shuffle.py +3 -16
- mindspore/nn/layer/container.py +3 -3
- mindspore/nn/layer/conv.py +75 -66
- mindspore/nn/layer/embedding.py +101 -43
- mindspore/nn/layer/embedding_service.py +531 -0
- mindspore/nn/layer/embedding_service_layer.py +393 -0
- mindspore/nn/layer/image.py +4 -7
- mindspore/nn/layer/math.py +1 -1
- mindspore/nn/layer/normalization.py +52 -66
- mindspore/nn/layer/padding.py +30 -39
- mindspore/nn/layer/pooling.py +18 -9
- mindspore/nn/layer/rnn_cells.py +6 -16
- mindspore/nn/layer/rnns.py +6 -5
- mindspore/nn/layer/thor_layer.py +1 -2
- mindspore/nn/layer/timedistributed.py +1 -1
- mindspore/nn/layer/transformer.py +52 -50
- mindspore/nn/learning_rate_schedule.py +6 -5
- mindspore/nn/loss/loss.py +62 -83
- mindspore/nn/optim/ada_grad.py +4 -2
- mindspore/nn/optim/adadelta.py +3 -1
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +102 -181
- mindspore/nn/optim/adamax.py +4 -2
- mindspore/nn/optim/adasum.py +3 -3
- mindspore/nn/optim/asgd.py +4 -2
- mindspore/nn/optim/ftrl.py +31 -61
- mindspore/nn/optim/lamb.py +5 -3
- mindspore/nn/optim/lars.py +2 -2
- mindspore/nn/optim/lazyadam.py +6 -4
- mindspore/nn/optim/momentum.py +13 -25
- mindspore/nn/optim/optimizer.py +6 -3
- mindspore/nn/optim/proximal_ada_grad.py +4 -2
- mindspore/nn/optim/rmsprop.py +9 -3
- mindspore/nn/optim/rprop.py +4 -2
- mindspore/nn/optim/sgd.py +5 -3
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
- mindspore/nn/probability/distribution/beta.py +2 -2
- mindspore/nn/probability/distribution/categorical.py +4 -6
- mindspore/nn/probability/distribution/cauchy.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -2
- mindspore/nn/probability/distribution/geometric.py +1 -1
- mindspore/nn/probability/distribution/gumbel.py +2 -2
- mindspore/nn/probability/distribution/logistic.py +1 -1
- mindspore/nn/probability/distribution/poisson.py +2 -2
- mindspore/nn/probability/distribution/uniform.py +2 -2
- mindspore/nn/reinforcement/_tensors_queue.py +13 -1
- mindspore/nn/wrap/__init__.py +2 -1
- mindspore/nn/wrap/cell_wrapper.py +58 -13
- mindspore/nn/wrap/grad_reducer.py +148 -8
- mindspore/nn/wrap/loss_scale.py +32 -9
- mindspore/numpy/__init__.py +2 -0
- mindspore/numpy/array_creations.py +2 -0
- mindspore/numpy/array_ops.py +6 -6
- mindspore/numpy/dtypes.py +3 -3
- mindspore/numpy/fft.py +431 -0
- mindspore/numpy/math_ops.py +62 -68
- mindspore/numpy/utils.py +3 -0
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +6 -5
- mindspore/ops/_grad_experimental/grad_array_ops.py +4 -129
- mindspore/ops/_grad_experimental/grad_comm_ops.py +89 -34
- mindspore/ops/_grad_experimental/grad_math_ops.py +68 -283
- mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
- mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
- mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
- mindspore/ops/_op_impl/__init__.py +0 -1
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
- mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +1 -1
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
- mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
- mindspore/ops/_op_impl/cpu/__init__.py +1 -3
- mindspore/ops/_op_impl/cpu/adam.py +2 -2
- mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
- mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
- mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
- mindspore/ops/_vmap/vmap_array_ops.py +164 -101
- mindspore/ops/_vmap/vmap_base.py +8 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +143 -58
- mindspore/ops/_vmap/vmap_image_ops.py +70 -13
- mindspore/ops/_vmap/vmap_math_ops.py +130 -58
- mindspore/ops/_vmap/vmap_nn_ops.py +249 -115
- mindspore/ops/_vmap/vmap_other_ops.py +1 -1
- mindspore/ops/auto_generate/__init__.py +31 -0
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +231 -0
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +250 -0
- mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
- mindspore/ops/auto_generate/gen_extend_func.py +980 -0
- mindspore/ops/auto_generate/gen_ops_def.py +6443 -0
- mindspore/ops/auto_generate/gen_ops_prim.py +13167 -0
- mindspore/ops/auto_generate/pyboost_inner_prim.py +429 -0
- mindspore/ops/composite/__init__.py +5 -2
- mindspore/ops/composite/base.py +121 -23
- mindspore/ops/composite/math_ops.py +10 -49
- mindspore/ops/composite/multitype_ops/_compile_utils.py +191 -618
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +25 -134
- mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
- mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +6 -1
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
- mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
- mindspore/ops/deprecated.py +14 -3
- mindspore/ops/extend/__init__.py +53 -0
- mindspore/ops/extend/array_func.py +218 -0
- mindspore/ops/extend/math_func.py +76 -0
- mindspore/ops/extend/nn_func.py +308 -0
- mindspore/ops/function/__init__.py +31 -11
- mindspore/ops/function/array_func.py +846 -1735
- mindspore/ops/function/clip_func.py +19 -31
- mindspore/ops/function/debug_func.py +1 -4
- mindspore/ops/function/fft_func.py +31 -0
- mindspore/ops/function/grad/grad_func.py +27 -20
- mindspore/ops/function/image_func.py +27 -21
- mindspore/ops/function/linalg_func.py +35 -68
- mindspore/ops/function/math_func.py +913 -2791
- mindspore/ops/function/nn_func.py +1439 -885
- mindspore/ops/function/other_func.py +6 -7
- mindspore/ops/function/parameter_func.py +5 -93
- mindspore/ops/function/random_func.py +254 -108
- mindspore/ops/function/reshard_func.py +102 -0
- mindspore/ops/function/sparse_func.py +4 -4
- mindspore/ops/function/sparse_unary_func.py +9 -16
- mindspore/ops/function/spectral_func.py +1 -1
- mindspore/ops/function/vmap_func.py +14 -14
- mindspore/ops/functional.py +342 -343
- mindspore/ops/op_info_register.py +16 -43
- mindspore/ops/operations/__init__.py +32 -23
- mindspore/ops/operations/_grad_ops.py +21 -853
- mindspore/ops/operations/_infer_ops.py +19 -0
- mindspore/ops/operations/_inner_ops.py +107 -518
- mindspore/ops/operations/_rl_inner_ops.py +2 -2
- mindspore/ops/operations/_scalar_ops.py +5 -480
- mindspore/ops/operations/_sequence_ops.py +6 -36
- mindspore/ops/operations/_tensor_array.py +8 -8
- mindspore/ops/operations/array_ops.py +108 -2705
- mindspore/ops/operations/comm_ops.py +801 -118
- mindspore/ops/operations/custom_ops.py +61 -120
- mindspore/ops/operations/debug_ops.py +104 -35
- mindspore/ops/operations/image_ops.py +1 -217
- mindspore/ops/operations/inner_ops.py +5 -40
- mindspore/ops/operations/linalg_ops.py +1 -49
- mindspore/ops/operations/manually_defined/__init__.py +24 -0
- mindspore/ops/operations/manually_defined/_inner.py +61 -0
- mindspore/ops/operations/manually_defined/ops_def.py +2016 -0
- mindspore/ops/operations/math_ops.py +572 -4667
- mindspore/ops/operations/nn_ops.py +248 -2162
- mindspore/ops/operations/other_ops.py +53 -45
- mindspore/ops/operations/random_ops.py +4 -53
- mindspore/ops/operations/reshard_ops.py +53 -0
- mindspore/ops/operations/sparse_ops.py +4 -4
- mindspore/ops/primitive.py +204 -103
- mindspore/ops/silent_check.py +5 -5
- mindspore/ops_generate/__init__.py +27 -0
- mindspore/ops_generate/arg_dtype_cast.py +250 -0
- mindspore/ops_generate/arg_handler.py +197 -0
- mindspore/ops_generate/gen_aclnn_implement.py +263 -0
- mindspore/ops_generate/gen_ops.py +1084 -0
- mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
- mindspore/ops_generate/gen_pyboost_func.py +968 -0
- mindspore/ops_generate/gen_utils.py +209 -0
- mindspore/ops_generate/op_proto.py +138 -0
- mindspore/ops_generate/pyboost_utils.py +354 -0
- mindspore/ops_generate/template.py +239 -0
- mindspore/parallel/__init__.py +6 -4
- mindspore/parallel/_auto_parallel_context.py +73 -3
- mindspore/parallel/_cell_wrapper.py +16 -9
- mindspore/parallel/_cost_model_context.py +1 -1
- mindspore/parallel/_dp_allreduce_fusion.py +159 -159
- mindspore/parallel/_parallel_serialization.py +29 -13
- mindspore/parallel/_ps_context.py +1 -1
- mindspore/parallel/_recovery_context.py +1 -1
- mindspore/parallel/_tensor.py +18 -11
- mindspore/parallel/_transformer/__init__.py +1 -1
- mindspore/parallel/_transformer/layers.py +1 -1
- mindspore/parallel/_transformer/loss.py +1 -1
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/op_parallel_config.py +1 -1
- mindspore/parallel/_transformer/transformer.py +2 -2
- mindspore/parallel/_utils.py +161 -6
- mindspore/parallel/algo_parameter_config.py +6 -8
- mindspore/parallel/checkpoint_transform.py +191 -32
- mindspore/parallel/cluster/__init__.py +15 -0
- mindspore/parallel/cluster/process_entity/__init__.py +18 -0
- mindspore/parallel/cluster/process_entity/_api.py +344 -0
- mindspore/parallel/cluster/process_entity/_utils.py +126 -0
- mindspore/parallel/cluster/run.py +136 -0
- mindspore/parallel/mpi/__init__.py +1 -1
- mindspore/parallel/mpi/_mpi_config.py +1 -1
- mindspore/parallel/parameter_broadcast.py +152 -0
- mindspore/parallel/shard.py +128 -17
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +3 -2
- mindspore/profiler/common/process_pool.py +41 -0
- mindspore/profiler/common/singleton.py +28 -0
- mindspore/profiler/common/util.py +125 -0
- mindspore/profiler/envprofiling.py +2 -2
- mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
- mindspore/profiler/parser/ascend_analysis/constant.py +53 -0
- mindspore/profiler/parser/ascend_analysis/file_manager.py +159 -0
- mindspore/profiler/parser/ascend_analysis/function_event.py +161 -0
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +131 -0
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +85 -0
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +57 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +116 -0
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +68 -0
- mindspore/profiler/parser/ascend_cluster_generator.py +14 -9
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -1
- mindspore/profiler/parser/ascend_flops_generator.py +20 -4
- mindspore/profiler/parser/ascend_hccl_generator.py +29 -278
- mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
- mindspore/profiler/parser/ascend_memory_generator.py +185 -0
- mindspore/profiler/parser/ascend_msprof_exporter.py +147 -146
- mindspore/profiler/parser/ascend_msprof_generator.py +73 -283
- mindspore/profiler/parser/ascend_op_generator.py +92 -42
- mindspore/profiler/parser/ascend_timeline_generator.py +296 -133
- mindspore/profiler/parser/base_timeline_generator.py +6 -0
- mindspore/profiler/parser/framework_parser.py +3 -2
- mindspore/profiler/parser/integrator.py +3 -1
- mindspore/profiler/parser/minddata_parser.py +72 -3
- mindspore/profiler/parser/msadvisor_analyzer.py +1 -1
- mindspore/profiler/parser/msadvisor_parser.py +1 -1
- mindspore/profiler/parser/profiler_info.py +16 -1
- mindspore/profiler/profiling.py +445 -190
- mindspore/rewrite/__init__.py +2 -13
- mindspore/rewrite/api/node.py +122 -36
- mindspore/rewrite/api/pattern_engine.py +2 -3
- mindspore/rewrite/api/scoped_value.py +16 -15
- mindspore/rewrite/api/symbol_tree.py +45 -29
- mindspore/rewrite/ast_helpers/__init__.py +3 -6
- mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
- mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
- mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
- mindspore/rewrite/common/__init__.py +1 -2
- mindspore/rewrite/common/config.py +24 -0
- mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
- mindspore/rewrite/{namer.py → common/namer.py} +63 -18
- mindspore/rewrite/common/namespace.py +118 -0
- mindspore/rewrite/node/__init__.py +5 -5
- mindspore/rewrite/node/call_function.py +23 -7
- mindspore/rewrite/node/cell_container.py +7 -3
- mindspore/rewrite/node/control_flow.py +53 -28
- mindspore/rewrite/node/node.py +212 -196
- mindspore/rewrite/node/node_manager.py +51 -22
- mindspore/rewrite/node/node_topological_manager.py +3 -23
- mindspore/rewrite/parsers/__init__.py +12 -0
- mindspore/rewrite/parsers/arguments_parser.py +8 -9
- mindspore/rewrite/parsers/assign_parser.py +637 -413
- mindspore/rewrite/parsers/attribute_parser.py +3 -4
- mindspore/rewrite/parsers/class_def_parser.py +115 -148
- mindspore/rewrite/parsers/constant_parser.py +5 -5
- mindspore/rewrite/parsers/container_parser.py +4 -6
- mindspore/rewrite/parsers/expr_parser.py +55 -0
- mindspore/rewrite/parsers/for_parser.py +31 -98
- mindspore/rewrite/parsers/function_def_parser.py +13 -5
- mindspore/rewrite/parsers/if_parser.py +28 -10
- mindspore/rewrite/parsers/module_parser.py +8 -182
- mindspore/rewrite/parsers/parser.py +1 -5
- mindspore/rewrite/parsers/parser_register.py +1 -1
- mindspore/rewrite/parsers/return_parser.py +5 -10
- mindspore/rewrite/parsers/while_parser.py +59 -0
- mindspore/rewrite/sparsify/utils.py +1 -1
- mindspore/rewrite/symbol_tree/__init__.py +20 -0
- mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +704 -185
- mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
- mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
- mindspore/run_check/_check_version.py +6 -14
- mindspore/run_check/run_check.py +1 -1
- mindspore/safeguard/rewrite_obfuscation.py +9 -19
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +6 -5
- mindspore/train/_utils.py +178 -4
- mindspore/train/amp.py +167 -245
- mindspore/train/anf_ir_pb2.py +14 -2
- mindspore/train/callback/__init__.py +5 -2
- mindspore/train/callback/_backup_and_restore.py +5 -5
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +143 -29
- mindspore/train/callback/_cluster_monitor.py +201 -0
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_flops_collector.py +238 -0
- mindspore/train/callback/_landscape.py +15 -9
- mindspore/train/callback/_loss_monitor.py +2 -2
- mindspore/train/callback/_mindio_ttp.py +443 -0
- mindspore/train/callback/_on_request_exit.py +2 -2
- mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
- mindspore/train/callback/_summary_collector.py +7 -7
- mindspore/train/callback/_time_monitor.py +3 -3
- mindspore/train/data_sink.py +6 -5
- mindspore/train/dataset_helper.py +60 -21
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/accuracy.py +7 -7
- mindspore/train/metrics/confusion_matrix.py +8 -6
- mindspore/train/metrics/cosine_similarity.py +6 -4
- mindspore/train/metrics/error.py +2 -2
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/metrics/perplexity.py +2 -1
- mindspore/train/metrics/topk.py +2 -2
- mindspore/train/mind_ir_pb2.py +89 -15
- mindspore/train/model.py +290 -60
- mindspore/train/serialization.py +495 -220
- mindspore/train/summary/_summary_adapter.py +1 -1
- mindspore/train/summary/summary_record.py +51 -28
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/turbojpeg.dll +0 -0
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.2.14.dist-info → mindspore-2.3.0.dist-info}/METADATA +3 -3
- mindspore-2.3.0.dist-info/RECORD +1400 -0
- {mindspore-2.2.14.dist-info → mindspore-2.3.0.dist-info}/entry_points.txt +1 -0
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
- mindspore/gen_ops.py +0 -273
- mindspore/nn/layer/flash_attention.py +0 -189
- mindspore/ops/_op_impl/cpu/concat.py +0 -39
- mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
- mindspore/ops/_op_impl/tbe/__init__.py +0 -47
- mindspore/ops/_op_impl/tbe/abs.py +0 -38
- mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/acos.py +0 -37
- mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/acosh.py +0 -37
- mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
- mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
- mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
- mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
- mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
- mindspore/ops/_op_impl/tbe/add.py +0 -42
- mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/add_n.py +0 -39
- mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
- mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
- mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
- mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
- mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
- mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
- mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
- mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
- mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
- mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
- mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
- mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
- mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
- mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
- mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
- mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
- mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
- mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
- mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
- mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/asin.py +0 -37
- mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/asinh.py +0 -37
- mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/assign.py +0 -79
- mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
- mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
- mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
- mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
- mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
- mindspore/ops/_op_impl/tbe/atan.py +0 -37
- mindspore/ops/_op_impl/tbe/atan2.py +0 -38
- mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/atanh.py +0 -37
- mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
- mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
- mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
- mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
- mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
- mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
- mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
- mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
- mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
- mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
- mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
- mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
- mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
- mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
- mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
- mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
- mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
- mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
- mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
- mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
- mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
- mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
- mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cast.py +0 -55
- mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/cdist.py +0 -38
- mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/ceil.py +0 -37
- mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/celu.py +0 -39
- mindspore/ops/_op_impl/tbe/centralization.py +0 -39
- mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
- mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
- mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/concat.py +0 -40
- mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
- mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
- mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
- mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
- mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
- mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
- mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/cos.py +0 -37
- mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/cosh.py +0 -37
- mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
- mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
- mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/cummin.py +0 -41
- mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
- mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
- mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
- mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
- mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
- mindspore/ops/_op_impl/tbe/diag.py +0 -38
- mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
- mindspore/ops/_op_impl/tbe/dilation.py +0 -40
- mindspore/ops/_op_impl/tbe/div.py +0 -41
- mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
- mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
- mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
- mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
- mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
- mindspore/ops/_op_impl/tbe/elu.py +0 -38
- mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/equal.py +0 -42
- mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/erf.py +0 -37
- mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfc.py +0 -37
- mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
- mindspore/ops/_op_impl/tbe/exp.py +0 -40
- mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
- mindspore/ops/_op_impl/tbe/expm1.py +0 -37
- mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
- mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
- mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
- mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/fill.py +0 -56
- mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/flatten.py +0 -48
- mindspore/ops/_op_impl/tbe/floor.py +0 -37
- mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
- mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
- mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
- mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
- mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
- mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
- mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
- mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
- mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
- mindspore/ops/_op_impl/tbe/gelu.py +0 -37
- mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/ger.py +0 -43
- mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/greater.py +0 -43
- mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
- mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
- mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
- mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
- mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
- mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
- mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
- mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/im2col.py +0 -42
- mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
- mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
- mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
- mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/inv.py +0 -38
- mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/invert.py +0 -37
- mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/iou.py +0 -38
- mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/is_close.py +0 -40
- mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
- mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
- mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
- mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
- mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
- mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
- mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
- mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
- mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
- mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
- mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/lerp.py +0 -38
- mindspore/ops/_op_impl/tbe/less.py +0 -41
- mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/log.py +0 -40
- mindspore/ops/_op_impl/tbe/log1p.py +0 -37
- mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
- mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
- mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
- mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
- mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn.py +0 -41
- mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
- mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
- mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/matmul.py +0 -53
- mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
- mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
- mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
- mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
- mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
- mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum.py +0 -39
- mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
- mindspore/ops/_op_impl/tbe/minimum.py +0 -40
- mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/mish.py +0 -37
- mindspore/ops/_op_impl/tbe/mod.py +0 -41
- mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/mul.py +0 -37
- mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
- mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
- mindspore/ops/_op_impl/tbe/neg.py +0 -39
- mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
- mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
- mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
- mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
- mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
- mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
- mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
- mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
- mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
- mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
- mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
- mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/pack.py +0 -58
- mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
- mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
- mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
- mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
- mindspore/ops/_op_impl/tbe/pdist.py +0 -36
- mindspore/ops/_op_impl/tbe/pooling.py +0 -46
- mindspore/ops/_op_impl/tbe/population_count.py +0 -38
- mindspore/ops/_op_impl/tbe/pow.py +0 -41
- mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/prelu.py +0 -37
- mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/range.py +0 -39
- mindspore/ops/_op_impl/tbe/real_div.py +0 -38
- mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
- mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
- mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
- mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
- mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
- mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
- mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
- mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
- mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6.py +0 -38
- mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
- mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/renorm.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
- mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
- mindspore/ops/_op_impl/tbe/rint.py +0 -37
- mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/roll.py +0 -42
- mindspore/ops/_op_impl/tbe/round.py +0 -38
- mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
- mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
- mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
- mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
- mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
- mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
- mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
- mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
- mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
- mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
- mindspore/ops/_op_impl/tbe/select.py +0 -38
- mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/selu.py +0 -39
- mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sgd.py +0 -62
- mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
- mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/sign.py +0 -38
- mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/sin.py +0 -37
- mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sinh.py +0 -37
- mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/slice.py +0 -58
- mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
- mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
- mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
- mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax.py +0 -37
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
- mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
- mindspore/ops/_op_impl/tbe/softplus.py +0 -37
- mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
- mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/softsign.py +0 -37
- mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sort.py +0 -38
- mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
- mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
- mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
- mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
- mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
- mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
- mindspore/ops/_op_impl/tbe/split_d.py +0 -38
- mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/split_v.py +0 -39
- mindspore/ops/_op_impl/tbe/splitv.py +0 -39
- mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
- mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
- mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
- mindspore/ops/_op_impl/tbe/square.py +0 -38
- mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
- mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
- mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
- mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
- mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
- mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
- mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
- mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
- mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
- mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
- mindspore/ops/_op_impl/tbe/sub.py +0 -39
- mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tan.py +0 -38
- mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh.py +0 -37
- mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
- mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
- mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
- mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
- mindspore/ops/_op_impl/tbe/tile.py +0 -37
- mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k.py +0 -42
- mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
- mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
- mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
- mindspore/ops/_op_impl/tbe/transpose.py +0 -60
- mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
- mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
- mindspore/ops/_op_impl/tbe/trunc.py +0 -39
- mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
- mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
- mindspore/ops/_op_impl/tbe/unpack.py +0 -38
- mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
- mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
- mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
- mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
- mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
- mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
- mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
- mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
- mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
- mindspore/ops/_tracefunc.py +0 -241
- mindspore/ops/arg_dtype_cast.py +0 -54
- mindspore/rewrite/api/tree_node_helper.py +0 -60
- mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
- mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
- mindspore/rewrite/namespace.py +0 -53
- mindspore-2.2.14.dist-info/RECORD +0 -1924
- {mindspore-2.2.14.dist-info → mindspore-2.3.0.dist-info}/WHEEL +0 -0
- {mindspore-2.2.14.dist-info → mindspore-2.3.0.dist-info}/top_level.txt +0 -0
@@ -229,11 +229,11 @@ class Poisson(Distribution):
         rate = self._check_param_type(rate)
         log_rate = self.log(rate)
         zeros = F.fill(self.dtypeop(value), self.shape(value), 0.0)
-
+        neginf = F.fill(self.dtypeop(value), self.shape(value), -np.inf)
         safe_x = self.select(self.less(value, zeros), zeros, value)
         y = log_rate * safe_x - self.lgamma(safe_x + 1.)
         comp = self.equal(value, safe_x)
-        log_unnormalized_prob = self.select(comp, y,
+        log_unnormalized_prob = self.select(comp, y, neginf)
         log_normalization = self.exp(log_rate)
         return log_unnormalized_prob - log_normalization
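For context, the hunk above fills a `neginf` tensor and routes it through `self.select`, so values outside the Poisson support now map to -inf in the log-probability. A rough NumPy illustration of the same masking pattern (illustrative only, not the library code):

import math
import numpy as np

def poisson_log_prob_sketch(value, rate):
    """Standalone sketch of the masked log-pmf computed in the hunk above."""
    value = np.asarray(value, dtype=np.float64)
    zeros = np.zeros_like(value)
    neginf = np.full_like(value, -np.inf)                  # mirrors the added neginf fill
    safe_x = np.where(value < zeros, zeros, value)         # clamp negatives before lgamma
    y = np.log(rate) * safe_x - np.vectorize(math.lgamma)(safe_x + 1.0)
    comp = value == safe_x                                 # True only where value is in support
    log_unnormalized_prob = np.where(comp, y, neginf)      # out-of-support values -> -inf
    log_normalization = np.exp(np.log(rate))               # equals rate; kept to mirror the diff
    return log_unnormalized_prob - log_normalization

print(poisson_log_prob_sketch([-1.0, 0.0, 2.0], rate=3.0))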
@@ -31,9 +31,9 @@ class Uniform(Distribution):
    and the probability density function:

    .. math::
-        f(x, a, b) = 1 / (b - a)
+        f(x, a, b) = 1 / (b - a)

-
+    Where :math:`a, b` are the lower and upper bound respectively.

    Args:
        low (int, float, list, numpy.ndarray, Tensor): The lower bound of the distribution. Default: ``None`` .
@@ -37,7 +37,7 @@ class TensorsQueue(Cell):
        name (string): the name of this TensorsQueue. Default: "TQ".

    Raises:
-        TypeError: If `dtype` is not
+        TypeError: If `dtype` is not mindspore number type.
        ValueError: If `size` is less than 0.
        ValueError: If `shapes` size is less than 1.
@@ -69,6 +69,7 @@ class TensorsQueue(Cell):
        self.tensors_q_clear = rl_ops.TensorsQueueClear()
        self.tensors_q_close = rl_ops.TensorsQueueClose()
        self.tensors_q_size = rl_ops.TensorsQueueSize()
+        self.__is_tensors_queue__ = True

    def put(self, element):
        """
@@ -103,6 +104,17 @@ class TensorsQueue(Cell):
        element = self.tensors_q_pop(self.handle_)
        return element

+    def __graph_pop__(self):
+        """
+        Get one element int the front of the TensorsQueue, and remove it.
+        This is only used in graph mode.
+
+        Returns:
+            tuple(Tensors), the element in TensorsQueue.
+        """
+        element = self.tensors_q_pop(self.handle_)
+        return self.handle_, element
+
    def size(self):
        """
        Get the used/available size of the TensorsQueue, and remove it.
mindspore/nn/wrap/__init__.py
CHANGED
@@ -24,7 +24,7 @@ from mindspore.nn.wrap.cell_wrapper import ForwardValueAndGrad, TrainOneStepCell
    GradAccumulationCell
 from mindspore.nn.wrap.loss_scale import TrainOneStepWithLossScaleCell,\
    DynamicLossScaleUpdateCell, FixedLossScaleUpdateCell
-from mindspore.nn.wrap.grad_reducer import DistributedGradReducer
+from mindspore.nn.wrap.grad_reducer import DistributedGradReducer, PipelineGradReducer
 from mindspore.nn.layer.timedistributed import TimeDistributed


@@ -40,6 +40,7 @@ __all__ = [
    "GetNextSingleOp",
    "TrainOneStepWithLossScaleCell",
    "DistributedGradReducer",
+    "PipelineGradReducer",
    "ParameterUpdate",
    "DynamicLossScaleUpdateCell",
    "FixedLossScaleUpdateCell",
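With the import and __all__ entries above, PipelineGradReducer should be importable straight from mindspore.nn; the docstring example further down in this diff constructs it that way. A two-line sketch of the new public path:

from mindspore import nn

reducer_cls = nn.PipelineGradReducer  # newly exported alongside DistributedGradReducer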
@@ -17,16 +17,18 @@
 from __future__ import absolute_import
 from __future__ import division

+import os
 from types import FunctionType, MethodType

 from mindspore import log as logger
 from mindspore.parallel._utils import _get_device_num, _get_gradients_mean,\
     _get_parallel_mode, _get_enable_parallel_optimizer, _is_pynative_parallel
-from mindspore.context import ParallelMode
+from mindspore.context import ParallelMode, GRAPH_MODE, get_context
 from mindspore import _checkparam as validator
 from mindspore import ops, nn
 from mindspore.common import dtype as mstype
 from mindspore.common.parameter import Parameter, ParameterTuple
+from mindspore.common.tensor import Tensor
 from mindspore.ops.primitive import _primexpr
 from mindspore.ops import composite as C
 from mindspore.ops import functional as F
@@ -99,7 +101,7 @@ class WithLossCell(Cell):
        >>> from mindspore import Tensor, nn
        >>> import numpy as np
        >>> # Define the network structure of LeNet5. Refer to
-        >>> # https://gitee.com/mindspore/docs/blob/
+        >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/lenet.py
        >>> net = LeNet5()
        >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
        >>> net_with_criterion = nn.WithLossCell(net, loss_fn)
@@ -132,7 +134,7 @@ class WithLossCell(Cell):
    Examples:
        >>> from mindspore import nn
        >>> # Define the network structure of LeNet5. Refer to
-        >>> # https://gitee.com/mindspore/docs/blob/
+        >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/lenet.py
        >>> net = LeNet5()
        >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
        >>> net_with_criterion = nn.WithLossCell(net, loss_fn)
@@ -175,7 +177,7 @@ class WithGradCell(Cell):
        >>> import mindspore as ms
        >>> from mindspore import nn
        >>> # Defined a network without loss function, taking LeNet5 as an example.
-        >>> # Refer to https://gitee.com/mindspore/docs/blob/
+        >>> # Refer to https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/lenet.py
        >>> net = LeNet5()
        >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits()
        >>> grad_net = nn.WithGradCell(net, loss_fn)
@@ -346,7 +348,7 @@ class TrainOneStepCell(Cell):
    Examples:
        >>> import mindspore.nn as nn
        >>> # Define the network structure of LeNet5. Refer to
-        >>> # https://gitee.com/mindspore/docs/blob/
+        >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/lenet.py
        >>> net = LeNet5()
        >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits()
        >>> optim = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
@@ -454,7 +456,7 @@ class GetNextSingleOp(Cell):
        queue_name (str): Queue name to fetch the data.

    Outputs:
-        tuple[Tensor], the data
+        tuple[Tensor], the data gets from Dataset.

    Supported Platforms:
        ``Ascend`` ``GPU``
@@ -586,7 +588,7 @@ class MicroBatchInterleaved(Cell):
    Examples:
        >>> import mindspore.nn as nn
        >>> # Define the network structure of LeNet5. Refer to
-        >>> # https://gitee.com/mindspore/docs/blob/
+        >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/lenet.py
        >>> net = LeNet5()
        >>> net = nn.MicroBatchInterleaved(net, 2)
    """
@@ -619,7 +621,7 @@ class MicroBatchInterleaved(Cell):

 class PipelineCell(Cell):
    """
-
+    Slice MiniBatch into finer-grained MicroBatch for use in pipeline-parallel training.

    Note:
        micro_size must be greater or equal to pipeline stages.
@@ -634,7 +636,7 @@ class PipelineCell(Cell):
    Examples:
        >>> import mindspore.nn as nn
        >>> # Define the network structure of LeNet5. Refer to
-        >>> # https://gitee.com/mindspore/docs/blob/
+        >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/lenet.py
        >>> net = LeNet5()
        >>> net = nn.PipelineCell(net, 4)
    """
@@ -685,7 +687,7 @@ class GradAccumulationCell(Cell):
    Examples:
        >>> import mindspore.nn as nn
        >>> # Define the network structure of LeNet5. Refer to
-        >>> # https://gitee.com/mindspore/docs/blob/
+        >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/lenet.py
        >>> net = LeNet5()
        >>> net = nn.GradAccumulationCell(net, 4)
    """
@@ -726,7 +728,7 @@

 def _pipeline_clear_grad(accu_grad, grad):
    accu_grad = F.depend(accu_grad, grad)
-    zeros = F.
+    zeros = F.zeros_like(accu_grad)
    return F.assign(accu_grad, zeros)

@@ -740,6 +742,18 @@ class _TrainGradAccuStepCell(TrainOneStepCell):
        self.hyper_map = ops.HyperMap()
        self.opt_shard = _get_enable_parallel_optimizer()
        self._get_attr_from_cell(network)
+        self.enable_mindio = False
+        mode = get_context("mode")
+        device_type = get_context("device_target")
+        if device_type != "Ascend" or mode != GRAPH_MODE:
+            return
+        graceful_exit = os.getenv("MS_ENABLE_MINDIO_GRACEFUL_EXIT")
+        ttp_lib_path = os.getenv("MS_MINDIO_TTP_LIB_PATH")
+        ttp_path_check = ttp_lib_path is not None and os.path.isfile(ttp_lib_path)
+        if graceful_exit == "true" and ttp_path_check:
+            self.g_one = Tensor([0.1])
+            self.allreduce_sum = ops.AllReduce()
+            self.enable_mindio = True

    def construct(self, *inputs):
        if not self.sense_flag:
@@ -748,6 +762,11 @@ class _TrainGradAccuStepCell(TrainOneStepCell):
        sens = ops.fill(ops.DType()(loss), ops.Shape()(loss), self.sens)
        grads = self.grad(self.network, self.weights)(*inputs, sens)
        accu_grads = ops.depend(self.accu_grads, grads)
+        if self.enable_mindio:
+            g_one = ops.depend(self.g_one, accu_grads)
+            g_one_res = self.allreduce_sum(g_one)
+            accu_grads = ops.depend(accu_grads, g_one_res)
+            grads = ops.depend(grads, g_one_res)
        if self.opt_shard:
            succ = self.optimizer(grads)
        else:
@@ -762,6 +781,11 @@ class _TrainGradAccuStepCell(TrainOneStepCell):
        loss = self.network(*inputs)
        grads = self.grad_no_sens(self.network, self.weights)(*inputs)
        accu_grads = ops.depend(self.accu_grads, grads)
+        if self.enable_mindio:
+            g_one = ops.depend(self.g_one, accu_grads)
+            g_one_res = self.allreduce_sum(g_one)
+            accu_grads = ops.depend(accu_grads, g_one_res)
+            grads = ops.depend(grads, g_one_res)
        if self.opt_shard:
            succ = self.optimizer(grads)
        else:
@@ -772,6 +796,27 @@ class _TrainGradAccuStepCell(TrainOneStepCell):
        return loss


+class AllreduceGraph(Cell):
+    """
+    A allreduce graph to broadcast parameters.
+    """
+    def __init__(self, inputs, group_name):
+        super(AllreduceGraph, self).__init__()
+        self.input_num = len(inputs)
+        self.inputs = inputs
+        self.allreduces = []
+        self.assigns = []
+        for _ in range(self.input_num):
+            self.allreduces.append(ops.AllReduce(op="sum", group=group_name))
+            self.assigns.append(ops.Assign())
+
+    def construct(self):
+        for i in range(self.input_num):
+            res = self.allreduces[i](self.inputs[i])
+            self.assigns[i](self.inputs[i], res)
+        return self.inputs
+
+
 class VirtualDatasetCellTriple(Cell):
    """
    Wrap the network with virtual dataset to convert data parallel layout to model parallel layout.
@@ -790,7 +835,7 @@ class VirtualDatasetCellTriple(Cell):
    Examples:
        >>> import mindspore.nn as nn
        >>> # Define the network structure of LeNet5. Refer to
-        >>> # https://gitee.com/mindspore/docs/blob/
+        >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/lenet.py
        >>> net = LeNet5()
        >>> net = nn.VirtualDatasetCellTriple(net)
    """
@@ -833,7 +878,7 @@ class WithEvalCell(Cell):
    Examples:
        >>> import mindspore.nn as nn
        >>> # Define a forward network without loss function, taking LeNet5 as an example.
-        >>> # Refer to https://gitee.com/mindspore/docs/blob/
+        >>> # Refer to https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/lenet.py
        >>> net = LeNet5()
        >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits()
        >>> eval_net = nn.WithEvalCell(net, loss_fn)
@@ -18,17 +18,40 @@ from __future__ import absolute_import
 from mindspore import context
 from mindspore import log as logger
 from mindspore.nn.cell import Cell
+from mindspore.nn.layer import Identity
 from mindspore.communication.management import GlobalComm, get_group_size
 from mindspore.common.sparse_tensor import RowTensorInner
-from mindspore.ops import functional as F, composite as C
+from mindspore.ops import functional as F, composite as C, operations as P
 from mindspore.ops.operations.comm_ops import AllReduce, AllGather
 from mindspore.parallel._auto_parallel_context import auto_parallel_context
 import mindspore.common.dtype as mstype
 from mindspore.common.sparse_tensor import Tensor
 from mindspore.common.api import jit
-
+from mindspore.common.parameter import Parameter
+from mindspore.parallel._utils import _get_enable_parallel_optimizer

 reduce_opt = C.MultitypeFuncGraph("reduce_opt")
+grad_scale = C.MultitypeFuncGraph("grad_scale")
+shard_grad_scale = C.MultitypeFuncGraph("shard_grad_scale")
+reciprocal = P.Reciprocal()
+
+
+@grad_scale.register("Tensor", "Tensor", "Tensor")
+def tensor_grad_scale_pipeline(scale, grad, accu_grad):
+    accu_grad = F.depend(accu_grad, grad)
+    new_grad = accu_grad * reciprocal(scale)
+    accu_grad = F.depend(accu_grad, new_grad)
+    zeros = F.tensor_mul(accu_grad, 0.0)
+    new_grad = F.depend(new_grad, F.assign(accu_grad, zeros))
+    return new_grad
+
+
+@shard_grad_scale.register("Tensor", "Tensor", "Tensor")
+def tensor_shard_grad_scale_pipeline(scale, grad, accu_grad):
+    new_grad = grad * reciprocal(scale)
+    accu_grad = F.depend(accu_grad, new_grad)
+    new_grad = F.depend(new_grad, F.assign(accu_grad, F.zeros_like(accu_grad)))
+    return new_grad


 def _init_allreduce_operators(length, split_indices, group=GlobalComm.WORLD_COMM_GROUP):
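The two MultitypeFuncGraph registrations added above scale the accumulated gradient by the reciprocal of the loss scale and then zero the accumulation buffer, with F.depend enforcing the read-before-clear ordering in graph mode. A rough eager-style sketch of the non-sharded variant, for orientation only:

import numpy as np

def tensor_grad_scale_pipeline_sketch(scale, grad, accu_grad):
    """Eager NumPy analogue of tensor_grad_scale_pipeline above; the F.depend calls
    only enforce ordering in graph mode, so here the steps are simply sequential."""
    new_grad = accu_grad * (1.0 / scale)   # scale the accumulated gradient, not the raw grad
    accu_grad[...] = 0.0                   # then clear the accumulation buffer in place
    return new_grad

accu = np.ones(3, dtype=np.float32)
print(tensor_grad_scale_pipeline_sketch(2.0, np.zeros(3, dtype=np.float32), accu), accu)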
@@ -53,9 +76,6 @@ def _init_allreduce_operators(length, split_indices, group=GlobalComm.WORLD_COMM_GROUP):
    for i in range(length):
        op = AllReduce('sum', group)
        op_fusion_id = fusion[i]
-        # When running in ge and enabled all_reduce_fusion_config, hccl will check the allreduce' fusion id to be -1
-        if context.get_context("enable_ge") and context.get_auto_parallel_context("all_reduce_fusion_config"):
-            op_fusion_id = -1
        op.add_prim_attr('fusion', op_fusion_id)
        op.add_prim_attr('index', index[i])
        op_list = op_list + (op,)
@@ -315,14 +335,14 @@ class DistributedGradReducer(Cell):

        For the Ascend devices, users need to prepare the rank table, set rank_id and device_id.
        Please see the `rank table Startup
-        <https://www.mindspore.cn/tutorials/experts/en/
+        <https://www.mindspore.cn/tutorials/experts/en/master/parallel/rank_table.html>`_
        for more details.

        For the GPU devices, users need to prepare the host file and mpi, please see the `mpirun Startup
-        <https://www.mindspore.cn/tutorials/experts/en/
+        <https://www.mindspore.cn/tutorials/experts/en/master/parallel/mpirun.html>`_ .

        For the CPU device, users need to write a dynamic cluster startup script, please see the `Dynamic Cluster
-        Startup <https://www.mindspore.cn/tutorials/experts/en/
+        Startup <https://www.mindspore.cn/tutorials/experts/en/master/parallel/dynamic_cluster.html>`_ .

        This example should be run with multiple devices.

@@ -466,3 +486,123 @@ class DistributedGradReducer(Cell):
        if context.get_context('mode') == context.GRAPH_MODE and parallel_mode in (
                context.ParallelMode.SEMI_AUTO_PARALLEL, context.ParallelMode.AUTO_PARALLEL):
            raise RuntimeError("{} can not use DistributedGradReducer in graph mode".format(parallel_mode))
+
+
+class PipelineGradReducer(Cell):
+    """
+    PipelineGradReducer is a gradient reducer for pipeline parallelism.
+
+    Args:
+        parameters (list): the parameters to be updated.
+        scale_sense (float): the scale sense of the gradient. Default: 1.0.
+
+    Raise:
+        RuntimeError: If the mode is not graph mode.
+        RuntimeError: If the parallel mode is not semi auto parallel or auto parallel.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU``
+
+    Examples:
+        .. note::
+            Before running the following examples, you need to configure the communication environment variables.
+
+            For the Ascend devices, users need to prepare the rank table, set rank_id and device_id.
+            Please see the `rank table Startup
+            <https://www.mindspore.cn/tutorials/experts/en/master/parallel/rank_table.html>`_
+            for more details.
+
+            For the GPU devices, users need to prepare the host file and mpi, please see the `mpirun Startup
+            <https://www.mindspore.cn/tutorials/experts/en/master/parallel/mpirun.html>`_ .
+
+            This example should be run with multiple devices.
+
+        >>> import numpy as np
+        >>> import mindspore as ms
+        >>> from mindspore import nn, ops, Tensor
+        >>> from mindspore.communication import init
+        >>>
+        >>> ms.set_context(mode=ms.GRAPH_MODE)
+        >>> ms.reset_auto_parallel_context()
+        >>> ms.set_auto_parallel_context(parallel_mode=ms.ParallelMode.SEMI_AUTO_PARALLEL, pipeline_stages=2)
+        >>> init()
+        >>> ms.set_seed(1)
+        >>>
+        >>> class Network(nn.Cell):
+        ...     def __init__(self, in_features, out_features, sens=1.0):
+        ...         super().__init__()
+        ...         self.layer1 = nn.Dense(in_features, 16)
+        ...         self.relu1 = nn.ReLU()
+        ...         self.layer2 = nn.Dense(16, 16)
+        ...         self.relu2 = nn.ReLU()
+        ...         self.layer3 = nn.Dense(16, out_features)
+        ...
+        ...     def construct(self, x):
+        ...         x = self.layer1(x)
+        ...         x = self.relu1(x)
+        ...         x = self.layer2(x)
+        ...         x = self.relu2(x)
+        ...         logits = self.layer3(x)
+        ...         return logits
+        >>>
+        >>> size, in_features, out_features = 16, 32, 10
+        >>> net = Network(in_features, out_features)
+        >>> net.layer1.pipeline_stage = 0
+        >>> net.relu1.pipeline_stage = 0
+        >>> net.layer2.pipeline_stage = 0
+        >>> net.relu2.pipeline_stage = 1
+        >>> net.layer3.pipeline_stage = 1
+        >>> loss_fn = nn.CrossEntropyLoss()
+        >>> optimizer = nn.SGD(net.trainable_params(), 1e-2)
+        >>> net_with_loss = nn.PipelineCell(nn.WithLossCell(net, loss_fn), 2)
+        >>> net_with_loss.set_train()
+        >>> def forward_fn(inputs, target):
+        ...     loss = net_with_loss(inputs, target)
+        ...     return loss
+        >>>
+        >>> grad_fn = ops.value_and_grad(forward_fn, None, net_with_loss.trainable_params())
+        >>> pp_grad_reducer = nn.PipelineGradReducer(optimizer.parameters)
+        >>>
+        >>> @ms.jit
+        >>> def train_one_step(inputs, target):
+        ...     loss, grads = grad_fn(inputs, target)
+        ...     grads = pp_grad_reducer(grads)
+        ...     optimizer(grads)
+        ...     return loss, grads
+        >>>
+        >>> inputs = Tensor(np.ones([size, in_features]).astype(np.float32))
+        >>> label = Tensor(np.ones([size, out_features]).astype(np.float32))
+        >>> loss, _ = train_one_step(inputs, label)
+        >>> print(loss)
+        46.36721
+    """
+    def __init__(self, parameters, scale_sense=1.0):
+        super(PipelineGradReducer, self).__init__(auto_prefix=False)
+        self._check_mode()
+        self.accu_grads = parameters.clone(prefix="accu_grads", init="zeros")
+        self.grad_reducer = Identity()
+        self.degree = Tensor(1, mstype.float32)
+        self.scale_sense = Parameter(scale_sense, name='scale_sense')
+        self.hyper_map = C.HyperMap()
+        self.opt_shard = _get_enable_parallel_optimizer()
+
+    @jit
+    def construct(self, grads):
+        new_grads = None
+        if self.opt_shard:
+            grads = self.grad_reducer(grads)
+            new_grads = self.hyper_map(F.partial(shard_grad_scale, self.scale_sense * self.degree),
+                                       grads, self.accu_grads)
+        else:
+            accu_grads = self.grad_reducer(self.accu_grads)
+            new_grads = self.hyper_map(F.partial(grad_scale, self.scale_sense * self.degree), grads, accu_grads)
+        return new_grads
+
+    def _check_mode(self):
+        """check parallel mode"""
+        mode = context.get_context('mode')
+        if mode != context.GRAPH_MODE:
+            raise RuntimeError(f"PipelineGradReducer only support graph mode, but get {mode}")
+        parallel_mode = context.get_auto_parallel_context('parallel_mode')
+        if parallel_mode not in (context.ParallelMode.SEMI_AUTO_PARALLEL, context.ParallelMode.AUTO_PARALLEL):
+            raise RuntimeError(f"{parallel_mode} can not use PipelineGradReducer in graph mode")
mindspore/nn/wrap/loss_scale.py
CHANGED
@@ -29,6 +29,7 @@ from mindspore.ops.operations.math_ops import NPUGetFloatStatusV2, NPUClearFloatStatusV2
 from mindspore.ops import functional as F
 from mindspore.ops import composite as C
 from mindspore.ops import operations as P
+from mindspore.ops.operations.nn_ops import AllFinite
 from mindspore.common import dtype as mstype
 from mindspore.common.api import jit
 from mindspore._c_expression import MSContext
@@ -71,7 +72,7 @@ def _tensor_ascend_grad_overflow(grad):
    status = ascend_grad_overflow(grad)
    base = Tensor(1.0, dtype=mstype.float32)
    output = base - status.all()
-    output = P.Reshape()(output, ((1,)))
+    output = P.Reshape()(output, ((-1,)))
    return output

@@ -368,10 +369,19 @@ class TrainOneStepWithLossScaleCell(TrainOneStepCell):
        self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE)
        self.gpu_target = (context.get_context("device_target") == "GPU")
        self.ascend_910a_target = (MSContext.get_instance().get_ascend_soc_version() == 'ascend910')
-        self.
+        self.ascend_910bc_target = (MSContext.get_instance().get_ascend_soc_version() in ['ascend910b', 'ascend910c'])
        self.loss_scaling_manager = None
-        self.
-
+        self._ascend_check_overflow_mode = os.environ.get('MS_ASCEND_CHECK_OVERFLOW_MODE')
+
+        self.enable_allfinite = False
+        runtime_conf = os.environ.get('MS_DEV_RUNTIME_CONF')
+        global_jit_config = context.get_jit_config()
+        if runtime_conf is not None and ("all_finite:True" in runtime_conf or "all_finite:true" in runtime_conf):
+            self.enable_allfinite = True
+        elif runtime_conf is not None and ("all_finite:False" in runtime_conf or "all_finite:false" in runtime_conf):
+            self.enable_allfinite = False
+        elif global_jit_config:
+            self.enable_allfinite = global_jit_config["jit_level"] == "O0" or global_jit_config["jit_level"] == "O1"

        if isinstance(scale_sense, Cell):
            self.loss_scaling_manager = scale_sense
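Judging from the branch added above, the AllFinite-based overflow check is controlled by an "all_finite:..." token in the MS_DEV_RUNTIME_CONF environment variable, falling back to the jit level (O0/O1) otherwise. A minimal sketch of forcing the new path on, assuming the variable is set before the cell is constructed:

import os

# Assumption: set before TrainOneStepWithLossScaleCell is built so the constructor
# above sees it; "all_finite:False" would force the per-gradient overflow check instead.
os.environ["MS_DEV_RUNTIME_CONF"] = "all_finite:True"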
@@ -450,8 +460,8 @@
        is cleaned up when the function returns.
        """
        status = Tensor([0] * 8, mstype.int32)
-        if self.ascend_910a_target or (self.
-            self.
+        if self.ascend_910a_target or (self.ascend_910bc_target and \
+            self._ascend_check_overflow_mode == "SATURATION_MODE"):
            status = F.depend(status, pre_cond)
            # clear overflow buffer
            clear_status = NPUClearFloatStatusV2()(status)
@@ -478,6 +488,15 @@
        overflow = self.less_equal(self.base, flag_sum)
        return overflow

+    def _get_distributed_overflow_status_on_infnan_enable_allfinite(self, compute_output):
+        """check overflow status on infnan kernel mode."""
+        overflow = AllFinite()(compute_output)
+
+        if self.is_distributed:
+            overflow = P.Cast()(overflow, mstype.int8)
+            overflow = P.Cast()(self.allreduce(overflow), mstype.bool_)
+        return overflow
+
    def _get_gpu_overflow_status(self, compute_output):
        """get overflow status of gpu."""
        overflow = self._get_distributed_overflow_status_on_infnan_mode(_grad_overflow, compute_output)
@@ -485,7 +504,11 @@

    def _get_ascend_overflow_status_on_infnan_mode(self, compute_output):
        """get overflow status of ascend on infnan mode."""
-        overflow =
+        overflow = False
+        if self.enable_allfinite:
+            overflow = self._get_distributed_overflow_status_on_infnan_enable_allfinite(compute_output)
+        else:
+            overflow = self._get_distributed_overflow_status_on_infnan_mode(_ascend_grad_overflow, compute_output)
        return overflow

    def _get_ascend_overflow_status_on_saturation_mode(self, status, compute_output):
@@ -531,8 +554,8 @@
        """
        if self.gpu_target:
            overflow = self._get_gpu_overflow_status(compute_output)
-        elif self.
-            if self.
+        elif self.ascend_910bc_target:
+            if self._ascend_check_overflow_mode == "SATURATION_MODE":
                overflow = self._get_ascend_overflow_status_on_saturation_mode(status, compute_output)
            else:
                overflow = self._get_ascend_overflow_status_on_infnan_mode(compute_output)
mindspore/numpy/__init__.py
CHANGED
@@ -64,6 +64,8 @@ from mindspore.numpy.logic_ops import (not_equal, less_equal, less, greater_equal,
                                        logical_or, logical_xor, in1d, isin, isclose, signbit, sometrue,
                                        array_equal, array_equiv, setdiff1d)

+from . import fft
+
 mod = remainder
 fabs = absolute
 round = around  # pylint: disable=redefined-builtin
@@ -134,6 +134,8 @@ def asarray_const(a, dtype=None):
            dtype = mstype.float32
        elif dtype == mstype.int64:
            dtype = mstype.int32
+        if a.size == 0:
+            a = Tensor_(a)

    if isinstance(a, onp.ndarray) and dtype is None:
        if a.dtype is onp.dtype('object'):
mindspore/numpy/array_ops.py
CHANGED
@@ -1137,7 +1137,7 @@ def _get_moved_perm(ndim, source, destination):
 @_primexpr
 def _get_moved_shape(shape, perm):
    """
-    Helper function for moveaxis, returns the
+    Helper function for moveaxis, returns the permuted shape after
    applying perm.
    """
    return tuple([shape[i] for i in perm])
@@ -1186,10 +1186,6 @@ def moveaxis(a, source, destination):
        _raise_value_error('`source` and `destination` arguments must have the same number of elements')
    perm = _get_moved_perm(ndim, source, destination)

-    shape = F.shape(a)
-    if _is_shape_empty(shape):
-        return _empty(F.dtype(a), _get_moved_shape(shape, perm))
-
    return F.transpose(a, perm)

@@ -2610,7 +2606,11 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
    array1 = ar1.ravel()
    array2 = ar2.ravel()
    concat_array = concatenate((array1, array2))
-
+    if return_indices:
+        concat_sort_indices = F.argsort(concat_array)
+        concat_array = concat_array[concat_sort_indices]
+    else:
+        concat_array, concat_sort_indices = concat_array.sort()

    mask_res = concat_array[1:] == concat_array[:-1]
    res = F.masked_select(concat_array[1:], mask_res)
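For reference, the concatenate-sort-and-compare idiom used by intersect1d above can be sketched in plain NumPy as follows (assuming deduplicated inputs, as with assume_unique=True):

import numpy as np

def intersect1d_sketch(ar1, ar2):
    """Plain-NumPy illustration of the sorted-concatenation intersection above."""
    concat = np.concatenate((ar1.ravel(), ar2.ravel()))
    order = np.argsort(concat, kind="stable")    # mirrors the F.argsort branch
    concat = concat[order]
    mask = concat[1:] == concat[:-1]             # duplicates mark values present in both inputs
    return concat[1:][mask]

print(intersect1d_sketch(np.array([1, 3, 5, 7]), np.array([2, 3, 7, 8])))  # -> [3 7]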
mindspore/numpy/dtypes.py
CHANGED
@@ -86,7 +86,7 @@ dtype_map = {
 }

 all_types = [
-    'np.
+    'np.int_',
    'np.int8',
    'np.int16',
    'np.int32',
@@ -96,11 +96,11 @@ all_types = [
    'np.uint16',
    'np.uint32',
    'np.uint64',
-    'np.
+    'np.float_',
    'np.float16',
    'np.float32',
    'np.float64',
-    'np.
+    'np.bool_']

 promotion_rule = {
    (uint8, uint16): uint16,