mindspore 2.0.0rc1__cp38-cp38-manylinux1_x86_64.whl → 2.2.0__cp38-cp38-manylinux1_x86_64.whl
This diff shows the changes between publicly available package versions that have been released to one of the supported registries. The information in this diff is provided for informational purposes only and reflects the package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic. See the advisory on the registry page for more details.
- mindspore/.commit_id +1 -1
- mindspore/Third_Party_Open_Source_Software_Notice +2 -2
- mindspore/__init__.py +5 -2
- mindspore/_akg/akg/build_module.py +5 -6
- mindspore/_akg/akg/composite/build_module.py +49 -16
- mindspore/_akg/akg/composite/split_stitch.py +10 -11
- mindspore/_akg/akg/config/repository.json +195 -0
- mindspore/_akg/akg/global_configs.py +5 -1
- mindspore/_akg/akg/ms/info_version_adapt.py +67 -1
- mindspore/_akg/akg/tvm/api.py +4 -3
- mindspore/_akg/akg/tvm/autotvm/__init__.py +1 -2
- mindspore/_akg/akg/tvm/autotvm/graph_tuner/base_graph_tuner.py +1 -5
- mindspore/_akg/akg/tvm/autotvm/measure/__init__.py +1 -1
- mindspore/_akg/akg/tvm/autotvm/measure/measure.py +1 -10
- mindspore/_akg/akg/tvm/autotvm/measure/measure_methods.py +1 -372
- mindspore/_akg/akg/tvm/build_module.py +16 -1
- mindspore/_akg/akg/tvm/contrib/graph_runtime.py +0 -53
- mindspore/_akg/akg/tvm/hybrid/parser.py +7 -6
- mindspore/_akg/akg/tvm/ir_builder.py +1 -1
- mindspore/_akg/akg/tvm/module.py +1 -2
- mindspore/_akg/akg/tvm/stmt.py +2 -2
- mindspore/_akg/akg/utils/composite_op_helper.py +9 -10
- mindspore/_akg/akg/utils/kernel_exec.py +58 -260
- mindspore/_akg/akg/utils/op_dsl.py +17 -1
- mindspore/_akg/akg/utils/result_analysis.py +4 -24
- mindspore/_akg/akg/utils/tbe_codegen_utils.py +198 -0
- mindspore/_c_dataengine.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/_check_jit_forbidden_api.py +5 -1
- mindspore/_checkparam.py +79 -62
- mindspore/_extends/graph_kernel/__init__.py +0 -1
- mindspore/_extends/graph_kernel/model/graph_split.py +2 -0
- mindspore/_extends/graph_kernel/model/model_builder.py +9 -50
- mindspore/_extends/graph_kernel/splitter.py +1 -9
- mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +128 -21
- mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +2 -2
- mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +4 -2
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +18 -13
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +13 -9
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +1 -1
- mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +1 -1
- mindspore/_extends/parse/__init__.py +19 -17
- mindspore/_extends/parse/namespace.py +7 -36
- mindspore/_extends/parse/parser.py +375 -189
- mindspore/_extends/parse/resources.py +36 -41
- mindspore/_extends/parse/standard_method.py +350 -245
- mindspore/_extends/parse/trope.py +2 -12
- mindspore/_extends/remote/kernel_build_server.py +24 -7
- mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
- mindspore/_install_custom.py +43 -0
- mindspore/_mindspore_offline_debug.cpython-38-x86_64-linux-gnu.so +0 -0
- mindspore/amp.py +85 -19
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/base.py +2 -2
- mindspore/boost/boost.py +27 -32
- mindspore/boost/boost_cell_wrapper.py +37 -13
- mindspore/boost/grad_accumulation.py +1 -1
- mindspore/boost/grad_freeze.py +34 -6
- mindspore/boost/group_loss_scale_manager.py +15 -14
- mindspore/boost/less_batch_normalization.py +28 -3
- mindspore/common/__init__.py +15 -11
- mindspore/common/_auto_dynamic.py +68 -0
- mindspore/common/_jit_fallback_utils.py +111 -0
- mindspore/common/_register_for_adapter.py +17 -5
- mindspore/common/_register_for_tensor.py +2 -2
- mindspore/common/_stub_tensor.py +18 -15
- mindspore/common/_utils.py +31 -7
- mindspore/common/api.py +269 -101
- mindspore/common/auto_dynamic_shape.py +498 -0
- mindspore/common/dtype.py +61 -21
- mindspore/common/dump.py +9 -7
- mindspore/common/initializer.py +106 -76
- mindspore/common/jit_config.py +35 -14
- mindspore/common/lazy_inline.py +187 -0
- mindspore/common/mindir_util.py +101 -0
- mindspore/common/mutable.py +10 -13
- mindspore/common/parameter.py +246 -55
- mindspore/common/seed.py +13 -7
- mindspore/common/sparse_tensor.py +29 -33
- mindspore/common/tensor.py +907 -251
- mindspore/communication/__init__.py +7 -4
- mindspore/communication/_comm_helper.py +84 -4
- mindspore/communication/management.py +160 -88
- mindspore/config/op_info.config +99 -75
- mindspore/config/super_bar_config.json +36 -4
- mindspore/context.py +526 -219
- mindspore/dataset/__init__.py +9 -46
- mindspore/dataset/audio/__init__.py +4 -19
- mindspore/dataset/audio/transforms.py +545 -233
- mindspore/dataset/audio/utils.py +21 -18
- mindspore/dataset/callback/ds_callback.py +42 -13
- mindspore/dataset/core/config.py +158 -100
- mindspore/dataset/core/validator_helpers.py +1 -63
- mindspore/dataset/debug/debug_hook.py +45 -13
- mindspore/dataset/debug/pre_defined_hook.py +5 -5
- mindspore/dataset/engine/__init__.py +0 -5
- mindspore/dataset/engine/cache_client.py +38 -15
- mindspore/dataset/engine/datasets.py +615 -278
- mindspore/dataset/engine/datasets_audio.py +154 -283
- mindspore/dataset/engine/datasets_standard_format.py +104 -116
- mindspore/dataset/engine/datasets_text.py +443 -326
- mindspore/dataset/engine/datasets_user_defined.py +251 -164
- mindspore/dataset/engine/datasets_vision.py +839 -1443
- mindspore/dataset/engine/iterators.py +11 -4
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +7 -3
- mindspore/dataset/engine/obs/util.py +3 -0
- mindspore/dataset/engine/offload.py +6 -6
- mindspore/dataset/engine/queue.py +15 -14
- mindspore/dataset/engine/samplers.py +39 -23
- mindspore/dataset/engine/serializer_deserializer.py +22 -6
- mindspore/dataset/engine/validators.py +21 -331
- mindspore/dataset/text/__init__.py +5 -33
- mindspore/dataset/text/transforms.py +334 -165
- mindspore/dataset/text/utils.py +215 -145
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/transforms/c_transforms.py +3 -2
- mindspore/dataset/transforms/py_transforms_util.py +40 -12
- mindspore/dataset/transforms/transforms.py +174 -71
- mindspore/dataset/utils/browse_dataset.py +25 -17
- mindspore/dataset/utils/line_reader.py +24 -21
- mindspore/dataset/vision/__init__.py +5 -26
- mindspore/dataset/vision/c_transforms.py +177 -165
- mindspore/dataset/vision/py_transforms.py +114 -119
- mindspore/dataset/vision/py_transforms_util.py +54 -51
- mindspore/dataset/vision/transforms.py +1127 -381
- mindspore/dataset/vision/utils.py +54 -38
- mindspore/dataset/vision/validators.py +12 -2
- mindspore/experimental/map_parameter.py +38 -4
- mindspore/{dataset/datapreprocess → experimental/optim}/__init__.py +14 -4
- mindspore/experimental/optim/adam.py +192 -0
- mindspore/experimental/optim/adamw.py +181 -0
- mindspore/experimental/optim/lr_scheduler.py +1427 -0
- mindspore/experimental/optim/optimizer.py +252 -0
- mindspore/experimental/optim/sgd.py +147 -0
- mindspore/gen_ops.py +273 -0
- mindspore/include/OWNERS +1 -2
- mindspore/include/api/context.h +21 -1
- mindspore/include/api/data_type.h +2 -1
- mindspore/include/api/graph.h +0 -15
- mindspore/include/api/kernel.h +2 -0
- mindspore/include/api/kernel_api.h +37 -12
- mindspore/include/api/model.h +29 -42
- mindspore/include/api/model_group.h +14 -3
- mindspore/include/api/model_parallel_runner.h +18 -2
- mindspore/include/api/serialization.h +26 -0
- mindspore/include/api/status.h +1 -0
- mindspore/include/api/types.h +38 -4
- mindspore/include/c_api/ms/abstract.h +67 -0
- mindspore/include/c_api/ms/attribute.h +197 -0
- mindspore/include/c_api/ms/base/handle_types.h +43 -0
- mindspore/include/c_api/ms/base/macros.h +32 -0
- mindspore/include/c_api/ms/base/status.h +33 -0
- mindspore/include/c_api/ms/base/types.h +282 -0
- mindspore/include/c_api/ms/context.h +102 -0
- mindspore/include/c_api/ms/graph.h +160 -0
- mindspore/include/c_api/ms/node.h +606 -0
- mindspore/include/c_api/ms/tensor.h +161 -0
- mindspore/include/c_api/ms/value.h +84 -0
- mindspore/include/c_api/status_c.h +3 -0
- mindspore/include/dataset/constants.h +6 -12
- mindspore/include/dataset/execute.h +23 -13
- mindspore/include/dataset/text.h +26 -26
- mindspore/include/dataset/transforms.h +25 -31
- mindspore/include/dataset/vision.h +60 -60
- mindspore/include/dataset/vision_ascend.h +5 -6
- mindspore/include/dataset/vision_lite.h +17 -17
- mindspore/include/mindapi/base/format.h +0 -1
- mindspore/include/mindapi/base/type_id.h +2 -1
- mindspore/include/mindapi/base/types.h +5 -1
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libjemalloc.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libmpi_adapter.so +0 -0
- mindspore/lib/libnnacl.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/libps_cache.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +9000 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/libakg.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libhccl_plugin.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/cpu/libakg.so +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
- mindspore/lib/plugin/gpu10.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu10.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.1/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.1/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/gpu11.6/libakg.so +0 -0
- mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.6/libnvidia_collective.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
- mindspore/log.py +9 -6
- mindspore/mindrecord/filereader.py +33 -4
- mindspore/mindrecord/filewriter.py +70 -35
- mindspore/mindrecord/mindpage.py +40 -34
- mindspore/mindrecord/shardreader.py +1 -1
- mindspore/mindrecord/shardsegment.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +25 -18
- mindspore/mindrecord/tools/cifar10_to_mr.py +25 -18
- mindspore/mindrecord/tools/csv_to_mr.py +29 -13
- mindspore/mindrecord/tools/imagenet_to_mr.py +24 -10
- mindspore/mindrecord/tools/mnist_to_mr.py +24 -11
- mindspore/mindrecord/tools/tfrecord_to_mr.py +31 -26
- mindspore/nn/cell.py +463 -169
- mindspore/nn/dynamic_lr.py +47 -43
- mindspore/nn/layer/activation.py +225 -82
- mindspore/nn/layer/basic.py +121 -79
- mindspore/nn/layer/channel_shuffle.py +21 -21
- mindspore/nn/layer/combined.py +33 -26
- mindspore/nn/layer/container.py +277 -22
- mindspore/nn/layer/conv.py +441 -304
- mindspore/nn/layer/dense.py +19 -13
- mindspore/nn/layer/embedding.py +62 -49
- mindspore/nn/layer/flash_attention.py +264 -0
- mindspore/nn/layer/image.py +50 -39
- mindspore/nn/layer/math.py +62 -51
- mindspore/nn/layer/normalization.py +219 -167
- mindspore/nn/layer/padding.py +58 -70
- mindspore/nn/layer/pooling.py +334 -287
- mindspore/nn/layer/rnn_cells.py +53 -38
- mindspore/nn/layer/rnns.py +59 -56
- mindspore/nn/layer/thor_layer.py +52 -44
- mindspore/nn/layer/timedistributed.py +6 -4
- mindspore/nn/layer/transformer.py +284 -164
- mindspore/nn/learning_rate_schedule.py +34 -25
- mindspore/nn/loss/__init__.py +3 -2
- mindspore/nn/loss/loss.py +554 -311
- mindspore/nn/optim/ada_grad.py +12 -9
- mindspore/nn/optim/adadelta.py +14 -11
- mindspore/nn/optim/adafactor.py +19 -16
- mindspore/nn/optim/adam.py +62 -47
- mindspore/nn/optim/adamax.py +13 -10
- mindspore/nn/optim/adasum.py +12 -8
- mindspore/nn/optim/asgd.py +10 -9
- mindspore/nn/optim/ftrl.py +20 -17
- mindspore/nn/optim/lamb.py +16 -12
- mindspore/nn/optim/lars.py +8 -6
- mindspore/nn/optim/lazyadam.py +25 -20
- mindspore/nn/optim/momentum.py +10 -7
- mindspore/nn/optim/optimizer.py +61 -9
- mindspore/nn/optim/proximal_ada_grad.py +14 -13
- mindspore/nn/optim/rmsprop.py +17 -13
- mindspore/nn/optim/rprop.py +30 -17
- mindspore/nn/optim/sgd.py +40 -23
- mindspore/nn/optim/thor.py +24 -26
- mindspore/nn/probability/bijector/bijector.py +11 -11
- mindspore/nn/probability/bijector/exp.py +1 -1
- mindspore/nn/probability/bijector/gumbel_cdf.py +3 -3
- mindspore/nn/probability/bijector/invert.py +1 -1
- mindspore/nn/probability/bijector/power_transform.py +29 -29
- mindspore/nn/probability/bijector/scalar_affine.py +3 -3
- mindspore/nn/probability/bijector/softplus.py +5 -5
- mindspore/nn/probability/bnn_layers/bnn_cell_wrapper.py +4 -2
- mindspore/nn/probability/bnn_layers/conv_variational.py +13 -13
- mindspore/nn/probability/bnn_layers/dense_variational.py +12 -12
- mindspore/nn/probability/bnn_layers/layer_distribution.py +9 -8
- mindspore/nn/probability/distribution/_utils/custom_ops.py +19 -3
- mindspore/nn/probability/distribution/_utils/utils.py +1 -1
- mindspore/nn/probability/distribution/bernoulli.py +9 -9
- mindspore/nn/probability/distribution/beta.py +8 -8
- mindspore/nn/probability/distribution/categorical.py +23 -15
- mindspore/nn/probability/distribution/cauchy.py +5 -6
- mindspore/nn/probability/distribution/distribution.py +3 -3
- mindspore/nn/probability/distribution/exponential.py +4 -4
- mindspore/nn/probability/distribution/gamma.py +10 -10
- mindspore/nn/probability/distribution/geometric.py +8 -8
- mindspore/nn/probability/distribution/gumbel.py +8 -9
- mindspore/nn/probability/distribution/half_normal.py +5 -5
- mindspore/nn/probability/distribution/laplace.py +5 -5
- mindspore/nn/probability/distribution/log_normal.py +12 -11
- mindspore/nn/probability/distribution/logistic.py +8 -8
- mindspore/nn/probability/distribution/normal.py +6 -5
- mindspore/nn/probability/distribution/poisson.py +10 -11
- mindspore/nn/probability/distribution/student_t.py +8 -9
- mindspore/nn/probability/distribution/transformed_distribution.py +5 -5
- mindspore/nn/probability/distribution/uniform.py +11 -11
- mindspore/nn/reinforcement/tensor_array.py +2 -2
- mindspore/nn/sparse/sparse.py +9 -9
- mindspore/nn/wrap/cell_wrapper.py +188 -63
- mindspore/nn/wrap/grad_reducer.py +21 -12
- mindspore/nn/wrap/loss_scale.py +136 -49
- mindspore/numpy/__init__.py +4 -4
- mindspore/numpy/array_creations.py +55 -56
- mindspore/numpy/array_ops.py +134 -35
- mindspore/numpy/logic_ops.py +66 -20
- mindspore/numpy/math_ops.py +142 -139
- mindspore/numpy/utils_const.py +2 -2
- mindspore/offline_debug/convert_async.py +2 -2
- mindspore/ops/_grad_experimental/__init__.py +7 -5
- mindspore/ops/_grad_experimental/grad_array_ops.py +231 -348
- mindspore/ops/{_grad → _grad_experimental}/grad_base.py +1 -33
- mindspore/ops/{_grad → _grad_experimental}/grad_comm_ops.py +25 -13
- mindspore/ops/{_grad/__init__.py → _grad_experimental/grad_debug_ops.py} +15 -7
- mindspore/ops/{_grad → _grad_experimental}/grad_implementations.py +17 -11
- mindspore/ops/_grad_experimental/grad_inner_ops.py +33 -52
- mindspore/ops/_grad_experimental/grad_math_ops.py +151 -1224
- mindspore/ops/_grad_experimental/grad_nn_ops.py +141 -414
- mindspore/ops/{_grad → _grad_experimental}/grad_quant_ops.py +10 -6
- mindspore/ops/_grad_experimental/grad_sparse.py +317 -2
- mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -13
- mindspore/ops/{_grad → _grad_experimental}/taylor_rule.py +1 -1
- mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +1 -1
- mindspore/ops/_op_impl/_custom_op/flash_attention/__init__.py +0 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/attention.py +406 -0
- mindspore/{_extends/graph_kernel/expanders/complex/__init__.py → ops/_op_impl/_custom_op/flash_attention/constants.py} +27 -8
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_bwd.py +467 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_fwd.py +563 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_impl.py +193 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tik_ops_utils.py +435 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/__init__.py +0 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/sparse_tiling.py +45 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/strategy.py +67 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/wukong_tiling.py +62 -0
- mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +2 -2
- mindspore/ops/_op_impl/aicpu/__init__.py +41 -1
- mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d.py +37 -0
- mindspore/ops/_op_impl/aicpu/bias_add_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/cast.py +52 -0
- mindspore/ops/_op_impl/aicpu/coalesce.py +2 -0
- mindspore/ops/_op_impl/aicpu/col2im.py +3 -1
- mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
- mindspore/ops/_op_impl/aicpu/dropout_genmask.py +6 -0
- mindspore/ops/_op_impl/aicpu/eps.py +32 -0
- mindspore/ops/_op_impl/aicpu/eye.py +4 -4
- mindspore/ops/_op_impl/aicpu/fft_with_size.py +6 -0
- mindspore/ops/_op_impl/aicpu/fill_diagonal.py +5 -0
- mindspore/ops/_op_impl/aicpu/gamma.py +2 -2
- mindspore/ops/_op_impl/aicpu/im2col.py +3 -5
- mindspore/ops/_op_impl/aicpu/lgamma.py +1 -0
- mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +6 -3
- mindspore/ops/_op_impl/aicpu/lu.py +39 -0
- mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/masked_scatter.py +1 -0
- mindspore/ops/_op_impl/aicpu/masked_select_grad.py +3 -0
- mindspore/ops/_op_impl/aicpu/matrix_band_part.py +59 -0
- mindspore/ops/_op_impl/aicpu/matrix_power.py +6 -1
- mindspore/ops/_op_impl/aicpu/median.py +1 -0
- mindspore/ops/_op_impl/aicpu/multinomial.py +9 -9
- mindspore/ops/_op_impl/aicpu/not_equal.py +0 -5
- mindspore/ops/_op_impl/aicpu/pad_v3.py +3 -1
- mindspore/ops/_op_impl/aicpu/pad_v3_grad.py +2 -0
- mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +15 -7
- mindspore/ops/_op_impl/aicpu/random_categorical.py +39 -19
- mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +5 -2
- mindspore/ops/_op_impl/aicpu/random_poisson.py +103 -52
- mindspore/ops/_op_impl/aicpu/random_shuffle.py +17 -15
- mindspore/ops/_op_impl/aicpu/resize_bilinear_grad.py +0 -1
- mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2.py +0 -6
- mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2_grad.py +0 -7
- mindspore/ops/_op_impl/aicpu/scatter_nd.py +2 -0
- mindspore/ops/_op_impl/aicpu/sequence_concat.py +40 -0
- mindspore/ops/_op_impl/aicpu/sequence_stack.py +40 -0
- mindspore/ops/_op_impl/aicpu/{sparseaddmm.py → sparse_addmm.py} +2 -2
- mindspore/ops/_op_impl/aicpu/{sparsesparsemaximum.py → sparse_sparse_maximum.py} +4 -4
- mindspore/ops/_op_impl/aicpu/standard_laplace.py +5 -4
- mindspore/ops/_op_impl/aicpu/standard_normal.py +5 -4
- mindspore/ops/_op_impl/aicpu/truncated_normal.py +9 -7
- mindspore/ops/_op_impl/aicpu/uniform.py +5 -3
- mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +8 -4
- mindspore/ops/_op_impl/aicpu/uniform_int.py +5 -5
- mindspore/ops/_op_impl/aicpu/uniform_real.py +4 -4
- mindspore/ops/_op_impl/aicpu/upsample_nearest_3d.py +14 -6
- mindspore/ops/_op_impl/aicpu/upsample_nearest_3d_grad.py +22 -8
- mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d.py +11 -6
- mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d_grad.py +21 -10
- mindspore/ops/_op_impl/tbe/__init__.py +6 -4
- mindspore/ops/_op_impl/tbe/atomic_addr_clean.py +1 -1
- mindspore/ops/_op_impl/tbe/avg_pool.py +2 -2
- mindspore/ops/_op_impl/tbe/avg_pool_3d.py +3 -3
- mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +4 -4
- mindspore/ops/_op_impl/tbe/avg_pool_ds.py +2 -2
- mindspore/ops/_op_impl/tbe/avg_pool_grad.py +3 -3
- mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +3 -3
- mindspore/ops/_op_impl/tbe/batch_to_space.py +1 -1
- mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +2 -2
- mindspore/ops/_op_impl/tbe/bn_infer.py +2 -2
- mindspore/ops/_op_impl/tbe/bn_infer_ds.py +3 -2
- mindspore/ops/_op_impl/tbe/broadcast_to.py +1 -1
- mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +3 -3
- mindspore/ops/_op_impl/tbe/expand_dims.py +1 -1
- mindspore/ops/_op_impl/tbe/gather_v2.py +56 -0
- mindspore/ops/_op_impl/tbe/im2col.py +4 -4
- mindspore/ops/_op_impl/tbe/inplace_index_add.py +7 -3
- mindspore/ops/_op_impl/tbe/mem_set.py +38 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_add.py +3 -0
- mindspore/ops/_op_impl/tbe/scatter_nd_d.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch.py +1 -1
- mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +2 -2
- mindspore/ops/_op_impl/tbe/trans_data_ds.py +2 -0
- mindspore/ops/_primitive_cache.py +1 -1
- mindspore/ops/_tracefunc.py +241 -0
- mindspore/ops/_utils/utils.py +10 -2
- mindspore/ops/_vmap/vmap_array_ops.py +5 -3
- mindspore/ops/_vmap/vmap_base.py +5 -4
- mindspore/ops/_vmap/vmap_convolution_ops.py +1 -1
- mindspore/ops/_vmap/vmap_grad_math_ops.py +6 -4
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +11 -6
- mindspore/ops/_vmap/vmap_math_ops.py +5 -2
- mindspore/ops/_vmap/vmap_nn_ops.py +135 -11
- mindspore/ops/arg_dtype_cast.py +54 -0
- mindspore/ops/composite/__init__.py +7 -5
- mindspore/ops/composite/base.py +78 -34
- mindspore/ops/composite/math_ops.py +5 -695
- mindspore/ops/composite/multitype_ops/_compile_utils.py +403 -97
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +28 -22
- mindspore/ops/composite/multitype_ops/add_impl.py +69 -7
- mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +2 -1
- mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/div_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +48 -10
- mindspore/ops/composite/multitype_ops/greater_equal_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/greater_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/left_shift_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/less_equal_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/less_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/logic_not_impl.py +2 -2
- mindspore/ops/composite/multitype_ops/mod_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/mul_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/negative_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/ones_like_impl.py +6 -0
- mindspore/ops/composite/multitype_ops/pow_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/right_shift_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +10 -7
- mindspore/ops/composite/multitype_ops/sub_impl.py +1 -0
- mindspore/ops/composite/multitype_ops/uadd_impl.py +2 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +9 -0
- mindspore/ops/deprecated.py +304 -0
- mindspore/ops/function/__init__.py +41 -4
- mindspore/ops/function/array_func.py +1108 -467
- mindspore/ops/function/clip_func.py +94 -27
- mindspore/ops/function/debug_func.py +3 -1
- mindspore/ops/function/grad/grad_func.py +82 -73
- mindspore/ops/function/image_func.py +28 -12
- mindspore/ops/function/linalg_func.py +135 -39
- mindspore/ops/function/math_func.py +3779 -894
- mindspore/ops/function/nn_func.py +1584 -657
- mindspore/ops/function/parameter_func.py +13 -3
- mindspore/ops/function/random_func.py +247 -153
- mindspore/ops/function/sparse_func.py +14 -11
- mindspore/ops/function/sparse_unary_func.py +173 -47
- mindspore/ops/function/spectral_func.py +8 -4
- mindspore/ops/function/vmap_func.py +8 -7
- mindspore/ops/functional.py +47 -16
- mindspore/ops/op_info_register.py +346 -86
- mindspore/ops/operations/__init__.py +38 -22
- mindspore/ops/operations/_grad_ops.py +145 -149
- mindspore/ops/operations/_inner_ops.py +298 -56
- mindspore/ops/operations/_ms_kernel.py +3 -3
- mindspore/ops/operations/_quant_ops.py +24 -28
- mindspore/ops/operations/_rl_inner_ops.py +9 -7
- mindspore/ops/operations/_scalar_ops.py +115 -0
- mindspore/ops/operations/_sequence_ops.py +148 -10
- mindspore/ops/operations/_tensor_array.py +1 -1
- mindspore/ops/operations/_thor_ops.py +2 -2
- mindspore/ops/operations/array_ops.py +1239 -561
- mindspore/ops/operations/comm_ops.py +166 -90
- mindspore/ops/operations/control_ops.py +3 -3
- mindspore/ops/operations/custom_ops.py +124 -102
- mindspore/ops/operations/debug_ops.py +24 -11
- mindspore/ops/operations/image_ops.py +86 -71
- mindspore/ops/operations/inner_ops.py +18 -13
- mindspore/ops/operations/linalg_ops.py +30 -11
- mindspore/ops/operations/math_ops.py +1730 -435
- mindspore/ops/operations/nn_ops.py +1953 -943
- mindspore/ops/operations/other_ops.py +65 -43
- mindspore/ops/operations/random_ops.py +258 -98
- mindspore/ops/operations/rl_ops.py +4 -36
- mindspore/ops/operations/sparse_ops.py +38 -33
- mindspore/ops/operations/spectral_ops.py +8 -4
- mindspore/ops/primitive.py +66 -44
- mindspore/ops/signature.py +5 -5
- mindspore/parallel/_auto_parallel_context.py +80 -19
- mindspore/parallel/_cost_model_context.py +42 -0
- mindspore/parallel/_offload_context.py +162 -72
- mindspore/parallel/_parallel_serialization.py +2 -2
- mindspore/parallel/_ps_context.py +16 -4
- mindspore/parallel/_recovery_context.py +2 -1
- mindspore/parallel/_tensor.py +15 -13
- mindspore/parallel/_transformer/layers.py +8 -6
- mindspore/parallel/_transformer/loss.py +1 -0
- mindspore/parallel/_transformer/moe.py +7 -7
- mindspore/parallel/_transformer/op_parallel_config.py +12 -1
- mindspore/parallel/_transformer/transformer.py +34 -14
- mindspore/parallel/_utils.py +36 -14
- mindspore/parallel/algo_parameter_config.py +114 -20
- mindspore/parallel/checkpoint_transform.py +16 -18
- mindspore/parallel/shard.py +16 -13
- mindspore/profiler/__init__.py +1 -1
- mindspore/profiler/common/struct_type.py +3 -3
- mindspore/profiler/common/util.py +3 -2
- mindspore/profiler/envprofiling.py +11 -4
- mindspore/profiler/parser/aicpu_data_parser.py +5 -3
- mindspore/profiler/parser/ascend_flops_generator.py +94 -0
- mindspore/profiler/parser/ascend_fpbp_generator.py +76 -0
- mindspore/profiler/parser/ascend_hccl_generator.py +288 -0
- mindspore/profiler/parser/ascend_msprof_exporter.py +213 -0
- mindspore/profiler/parser/ascend_msprof_generator.py +199 -0
- mindspore/profiler/parser/ascend_op_generator.py +276 -0
- mindspore/profiler/parser/ascend_steptrace_generator.py +94 -0
- mindspore/profiler/parser/ascend_timeline_generator.py +110 -54
- mindspore/profiler/parser/base_timeline_generator.py +11 -7
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +45 -46
- mindspore/profiler/parser/flops_parser.py +15 -11
- mindspore/profiler/parser/framework_parser.py +92 -73
- mindspore/profiler/parser/hccl_parser.py +16 -12
- mindspore/profiler/parser/integrator.py +22 -11
- mindspore/profiler/parser/memory_usage_parser.py +36 -11
- mindspore/profiler/parser/minddata_analyzer.py +12 -14
- mindspore/profiler/parser/minddata_pipeline_parser.py +1 -1
- mindspore/profiler/parser/msadvisor_parser.py +8 -4
- mindspore/profiler/parser/op_intermediate_parser.py +5 -2
- mindspore/profiler/parser/optime_parser.py +1 -1
- mindspore/profiler/parser/profiler_info.py +4 -5
- mindspore/profiler/parser/step_trace_parser.py +11 -14
- mindspore/profiler/profiling.py +678 -377
- mindspore/rewrite/api/node.py +211 -54
- mindspore/rewrite/api/node_type.py +5 -0
- mindspore/rewrite/api/pattern_engine.py +22 -23
- mindspore/rewrite/api/scoped_value.py +20 -17
- mindspore/rewrite/api/symbol_tree.py +252 -106
- mindspore/rewrite/api/tree_node_helper.py +3 -0
- mindspore/rewrite/ast_helpers/__init__.py +2 -1
- mindspore/rewrite/ast_helpers/ast_finder.py +129 -0
- mindspore/rewrite/ast_helpers/ast_modifier.py +116 -104
- mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +97 -46
- mindspore/rewrite/common/rewrite_elog.py +5 -1
- mindspore/rewrite/namer.py +51 -51
- mindspore/rewrite/namespace.py +14 -5
- mindspore/{ops/bprop_mindir → rewrite/node}/__init__.py +9 -4
- mindspore/rewrite/node/call_function.py +79 -0
- mindspore/rewrite/node/cell_container.py +135 -0
- mindspore/rewrite/node/control_flow.py +88 -0
- mindspore/rewrite/{node.py → node/node.py} +313 -247
- mindspore/rewrite/node/node_manager.py +254 -0
- mindspore/rewrite/node/node_topological_manager.py +243 -0
- mindspore/rewrite/parsers/arguments_parser.py +22 -21
- mindspore/rewrite/parsers/assign_parser.py +225 -239
- mindspore/rewrite/parsers/attribute_parser.py +9 -7
- mindspore/rewrite/parsers/class_def_parser.py +179 -218
- mindspore/rewrite/parsers/constant_parser.py +9 -6
- mindspore/rewrite/parsers/container_parser.py +9 -7
- mindspore/rewrite/parsers/for_parser.py +36 -15
- mindspore/rewrite/parsers/function_def_parser.py +23 -20
- mindspore/rewrite/parsers/if_parser.py +28 -24
- mindspore/rewrite/parsers/module_parser.py +202 -25
- mindspore/rewrite/{parser.py → parsers/parser.py} +4 -2
- mindspore/rewrite/{parser_register.py → parsers/parser_register.py} +1 -1
- mindspore/rewrite/parsers/return_parser.py +6 -6
- mindspore/rewrite/sparsify/sparse_transformer.py +12 -3
- mindspore/rewrite/sparsify/sparsify.py +4 -1
- mindspore/rewrite/sparsify/utils.py +11 -5
- mindspore/rewrite/symbol_tree.py +577 -732
- mindspore/rewrite/symbol_tree_builder.py +9 -175
- mindspore/rewrite/symbol_tree_dumper.py +2 -2
- mindspore/run_check/_check_version.py +46 -39
- mindspore/run_check/run_check.py +3 -2
- mindspore/{scipy/sparse → safeguard}/__init__.py +4 -5
- mindspore/safeguard/rewrite_obfuscation.py +517 -0
- mindspore/scipy/__init__.py +1 -1
- mindspore/scipy/linalg.py +67 -61
- mindspore/scipy/ops.py +5 -41
- mindspore/scipy/ops_grad.py +3 -2
- mindspore/scipy/ops_wrapper.py +5 -5
- mindspore/scipy/optimize/line_search.py +8 -8
- mindspore/scipy/optimize/linear_sum_assignment.py +4 -4
- mindspore/scipy/optimize/minimize.py +16 -12
- mindspore/scipy/utils.py +1 -52
- mindspore/scipy/utils_const.py +4 -4
- mindspore/train/__init__.py +4 -4
- mindspore/train/_utils.py +13 -5
- mindspore/train/amp.py +410 -148
- mindspore/train/anf_ir_pb2.py +16 -4
- mindspore/train/callback/_backup_and_restore.py +8 -11
- mindspore/train/callback/_callback.py +80 -3
- mindspore/train/callback/_checkpoint.py +82 -51
- mindspore/train/callback/_early_stop.py +12 -15
- mindspore/train/callback/_history.py +1 -1
- mindspore/train/callback/_lambda_callback.py +13 -13
- mindspore/train/callback/_landscape.py +21 -17
- mindspore/train/callback/_loss_monitor.py +9 -10
- mindspore/train/callback/_on_request_exit.py +16 -33
- mindspore/train/callback/_reduce_lr_on_plateau.py +21 -24
- mindspore/train/callback/_summary_collector.py +44 -30
- mindspore/train/callback/_time_monitor.py +62 -12
- mindspore/train/data_sink.py +10 -16
- mindspore/train/dataset_helper.py +154 -86
- mindspore/train/loss_scale_manager.py +14 -9
- mindspore/train/metrics/__init__.py +10 -2
- mindspore/train/metrics/accuracy.py +1 -1
- mindspore/train/metrics/auc.py +1 -1
- mindspore/train/metrics/bleu_score.py +2 -2
- mindspore/train/metrics/confusion_matrix.py +14 -14
- mindspore/train/metrics/cosine_similarity.py +3 -3
- mindspore/train/metrics/dice.py +1 -1
- mindspore/train/metrics/fbeta.py +1 -1
- mindspore/train/metrics/hausdorff_distance.py +8 -6
- mindspore/train/metrics/mean_surface_distance.py +5 -4
- mindspore/train/metrics/metric.py +49 -17
- mindspore/train/metrics/occlusion_sensitivity.py +4 -4
- mindspore/train/metrics/perplexity.py +1 -1
- mindspore/train/metrics/precision.py +2 -2
- mindspore/train/metrics/recall.py +2 -3
- mindspore/train/metrics/roc.py +7 -7
- mindspore/train/metrics/root_mean_square_surface_distance.py +5 -4
- mindspore/train/metrics/topk.py +7 -4
- mindspore/train/mind_ir_pb2.py +193 -48
- mindspore/train/model.py +377 -133
- mindspore/train/serialization.py +697 -245
- mindspore/train/summary/_summary_adapter.py +5 -2
- mindspore/train/summary/_writer_pool.py +4 -3
- mindspore/train/summary/summary_record.py +25 -23
- mindspore/train/train_thor/convert_utils.py +39 -23
- mindspore/train/train_thor/dataset_helper.py +4 -3
- mindspore/train/train_thor/model_thor.py +8 -8
- mindspore/version.py +1 -1
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/METADATA +7 -8
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/RECORD +647 -818
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/entry_points.txt +0 -1
- mindspore/_akg/akg/tvm/contrib/debugger/__init__.py +0 -16
- mindspore/_akg/akg/tvm/contrib/debugger/debug_result.py +0 -274
- mindspore/_akg/akg/tvm/contrib/debugger/debug_runtime.py +0 -259
- mindspore/_akg/akg/tvm/contrib/peak.py +0 -341
- mindspore/_akg/akg/tvm/contrib/rpc.py +0 -25
- mindspore/_akg/akg/tvm/contrib/xcode.py +0 -257
- mindspore/_akg/akg/tvm/exec/__init__.py +0 -17
- mindspore/_akg/akg/tvm/exec/autotvm_log_editor.py +0 -60
- mindspore/_akg/akg/tvm/exec/measure_peak.py +0 -48
- mindspore/_akg/akg/tvm/exec/query_rpc_tracker.py +0 -48
- mindspore/_akg/akg/tvm/exec/rpc_proxy.py +0 -98
- mindspore/_akg/akg/tvm/exec/rpc_server.py +0 -88
- mindspore/_akg/akg/tvm/exec/rpc_tracker.py +0 -62
- mindspore/_akg/akg/tvm/rpc/__init__.py +0 -29
- mindspore/_akg/akg/tvm/rpc/base.py +0 -182
- mindspore/_akg/akg/tvm/rpc/client.py +0 -436
- mindspore/_akg/akg/tvm/rpc/proxy.py +0 -595
- mindspore/_akg/akg/tvm/rpc/server.py +0 -413
- mindspore/_akg/akg/tvm/rpc/tornado_util.py +0 -121
- mindspore/_akg/akg/tvm/rpc/tracker.py +0 -431
- mindspore/_extends/graph_kernel/expander.py +0 -80
- mindspore/_extends/graph_kernel/expanders/__init__.py +0 -57
- mindspore/_extends/graph_kernel/expanders/_utils.py +0 -269
- mindspore/_extends/graph_kernel/expanders/addn.py +0 -33
- mindspore/_extends/graph_kernel/expanders/batchnorm.py +0 -152
- mindspore/_extends/graph_kernel/expanders/batchnorm_grad.py +0 -105
- mindspore/_extends/graph_kernel/expanders/bias_add_grad.py +0 -49
- mindspore/_extends/graph_kernel/expanders/clip_by_norm_no_div_sum.py +0 -33
- mindspore/_extends/graph_kernel/expanders/complex/abs.py +0 -30
- mindspore/_extends/graph_kernel/expanders/complex/add.py +0 -44
- mindspore/_extends/graph_kernel/expanders/complex/div.py +0 -62
- mindspore/_extends/graph_kernel/expanders/complex/mul.py +0 -52
- mindspore/_extends/graph_kernel/expanders/complex/real_div.py +0 -62
- mindspore/_extends/graph_kernel/expanders/complex/sub.py +0 -45
- mindspore/_extends/graph_kernel/expanders/conv2d.py +0 -200
- mindspore/_extends/graph_kernel/expanders/dropout_grad.py +0 -30
- mindspore/_extends/graph_kernel/expanders/equal_count.py +0 -50
- mindspore/_extends/graph_kernel/expanders/erfc.py +0 -35
- mindspore/_extends/graph_kernel/expanders/expand_dims.py +0 -50
- mindspore/_extends/graph_kernel/expanders/fused_adam.py +0 -44
- mindspore/_extends/graph_kernel/expanders/fused_adam_weight_decay.py +0 -47
- mindspore/_extends/graph_kernel/expanders/fused_mul_add.py +0 -28
- mindspore/_extends/graph_kernel/expanders/gather.py +0 -43
- mindspore/_extends/graph_kernel/expanders/gelu_grad.py +0 -70
- mindspore/_extends/graph_kernel/expanders/gkdropout.py +0 -40
- mindspore/_extends/graph_kernel/expanders/identity.py +0 -25
- mindspore/_extends/graph_kernel/expanders/layernorm.py +0 -93
- mindspore/_extends/graph_kernel/expanders/layernorm_grad.py +0 -113
- mindspore/_extends/graph_kernel/expanders/logsoftmax.py +0 -46
- mindspore/_extends/graph_kernel/expanders/logsoftmax_grad.py +0 -36
- mindspore/_extends/graph_kernel/expanders/matmul.py +0 -80
- mindspore/_extends/graph_kernel/expanders/maximum_grad.py +0 -59
- mindspore/_extends/graph_kernel/expanders/minimum_grad.py +0 -80
- mindspore/_extends/graph_kernel/expanders/oneslike.py +0 -26
- mindspore/_extends/graph_kernel/expanders/reduce_mean.py +0 -43
- mindspore/_extends/graph_kernel/expanders/relu_grad.py +0 -32
- mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits.py +0 -41
- mindspore/_extends/graph_kernel/expanders/sigmoid_cross_entropy_with_logits_grad.py +0 -35
- mindspore/_extends/graph_kernel/expanders/sigmoid_grad.py +0 -31
- mindspore/_extends/graph_kernel/expanders/slice.py +0 -35
- mindspore/_extends/graph_kernel/expanders/softmax_cross_entropy_with_logits.py +0 -42
- mindspore/_extends/graph_kernel/expanders/softmax_grad_ext.py +0 -41
- mindspore/_extends/graph_kernel/expanders/softsign.py +0 -28
- mindspore/_extends/graph_kernel/expanders/sqrt_grad.py +0 -29
- mindspore/_extends/graph_kernel/expanders/square_sum_all.py +0 -44
- mindspore/_extends/graph_kernel/expanders/square_sum_v1.py +0 -37
- mindspore/_extends/graph_kernel/expanders/squared_difference.py +0 -43
- mindspore/_extends/graph_kernel/expanders/tanh_grad.py +0 -31
- mindspore/_extends/graph_kernel/expanders/tile.py +0 -54
- mindspore/_extends/graph_kernel/model/op_infer.py +0 -506
- mindspore/_extends/parse/jit_fallback_modules.py +0 -51
- mindspore/dataset/datapreprocess/preprocess_imagenet_validate_dataset.py +0 -54
- mindspore/dataset/engine/graphdata.py +0 -1586
- mindspore/include/api/net.h +0 -142
- mindspore/ops/_grad/grad_array_ops.py +0 -1347
- mindspore/ops/_grad/grad_clip_ops.py +0 -84
- mindspore/ops/_grad/grad_debug_ops.py +0 -68
- mindspore/ops/_grad/grad_inner_ops.py +0 -235
- mindspore/ops/_grad/grad_math_ops.py +0 -1684
- mindspore/ops/_grad/grad_nn_ops.py +0 -1529
- mindspore/ops/_grad/grad_other_ops.py +0 -89
- mindspore/ops/_grad/grad_sequence_ops.py +0 -296
- mindspore/ops/_grad/grad_sparse.py +0 -323
- mindspore/ops/_grad_experimental/grad_image_ops.py +0 -249
- mindspore/ops/_grad_experimental/grad_linalg_ops.py +0 -195
- mindspore/ops/_grad_experimental/grad_scalar_ops.py +0 -112
- mindspore/ops/bprop_mindir/AdaptiveAvgPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/AdaptiveMaxPool2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ApproximateEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Argmax_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/Argmin_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/AssignSub_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Assign_bprop.mindir +0 -17
- mindspore/ops/bprop_mindir/AvgPool3D_bprop.mindir +0 -150
- mindspore/ops/bprop_mindir/AvgPool_bprop.mindir +0 -66
- mindspore/ops/bprop_mindir/BCEWithLogitsLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BNTrainingReduce_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/BatchNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BatchToSpaceND_bprop.mindir +0 -28
- mindspore/ops/bprop_mindir/BiasAddGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/BinaryCrossEntropy_bprop.mindir +0 -33
- mindspore/ops/bprop_mindir/BroadcastTo_bprop.mindir +0 -306
- mindspore/ops/bprop_mindir/Broadcast_bprop.mindir +0 -13
- mindspore/ops/bprop_mindir/CTCLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Concat_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Conv2DBackpropFilter_bprop.mindir +0 -240
- mindspore/ops/bprop_mindir/Conv2DBackpropInput_bprop.mindir +0 -247
- mindspore/ops/bprop_mindir/Conv2DTranspose_bprop.mindir +0 -247
- mindspore/ops/bprop_mindir/Conv3DTranspose_bprop.mindir +0 -315
- mindspore/ops/bprop_mindir/Conv3D_bprop.mindir +0 -278
- mindspore/ops/bprop_mindir/DType_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/DeformableOffsets_bprop.mindir +0 -58
- mindspore/ops/bprop_mindir/Depend_bprop.mindir +0 -13
- mindspore/ops/bprop_mindir/DepthToSpace_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/DepthwiseConv2dNative_bprop.mindir +0 -138
- mindspore/ops/bprop_mindir/DiagPart_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/Dropout2D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Dropout3D_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DropoutDoMask_bprop.mindir +0 -25
- mindspore/ops/bprop_mindir/DropoutGenMask_bprop.mindir +0 -18
- mindspore/ops/bprop_mindir/DropoutGrad_bprop.mindir +0 -27
- mindspore/ops/bprop_mindir/Dropout_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicGRUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicRNN_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/DynamicShape_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/Elu_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/EmbeddingLookup_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Equal_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/ExpandDims_bprop.mindir +0 -58
- mindspore/ops/bprop_mindir/FastGeLU_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Flatten_bprop.mindir +0 -54
- mindspore/ops/bprop_mindir/FloorDiv_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/GatherD_bprop.mindir +0 -26
- mindspore/ops/bprop_mindir/GatherNd_bprop.mindir +0 -57
- mindspore/ops/bprop_mindir/Gather_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/GreaterEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Greater_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/HSigmoid_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/HSwish_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/IOU_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/InstanceNorm_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/IsFinite_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/IsInf_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/IsNan_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/KLDivLoss_bprop.mindir +0 -126
- mindspore/ops/bprop_mindir/L2Loss_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/L2Normalize_bprop.mindir +0 -30
- mindspore/ops/bprop_mindir/LRN_bprop.mindir +0 -43
- mindspore/ops/bprop_mindir/LayerNormGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/LessEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/Less_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LinSpace_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/Load_bprop.mindir +0 -13
- mindspore/ops/bprop_mindir/LogSoftmax_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/LogicalAnd_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/LogicalNot_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/MaskedSelect_bprop.mindir +0 -21
- mindspore/ops/bprop_mindir/MaxPool3DGradGrad_bprop.mindir +0 -74
- mindspore/ops/bprop_mindir/MaxPool3DGrad_bprop.mindir +0 -74
- mindspore/ops/bprop_mindir/MaxPool3D_bprop.mindir +0 -75
- mindspore/ops/bprop_mindir/MaxPoolGradGrad_bprop.mindir +0 -65
- mindspore/ops/bprop_mindir/MaxPoolWithArgmax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Maximum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Minimum_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/MirrorPad_bprop.mindir +0 -27
- mindspore/ops/bprop_mindir/Mish_bprop.mindir +0 -35
- mindspore/ops/bprop_mindir/MulNoNan_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NLLLoss_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/NonZero_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/NotEqual_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/OneHot_bprop.mindir +0 -26
- mindspore/ops/bprop_mindir/OnesLike_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/PReLU_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Pad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Padding_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/RNNTLoss_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/ROIAlign_bprop.mindir +0 -82
- mindspore/ops/bprop_mindir/Range_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/Rank_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/ReLU6_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/ReLUV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ReduceAll_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/ReduceAny_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/ReluGrad_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/Reshape_bprop.mindir +0 -60
- mindspore/ops/bprop_mindir/ResizeBilinear_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/ResizeNearestNeighbor_bprop.mindir +0 -89
- mindspore/ops/bprop_mindir/ReverseSequence_bprop.mindir +0 -52
- mindspore/ops/bprop_mindir/ReverseV2_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/Round_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/ScatterMax_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterMin_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/ScatterNdUpdate_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/ScatterNd_bprop.mindir +0 -24
- mindspore/ops/bprop_mindir/ScatterNonAliasingAdd_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/ScatterUpdate_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SeLU_bprop.mindir +0 -21
- mindspore/ops/bprop_mindir/Select_bprop.mindir +0 -31
- mindspore/ops/bprop_mindir/Shape_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/SigmoidCrossEntropyWithLogits_bprop.mindir +0 -21
- mindspore/ops/bprop_mindir/SigmoidGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Sigmoid_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Sign_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/Slice_bprop.mindir +0 -26
- mindspore/ops/bprop_mindir/SmoothL1Loss_bprop.mindir +0 -36
- mindspore/ops/bprop_mindir/SoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Softplus_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Softsign_bprop.mindir +0 -33
- mindspore/ops/bprop_mindir/Sort_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SpaceToBatchND_bprop.mindir +0 -28
- mindspore/ops/bprop_mindir/SpaceToDepth_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/SparseGatherV2_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/SparseSoftmaxCrossEntropyWithLogits_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Split_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/Squeeze_bprop.mindir +0 -54
- mindspore/ops/bprop_mindir/StridedSliceGrad_bprop.mindir +0 -95
- mindspore/ops/bprop_mindir/StridedSlice_bprop.mindir +0 -98
- mindspore/ops/bprop_mindir/Switch_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/TanhGrad_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/Tanh_bprop.mindir +0 -66
- mindspore/ops/bprop_mindir/TensorScatterAdd_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/TensorScatterUpdate_bprop.mindir +0 -29
- mindspore/ops/bprop_mindir/TensorShape_bprop.mindir +0 -14
- mindspore/ops/bprop_mindir/Tile_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TopK_bprop.mindir +0 -0
- mindspore/ops/bprop_mindir/TransShape_bprop.mindir +0 -23
- mindspore/ops/bprop_mindir/TruncateDiv_bprop.mindir +0 -19
- mindspore/ops/bprop_mindir/TupleGetItem_bprop.mindir +0 -20
- mindspore/ops/bprop_mindir/Unique_bprop.mindir +0 -16
- mindspore/ops/bprop_mindir/Unstack_bprop.mindir +0 -22
- mindspore/ops/bprop_mindir/UpsampleNearest3D_bprop.mindir +0 -32
- mindspore/ops/bprop_mindir/UpsampleTrilinear3D_bprop.mindir +0 -38
- mindspore/ops/bprop_mindir/ZerosLike_bprop.mindir +0 -15
- mindspore/ops/bprop_mindir/generate_mindir.py +0 -114
- mindspore/rewrite/node_visitor.py +0 -44
- mindspore/rewrite/topological_manager.py +0 -203
- mindspore/scipy/sparse/linalg.py +0 -192
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/WHEEL +0 -0
- {mindspore-2.0.0rc1.dist-info → mindspore-2.2.0.dist-info}/top_level.txt +0 -0
mindspore/common/api.py
CHANGED
|
@@ -25,13 +25,15 @@ import ast
|
|
|
25
25
|
import inspect
|
|
26
26
|
import importlib
|
|
27
27
|
import hashlib
|
|
28
|
-
|
|
28
|
+
import contextlib
|
|
29
|
+
from collections import OrderedDict, namedtuple
|
|
29
30
|
from functools import wraps
|
|
30
31
|
import numpy as np
|
|
31
32
|
import mindspore as ms
|
|
32
33
|
from mindspore import context
|
|
33
34
|
from mindspore import log as logger
|
|
34
35
|
from mindspore._extends.remote import kernel_build_server
|
|
36
|
+
from mindspore.common.jit_config import JitConfig
|
|
35
37
|
from mindspore.common.tensor import Tensor as PythonTensor
|
|
36
38
|
from mindspore.common.sparse_tensor import CSRTensor as PythonCSRTensor
|
|
37
39
|
from mindspore.common.sparse_tensor import COOTensor as PythonCOOTensor
|
|
@@ -41,17 +43,21 @@ from mindspore._c_expression import GraphExecutor_, Tensor, CSRTensor, RowTensor
|
|
|
41
43
|
_ms_memory_recycle, _bind_device_ctx
|
|
42
44
|
from mindspore.parallel._ps_context import _is_role_sched
|
|
43
45
|
from mindspore.parallel._utils import _check_full_batch, _get_parameter_broadcast, _is_pynative_parallel, \
|
|
44
|
-
|
|
46
|
+
_is_in_auto_parallel_mode
|
|
45
47
|
from mindspore import _checkparam as Validator
|
|
46
48
|
from mindspore._checkparam import is_stub_tensor
|
|
47
49
|
from mindspore.common._utils import is_shape_unknown
|
|
48
50
|
from mindspore.common.mutable import mutable
|
|
49
51
|
from mindspore.common._register_for_adapter import ms_adapter_registry
|
|
52
|
+
from mindspore.common.auto_dynamic_shape import get_auto_dynamic_shape_args, update_auto_dynamic_shape_phase, \
|
|
53
|
+
get_auto_dynamic_shape_args_with_check_input_signature, update_auto_dynamic_shape_phase_with_check_input_signature
|
|
50
54
|
|
|
51
|
-
#
|
|
55
|
+
# Store ms_function class compiled pipeline cache.
|
|
52
56
|
ms_compile_cache = set()
|
|
53
|
-
#
|
|
57
|
+
# Store cell compiled pipeline cache.
|
|
54
58
|
cells_compile_cache = {}
|
|
59
|
+
# Store function compiled times information.
|
|
60
|
+
function_phases = dict()
|
|
55
61
|
|
|
56
62
|
BROADCAST_PHASE = "_broadcast_"
|
|
57
63
|
_PYNATIVE_PARALLEL_FUNC_NAME = "after_shard"
|
|
@@ -78,11 +84,24 @@ def _convert_python_data(data):
|
|
|
78
84
|
if isinstance(data, RowTensor) and not isinstance(data, PythonRowTensor):
|
|
79
85
|
return PythonRowTensor(row_tensor=data)
|
|
80
86
|
if isinstance(data, tuple):
|
|
87
|
+
# Handle namedtuple since its type is tuple.
|
|
88
|
+
if hasattr(data, "_fields"):
|
|
89
|
+
type_name = data.__class__.__name__
|
|
90
|
+
data_dict = data._asdict()
|
|
91
|
+
fields = data_dict.keys()
|
|
92
|
+
return namedtuple(type_name, fields)(**_convert_python_data(data_dict))
|
|
81
93
|
return tuple(_convert_python_data(x) for x in data)
|
|
82
94
|
if isinstance(data, list):
|
|
83
|
-
|
|
95
|
+
# Keep list object not change for inplace operation.
|
|
96
|
+
for i in range(len(data)):
|
|
97
|
+
data[i] = _convert_python_data(data[i])
|
|
98
|
+
return data
|
|
84
99
|
if isinstance(data, dict):
|
|
85
|
-
|
|
100
|
+
# Keep the dict object not change.
|
|
101
|
+
keys = tuple(data.keys())
|
|
102
|
+
for key in keys:
|
|
103
|
+
data[_convert_python_data(key)] = _convert_python_data(data.pop(key))
|
|
104
|
+
return data
|
|
86
105
|
return data
|
|
87
106
|
|
|
88
107
|
|
|
@@ -171,8 +190,7 @@ def __get_compile_cache_dep_files(file_path, compile_cache_dep_files, pkg):
|
|
|
171
190
|
if isinstance(node, ast.ImportFrom):
|
|
172
191
|
if node.module is not None:
|
|
173
192
|
module_name = node.module
|
|
174
|
-
|
|
175
|
-
module_name = "." + module_name
|
|
193
|
+
module_name = "." * node.level + module_name
|
|
176
194
|
elif not isinstance(node, ast.Import):
|
|
177
195
|
continue
|
|
178
196
|
# Do not care the files in mindspore package
|
|
@@ -297,7 +315,6 @@ class _MindsporeFunctionExecutor:
|
|
|
297
315
|
Returns:
|
|
298
316
|
The result of pipeline running in graph mode.
|
|
299
317
|
"""
|
|
300
|
-
|
|
301
318
|
def __init__(self, fn, ms_create_time, input_signature=None, obj=None, jit_config=None):
|
|
302
319
|
init_pipeline()
|
|
303
320
|
if not isinstance(fn, (types.FunctionType, types.MethodType)):
|
|
@@ -314,6 +331,7 @@ class _MindsporeFunctionExecutor:
|
|
|
314
331
|
self._create_time = ms_create_time
|
|
315
332
|
self.jit_config_dict = jit_config.jit_config_dict if jit_config else None
|
|
316
333
|
|
|
334
|
+
|
|
317
335
|
@_wrap_func
|
|
318
336
|
def __call__(self, *args, **kwargs):
|
|
319
337
|
args_list = args
|
|
@@ -322,9 +340,9 @@ class _MindsporeFunctionExecutor:
|
|
|
322
340
|
phase = ""
|
|
323
341
|
try:
|
|
324
342
|
if context.get_context("mode") == context.PYNATIVE_MODE:
|
|
325
|
-
_pynative_executor.
|
|
343
|
+
_pynative_executor.set_jit_compile_status(True, phase)
|
|
326
344
|
phase = self.compile(self.fn.__name__, *args_list, **kwargs)
|
|
327
|
-
_pynative_executor.
|
|
345
|
+
_pynative_executor.set_jit_compile_status(False, phase)
|
|
328
346
|
else:
|
|
329
347
|
phase = self.compile(self.fn.__name__, *args_list, **kwargs)
|
|
330
348
|
except Exception as err:
|
|
@@ -337,19 +355,11 @@ class _MindsporeFunctionExecutor:
|
|
|
337
355
|
new_inputs = self._generate_run_args(args_list, kwargs)
|
|
338
356
|
output = self._graph_executor(tuple(new_inputs), phase)
|
|
339
357
|
if context.get_context("mode") == context.PYNATIVE_MODE:
|
|
340
|
-
output = _pynative_executor.
|
|
341
|
-
|
|
342
|
-
enable_ge = os.getenv("MS_ENABLE_GE") == "1"
|
|
343
|
-
if enable_ge and self.jit_config_dict is None:
|
|
344
|
-
raise RuntimeError("GE and jit_level=O3 should be used together, but jit_config is None.")
|
|
345
|
-
if self.jit_config_dict:
|
|
346
|
-
enable_jit_level_o3 = self.jit_config_dict.get('jit_level') == "O3"
|
|
347
|
-
if (enable_ge and not enable_jit_level_o3) or (not enable_ge and enable_jit_level_o3):
|
|
348
|
-
raise RuntimeError("GE and jit_level=O3 should be used together, but got MS_ENABLE_GE={}, jit_level={}".
|
|
349
|
-
format(os.getenv("MS_ENABLE_GE"), self.jit_config_dict.get('jit_level')))
|
|
358
|
+
output = _pynative_executor.grad_jit(output, *new_inputs)
|
|
350
359
|
|
|
351
360
|
return output
|
|
352
361
|
|
|
362
|
+
|
|
353
363
|
def compile(self, method_name, *args, **kwargs):
|
|
354
364
|
"""Returns pipeline for the given args."""
|
|
355
365
|
# Check whether hook function registered on Cell object.
|
|
@@ -360,30 +370,38 @@ class _MindsporeFunctionExecutor:
|
|
|
360
370
|
f"pynative mode and remove 'jit' decorator.")
|
|
361
371
|
# Chose dynamic shape tensors or actual input tensors as compile args.
|
|
362
372
|
compile_args = self._generate_compile_args(args)
|
|
373
|
+
key_id = self._get_key_id()
|
|
374
|
+
compile_args = get_auto_dynamic_shape_args_with_check_input_signature(compile_args, key_id,
|
|
375
|
+
self.input_signature)
|
|
376
|
+
|
|
363
377
|
# Restore the mutable attr for every arg.
|
|
364
378
|
compile_args = _restore_mutable_attr(args, compile_args)
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
generate_name = generate_name + ".grad"
|
|
370
|
-
if _is_pynative_parallel():
|
|
371
|
-
generate_name = generate_name[:generate_name.rfind(str(id(self.fn)))] + str(id(self.shard_parent_obj))
|
|
379
|
+
generate_name, echo_function_name = self._get_generate_name()
|
|
380
|
+
# The full Function name
|
|
381
|
+
full_function_name = generate_name
|
|
382
|
+
create_time = ''
|
|
372
383
|
|
|
373
384
|
# Add key with obj
|
|
374
385
|
if self.obj is not None:
|
|
375
386
|
if self.obj.__module__ != self.fn.__module__:
|
|
376
|
-
logger.info(
|
|
387
|
+
logger.info(
|
|
388
|
+
f'The module of `self.obj`: `{self.obj.__module__}` is not same with the module of `self.fn`: '
|
|
389
|
+
f'`{self.fn.__module__}`')
|
|
377
390
|
self.obj.__parse_method__ = method_name
|
|
378
391
|
if isinstance(self.obj, ms.nn.Cell):
|
|
379
392
|
generate_name = generate_name + '.' + str(self.obj.create_time)
|
|
393
|
+
create_time = str(self.obj.create_time)
|
|
380
394
|
else:
|
|
381
395
|
generate_name = generate_name + '.' + str(self._create_time)
|
|
396
|
+
create_time = str(self._create_time)
|
|
397
|
+
|
|
382
398
|
generate_name = generate_name + '.' + str(id(self.obj))
|
|
399
|
+
full_function_name = generate_name
|
|
383
400
|
else:
|
|
384
401
|
# Different instance of same class may use same memory(means same obj_id) at diff times.
|
|
385
402
|
# To avoid unexpected phase matched, add create_time to generate_name.
|
|
386
403
|
generate_name = generate_name + '.' + str(self._create_time)
|
|
404
|
+
create_time = str(self._create_time)
|
|
387
405
|
|
|
388
406
|
self.enable_tuple_broaden = False
|
|
389
407
|
if hasattr(self.obj, "enable_tuple_broaden"):
|
|
@@ -392,16 +410,33 @@ class _MindsporeFunctionExecutor:
|
|
|
392
410
|
self._graph_executor.set_enable_tuple_broaden(self.enable_tuple_broaden)
|
|
393
411
|
key = self._graph_executor.generate_arguments_key(self.fn, compile_args, kwargs, self.enable_tuple_broaden)
|
|
394
412
|
phase = generate_name + '.' + str(key)
|
|
413
|
+
|
|
414
|
+
update_auto_dynamic_shape_phase_with_check_input_signature(compile_args, key_id, phase, self.input_signature)
|
|
415
|
+
|
|
395
416
|
if phase in ms_compile_cache:
|
|
396
417
|
return phase
|
|
397
418
|
|
|
419
|
+
self._check_recompile(full_function_name, create_time, echo_function_name)
|
|
420
|
+
|
|
398
421
|
# If enable compile cache, get the dependency files list and set to graph executor.
|
|
399
422
|
self._set_compile_cache_dep_files()
|
|
400
423
|
if self.jit_config_dict:
|
|
401
424
|
self._graph_executor.set_jit_config(self.jit_config_dict)
|
|
425
|
+
else:
|
|
426
|
+
jit_config_dict = JitConfig().jit_config_dict
|
|
427
|
+
self._graph_executor.set_jit_config(jit_config_dict)
|
|
402
428
|
|
|
403
429
|
if self.obj is None:
|
|
430
|
+
# Set an attribute to fn as an identifier.
|
|
431
|
+
if isinstance(self.fn, types.MethodType):
|
|
432
|
+
setattr(self.fn.__func__, "__jit_function__", True)
|
|
433
|
+
else:
|
|
434
|
+
setattr(self.fn, "__jit_function__", True)
|
|
404
435
|
is_compile = self._graph_executor.compile(self.fn, compile_args, kwargs, phase, True)
|
|
436
|
+
if isinstance(self.fn, types.MethodType):
|
|
437
|
+
delattr(self.fn.__func__, "__jit_function__")
|
|
438
|
+
else:
|
|
439
|
+
delattr(self.fn, "__jit_function__")
|
|
405
440
|
else:
|
|
406
441
|
if isinstance(self.obj, ms.nn.Cell):
|
|
407
442
|
self._graph_executor.set_weights_values(self.obj.parameters_dict())
|
|
@@ -410,8 +445,32 @@ class _MindsporeFunctionExecutor:
|
|
|
410
445
|
if not is_compile:
|
|
411
446
|
raise RuntimeError("Executor compile failed.")
|
|
412
447
|
ms_compile_cache.add(phase)
|
|
448
|
+
|
|
413
449
|
return phase
|
|
414
450
|
|
|
451
|
+
def _check_recompile(self, full_function_name, create_time, echo_function_name):
|
|
452
|
+
"""Warning when the function has been compiled."""
|
|
453
|
+
ignore_dirs = ["mindspore/ops", "mindspore/nn"]
|
|
454
|
+
if any((lambda x: x in full_function_name)(x) for x in ignore_dirs):
|
|
455
|
+
return
|
|
456
|
+
|
|
457
|
+
if full_function_name in function_phases:
|
|
458
|
+
warning_times = 1
|
|
459
|
+
if len(function_phases[full_function_name]) >= warning_times \
|
|
460
|
+
and create_time not in function_phases[full_function_name]:
|
|
461
|
+
tips = "Try to decorate the function with @jit(hash_args=...) " \
|
|
462
|
+
"or @jit(compile_once=True) to reduce the compile time. " \
|
|
463
|
+
"For more details, get instructions about `jit` at " \
|
|
464
|
+
"https://www.mindspore.cn/search?inputValue=jit."
|
|
465
|
+
|
|
466
|
+
logger.warning(f"The {echo_function_name} has been compiled again. "
|
|
467
|
+
f"{tips} ")
|
|
468
|
+
else:
|
|
469
|
+
function_phases[full_function_name] = set()
|
|
470
|
+
|
|
471
|
+
function_phases[full_function_name].add(create_time)
|
|
472
|
+
|
|
473
|
+
|
|
415
474
|
@staticmethod
|
|
416
475
|
def _optimizer_state_init(opt_states):
|
|
417
476
|
"""set data for all optimizer states in case it is executed in graph mode"""
|
|
@@ -422,18 +481,45 @@ class _MindsporeFunctionExecutor:
|
|
|
422
481
|
if opt_param.has_init and (prefix in prefix_list or opt_param.name == "global_step"):
|
|
423
482
|
opt_param.init_data()
|
|
424
483
|
|
|
484
|
+
|
|
485
|
+
def _get_key_id(self):
|
|
486
|
+
"""get key id."""
|
|
487
|
+
if isinstance(self.obj, ms.nn.Cell):
|
|
488
|
+
key_id = str(id(self.obj)) + str(self.obj.create_time)
|
|
489
|
+
else:
|
|
490
|
+
key_id = str(id(self.obj)) + str(self._create_time)
|
|
491
|
+
|
|
492
|
+
if _pynative_executor.grad_flag():
|
|
493
|
+
key_id = key_id + ".grad"
|
|
494
|
+
return key_id
|
|
495
|
+
|
|
496
|
+
|
|
497
|
+
def _get_generate_name(self):
|
|
498
|
+
"""get generate name."""
|
|
499
|
+
generate_name = self.fn.__module__ + "." + self.fn.__name__ + "." + self.fn.__code__.co_filename + "." + str(
|
|
500
|
+
self.fn.__code__.co_firstlineno)
|
|
501
|
+
echo_function_name = "function \"" + self.fn.__name__ + "\" at the file \"" + self.fn.__code__.co_filename \
|
|
502
|
+
+ "\", line " + str(self.fn.__code__.co_firstlineno)
|
|
503
|
+
if _pynative_executor.grad_flag():
|
|
504
|
+
generate_name = generate_name + ".grad"
|
|
505
|
+
if _is_pynative_parallel():
|
|
506
|
+
generate_name = generate_name[:generate_name.rfind(str(id(self.fn)))] + str(id(self.shard_parent_obj))
|
|
507
|
+
return generate_name, echo_function_name
|
|
508
|
+
|
|
509
|
+
|
|
425
510
|
def _set_compile_cache_dep_files(self):
|
|
426
511
|
# If enable compile cache, get the dependency files list
|
|
427
512
|
enable_compile_cache = context.get_context("enable_compile_cache")
|
|
428
|
-
if enable_compile_cache is
|
|
513
|
+
if enable_compile_cache is not True and enable_compile_cache != "1":
|
|
429
514
|
enable_compile_cache = os.getenv('MS_COMPILER_CACHE_ENABLE')
|
|
430
515
|
if enable_compile_cache is True or enable_compile_cache == "1":
|
|
431
516
|
self._graph_executor.set_compile_cache_dep_files(_get_compile_cache_dep_files())
|
|
432
517
|
|
|
518
|
+
|
|
433
519
|
def _generate_compile_args(self, args_list):
|
|
434
520
|
"""Chose dynamic shape tensors or actual input tensors as compile args."""
|
|
435
521
|
# Case: If the shape of input args is dynamic, get dynamic shape tensor from context and use it to compile.
|
|
436
|
-
compile_args = args_list
|
|
522
|
+
compile_args = _pynative_executor.get_dynamic_input(args_list)
|
|
437
523
|
# Case: The `set_inputs()` of Cell object has been set, using these dynamic shape args as compile args.
|
|
438
524
|
if self.fn.__name__ == 'construct' and isinstance(self.obj, ms.nn.Cell) and self.obj.get_inputs():
|
|
439
525
|
compile_args = self.obj.get_inputs()
|
|
@@ -462,7 +548,10 @@ class _MindsporeFunctionExecutor:
|
|
|
462
548
|
f"be 'sens' and added it to compile args.")
|
|
463
549
|
self.input_signature.append(args_list[-1])
|
|
464
550
|
compile_args = tuple(self.input_signature)
|
|
465
|
-
|
|
551
|
+
if self.obj is not None:
|
|
552
|
+
_pynative_executor.set_dynamic_input(self.obj, *compile_args)
|
|
553
|
+
else:
|
|
554
|
+
_pynative_executor.set_dynamic_input(self.fn, *compile_args)
|
|
466
555
|
else:
|
|
467
556
|
if not verify_inputs_signature(self.input_signature, args_list):
|
|
468
557
|
raise ValueError("The input args is incompatible with the args in `input_signature`!")
|
|
@@ -500,14 +589,14 @@ def _get_obj_id(input_obj):
|
|
|
500
589
|
return obj_id + str(id(input_obj))
|
|
501
590
|
|
|
502
591
|
|
|
503
|
-
def
|
|
592
|
+
def _get_jit_hash(hash_input):
|
|
504
593
|
"""Get hash value of single object or list of objects."""
|
|
505
594
|
if isinstance(list, tuple):
|
|
506
595
|
return ".".join(map(_get_obj_id, hash_input))
|
|
507
596
|
return _get_obj_id(hash_input)
|
|
508
597
|
|
|
509
598
|
|
|
510
|
-
def jit(fn=None, input_signature=None, hash_args=None, jit_config=None):
|
|
599
|
+
def jit(fn=None, input_signature=None, hash_args=None, jit_config=None, compile_once=False):
|
|
511
600
|
"""
|
|
512
601
|
Create a callable MindSpore graph from a Python function.
|
|
513
602
|
|
|
@@ -518,15 +607,19 @@ def jit(fn=None, input_signature=None, hash_args=None, jit_config=None):
|
|
|
518
607
|
will not accept `**kwargs`.
|
|
519
608
|
|
|
520
609
|
Args:
|
|
521
|
-
fn (Function): The Python function that will be run as a graph. Default: None.
|
|
610
|
+
fn (Function): The Python function that will be run as a graph. Default: ``None`` .
|
|
522
611
|
input_signature (Tensor): The Tensor which describes the input arguments. The shape and dtype of the Tensor
|
|
523
612
|
will be supplied to this function. If input_signature is specified, each input to `fn` must be a `Tensor`.
|
|
524
613
|
And the input parameters of `fn` cannot accept `**kwargs`. The shape and dtype of actual inputs should
|
|
525
|
-
keep the same as input_signature. Otherwise, TypeError will be raised. Default: None.
|
|
614
|
+
keep the same as input_signature. Otherwise, TypeError will be raised. Default: ``None`` .
|
|
526
615
|
hash_args (Union[Object, List or Tuple of Objects]): The local free variables used inside `fn`,
|
|
527
616
|
like functions or objects of class defined outside `fn`. Calling `fn` again with change of `hash_args`
|
|
528
|
-
will trigger recompilation.
|
|
529
|
-
jit_config (JitConfig): Jit config for compile. Default: None.
|
|
617
|
+
will trigger recompilation. Default: ``None`` .
|
|
618
|
+
jit_config (JitConfig): Jit config for compile. Default: ``None`` .
|
|
619
|
+
compile_once(bool): ``True``: The function would be compiled once when it was created many times.
|
|
620
|
+
But it may be wrong if the free variables were changed. ``False`` : It would be recompiled when
|
|
621
|
+
it was created again
|
|
622
|
+
Default: ``False`` .
|
|
530
623
|
|
|
531
624
|
Returns:
|
|
532
625
|
Function, if `fn` is not None, returns a callable function that will execute the compiled function; If `fn` is
|
|
@@ -570,7 +663,7 @@ def jit(fn=None, input_signature=None, hash_args=None, jit_config=None):
|
|
|
570
663
|
...
|
|
571
664
|
>>> out = tensor_add_with_sig(x, y)
|
|
572
665
|
...
|
|
573
|
-
... # Set hash_args as fn, otherwise cache of compiled
|
|
666
|
+
... # Set hash_args as fn, otherwise cache of compiled closure_fn will not be reused.
|
|
574
667
|
... # While fn differs during calling again, recompilation will be triggered.
|
|
575
668
|
>>> def func(x):
|
|
576
669
|
... return ops.exp(x)
|
|
@@ -584,11 +677,28 @@ def jit(fn=None, input_signature=None, hash_args=None, jit_config=None):
|
|
|
584
677
|
>>> inputs = Tensor(np.ones([10, 10, 10]).astype(np.float32))
|
|
585
678
|
>>> for i in range(10):
|
|
586
679
|
... closure_fn(inputs, func)
|
|
680
|
+
...
|
|
681
|
+
... # Set compile_once = True, otherwise the train_step will be compiled again.
|
|
682
|
+
>>> def train(x):
|
|
683
|
+
... @jit(compile_once = True)
|
|
684
|
+
... def train_step(x):
|
|
685
|
+
... return ops.exp(x)
|
|
686
|
+
... for i in range(10):
|
|
687
|
+
... train_step(x)
|
|
688
|
+
...
|
|
689
|
+
>>> inputs = Tensor(np.ones([10, 10, 10]).astype(np.float32))
|
|
690
|
+
>>> for i in range(10):
|
|
691
|
+
... train(inputs)
|
|
587
692
|
"""
|
|
588
693
|
|
|
589
694
|
def wrap_mindspore(func):
|
|
695
|
+
if not isinstance(compile_once, bool):
|
|
696
|
+
logger.warning(f"The parameter `compile_once` of jit should be a bool, "
|
|
697
|
+
f"but got {type(compile_once)}.")
|
|
590
698
|
if hash_args:
|
|
591
|
-
hash_obj =
|
|
699
|
+
hash_obj = _get_jit_hash(hash_args)
|
|
700
|
+
elif compile_once:
|
|
701
|
+
hash_obj = 0
|
|
592
702
|
else:
|
|
593
703
|
hash_obj = int(time.time() * 1e9)
|
|
594
704
|
|
|
@@ -627,15 +737,15 @@ def ms_function(fn=None, input_signature=None, hash_args=None, jit_config=None):
|
|
|
627
737
|
will not accept `**kwargs`.
|
|
628
738
|
|
|
629
739
|
Args:
|
|
630
|
-
fn (Function): The Python function that will be run as a graph. Default: None.
|
|
740
|
+
fn (Function): The Python function that will be run as a graph. Default: ``None`` .
|
|
631
741
|
input_signature (Tensor): The Tensor which describes the input arguments. The shape and dtype of the Tensor
|
|
632
742
|
will be supplied to this function. If input_signature is specified, each input to `fn` must be a `Tensor`.
|
|
633
743
|
And the input parameters of `fn` cannot accept `**kwargs`. The shape and dtype of actual inputs should
|
|
634
|
-
keep the same as input_signature. Otherwise, TypeError will be raised. Default: None.
|
|
744
|
+
keep the same as input_signature. Otherwise, TypeError will be raised. Default: ``None`` .
|
|
635
745
|
hash_args (Union[Object, List or Tuple of Objects]): The local free variables used inside `fn`,
|
|
636
746
|
like functions or objects of class defined outside `fn`. Calling `fn` again with change of `hash_args`
|
|
637
|
-
will trigger recompilation.
|
|
638
|
-
jit_config (JitConfig): Jit config for compile. Default: None.
|
|
747
|
+
will trigger recompilation. Default: ``None`` .
|
|
748
|
+
jit_config (JitConfig): Jit config for compile. Default: ``None`` .
|
|
639
749
|
|
|
640
750
|
Returns:
|
|
641
751
|
Function, if `fn` is not None, returns a callable function that will execute the compiled function; If `fn` is
|
|
@@ -708,9 +818,9 @@ def _core(fn=None, **flags):
|
|
|
708
818
|
set flag to a graph.
|
|
709
819
|
|
|
710
820
|
Args:
|
|
711
|
-
fn (Function): Function to add flag. Default: None
|
|
821
|
+
fn (Function): Function to add flag. Default: ``None``.
|
|
712
822
|
flags (dict): The following flags can be set core, which indicates that this is a core function or
|
|
713
|
-
other flag. Default: None
|
|
823
|
+
other flag. Default: ``None``.
|
|
714
824
|
|
|
715
825
|
Returns:
|
|
716
826
|
Function, the function with core flag.
|
|
@@ -742,8 +852,8 @@ def _add_flags(fn=None, **flags):
|
|
|
742
852
|
Only supports bool value.
|
|
743
853
|
|
|
744
854
|
Args:
|
|
745
|
-
fn (Function): Function or cell to add flag. Default: None
|
|
746
|
-
flags (dict): Flags use kwargs. Default: None
|
|
855
|
+
fn (Function): Function or cell to add flag. Default: ``None``.
|
|
856
|
+
flags (dict): Flags use kwargs. Default: ``None``.
|
|
747
857
|
|
|
748
858
|
Returns:
|
|
749
859
|
Function, the function with added flags.
|
|
@@ -785,8 +895,8 @@ def _no_recursive(callable_obj):
|
|
|
785
895
|
Supported Platforms:
|
|
786
896
|
``Ascend`` ``GPU`` ``CPU``
|
|
787
897
|
"""
|
|
788
|
-
|
|
789
|
-
if not
|
|
898
|
+
is_cell_subclass = inspect.isclass(callable_obj) and issubclass(callable_obj, ms.nn.Cell)
|
|
899
|
+
if not is_cell_subclass and not inspect.ismethod(callable_obj) and not inspect.isfunction(callable_obj):
|
|
790
900
|
raise TypeError(f"Decorator no_recursive is used for callable object, but got {callable_obj}.")
|
|
791
901
|
_add_flags(callable_obj, no_recursive=True)
|
|
792
902
|
return callable_obj
|
|
@@ -900,12 +1010,12 @@ def jit_class(cls):
|
|
|
900
1010
|
>>> print(out)
|
|
901
1011
|
20
|
|
902
1012
|
"""
|
|
903
|
-
|
|
1013
|
+
from mindspore import nn
|
|
904
1014
|
# Check if cls is of type class.
|
|
905
1015
|
if not inspect.isclass(cls):
|
|
906
1016
|
raise TypeError(f'Decorator jit_class can only be used for class type, but got {cls}.')
|
|
907
1017
|
# Check if cls is nn.Cell.
|
|
908
|
-
if issubclass(cls,
|
|
1018
|
+
if issubclass(cls, nn.Cell):
|
|
909
1019
|
raise TypeError(f"Decorator jit_class is used for user-defined classes and cannot be used for nn.Cell: {cls}.")
|
|
910
1020
|
setattr(cls, '__ms_class__', True)
|
|
911
1021
|
return cls
|
|
@@ -922,8 +1032,9 @@ def set_adapter_config(config):
|
|
|
922
1032
|
raise TypeError(f"The input argument of 'set_adapter_config' should be a dict, but got {config}.")
|
|
923
1033
|
for key, value in config.items():
|
|
924
1034
|
if key == "Tensor":
|
|
925
|
-
setattr(value, "__adapter_tensor__", True)
|
|
926
1035
|
ms_adapter_registry.register_tensor(value)
|
|
1036
|
+
elif key == "Parameter":
|
|
1037
|
+
ms_adapter_registry.register_parameter(value)
|
|
927
1038
|
elif key == "convert_object_map":
|
|
928
1039
|
ms_adapter_registry.register_convert_map(value)
|
|
929
1040
|
else:
|
|
@@ -982,6 +1093,27 @@ def _parameter_broadcast(obj):
|
|
|
982
1093
|
_build_broadcast_graph(broadcast_params_dict, broadcast_phase)
|
|
983
1094
|
|
|
984
1095
|
|
|
1096
|
+
class _no_grad(contextlib.ContextDecorator):
|
|
1097
|
+
"""
|
|
1098
|
+
Context Manager to disable gradient calculation. When enter this context, we will disable calculate
|
|
1099
|
+
gradient. When exit this context, we will resume its prev state.
|
|
1100
|
+
Currently, it can only use in Pynative mode. It also can be used as decorator.
|
|
1101
|
+
"""
|
|
1102
|
+
|
|
1103
|
+
def __init__(self):
|
|
1104
|
+
self.prev_state = False
|
|
1105
|
+
|
|
1106
|
+
def __enter__(self):
|
|
1107
|
+
if context.get_context("mode") == context.GRAPH_MODE:
|
|
1108
|
+
raise RuntimeError("For no_grad feature, currently only support Pynative mode, but got Graph mode.")
|
|
1109
|
+
self.prev_state = _pynative_executor.enable_grad()
|
|
1110
|
+
_pynative_executor.set_enable_grad(False)
|
|
1111
|
+
|
|
1112
|
+
def __exit__(self, exc_type, exc_val, exc_tb):
|
|
1113
|
+
_pynative_executor.set_enable_grad(self.prev_state)
|
|
1114
|
+
return False
|
|
1115
|
+
|
|
1116
|
+
|
|
985
1117
|
class _PyNativeExecutor:
|
|
986
1118
|
"""
|
|
987
1119
|
A pynative executor used to compile/manage/run single op.
|
|
@@ -1041,18 +1173,17 @@ class _PyNativeExecutor:
|
|
|
1041
1173
|
"""
|
|
1042
1174
|
return self._executor.real_run_op(*args)
|
|
1043
1175
|
|
|
1044
|
-
def run_op_async(self,
|
|
1176
|
+
def run_op_async(self, *args):
|
|
1045
1177
|
"""
|
|
1046
1178
|
Run single op async.
|
|
1047
1179
|
|
|
1048
1180
|
Args:
|
|
1049
|
-
|
|
1050
|
-
args (tuple): input arguments.
|
|
1181
|
+
args (tuple): Op prim and input arguments.
|
|
1051
1182
|
|
|
1052
1183
|
Return:
|
|
1053
1184
|
StubNode, result of run op.
|
|
1054
1185
|
"""
|
|
1055
|
-
return self._executor.run_op_async(
|
|
1186
|
+
return self._executor.run_op_async(*args)
|
|
1056
1187
|
|
|
1057
1188
|
def new_graph(self, obj, *args, **kwargs):
|
|
1058
1189
|
"""
|
|
@@ -1135,30 +1266,18 @@ class _PyNativeExecutor:
|
|
|
1135
1266
|
"""
|
|
1136
1267
|
self._executor.sync()
|
|
1137
1268
|
|
|
1138
|
-
def
|
|
1269
|
+
def grad_jit(self, output, *args):
|
|
1139
1270
|
"""
|
|
1140
|
-
|
|
1271
|
+
Building grad graph decorated by jit.
|
|
1141
1272
|
|
|
1142
1273
|
Args:
|
|
1143
|
-
|
|
1274
|
+
output (tuple): The function or cell decorated by jit output object.
|
|
1275
|
+
args (tuple): Function or cell decorated by jit input arguments.
|
|
1144
1276
|
|
|
1145
1277
|
Return:
|
|
1146
1278
|
None.
|
|
1147
1279
|
"""
|
|
1148
|
-
self._executor.
|
|
1149
|
-
|
|
1150
|
-
def grad_ms_function(self, output, *args):
|
|
1151
|
-
"""
|
|
1152
|
-
Building grad graph decorated by ms_function.
|
|
1153
|
-
|
|
1154
|
-
Args:
|
|
1155
|
-
output (tuple): The function or cell decorated by ms_function output object.
|
|
1156
|
-
args (tuple): Function or cell decorated by ms_function input arguments.
|
|
1157
|
-
|
|
1158
|
-
Return:
|
|
1159
|
-
None.
|
|
1160
|
-
"""
|
|
1161
|
-
return self._executor.grad_ms_function(output, *args)
|
|
1280
|
+
return self._executor.grad_jit(output, *args)
|
|
1162
1281
|
|
|
1163
1282
|
def grad_flag(self):
|
|
1164
1283
|
"""
|
|
@@ -1181,29 +1300,63 @@ class _PyNativeExecutor:
|
|
|
1181
1300
|
"""
|
|
1182
1301
|
self._executor.set_grad_flag(flag)
|
|
1183
1302
|
|
|
1184
|
-
def
|
|
1303
|
+
def enable_grad(self):
|
|
1304
|
+
"""
|
|
1305
|
+
The global flag whether needing to calculate gradient.
|
|
1306
|
+
|
|
1307
|
+
Return:
|
|
1308
|
+
bool, whether needing to calculate gradient.
|
|
1185
1309
|
"""
|
|
1186
|
-
|
|
1310
|
+
return self._executor.enable_grad()
|
|
1311
|
+
|
|
1312
|
+
def set_enable_grad(self, flag):
|
|
1313
|
+
"""
|
|
1314
|
+
Set the flag of calculating gradient.
|
|
1315
|
+
|
|
1316
|
+
Args:
|
|
1317
|
+
flag (bool): Specifying whether calculating gradient.
|
|
1318
|
+
|
|
1319
|
+
Return:
|
|
1320
|
+
None.
|
|
1321
|
+
"""
|
|
1322
|
+
self._executor.set_enable_grad(flag)
|
|
1323
|
+
|
|
1324
|
+
def set_jit_compile_status(self, status, phase):
|
|
1325
|
+
"""
|
|
1326
|
+
Set jit is compiling
|
|
1187
1327
|
|
|
1188
1328
|
Args:
|
|
1189
|
-
status(bool):
|
|
1329
|
+
status(bool): jit compile status
|
|
1190
1330
|
phase (str): The phase of cell/function instance.
|
|
1191
1331
|
Return:
|
|
1192
1332
|
None.
|
|
1193
1333
|
"""
|
|
1194
|
-
self._executor.
|
|
1334
|
+
self._executor.set_jit_compile_status(status, phase)
|
|
1195
1335
|
|
|
1196
|
-
def set_dynamic_input(self, obj):
|
|
1336
|
+
def set_dynamic_input(self, obj, *args):
|
|
1197
1337
|
"""
|
|
1198
1338
|
Set dynamic shape tensor of input arguments.
|
|
1199
1339
|
|
|
1200
1340
|
Args:
|
|
1201
1341
|
obj (Function/Cell): The function or cell instance.
|
|
1342
|
+
args (tuple): Function or cell dynamic input arguments.
|
|
1202
1343
|
|
|
1203
1344
|
Return:
|
|
1204
1345
|
None.
|
|
1205
1346
|
"""
|
|
1206
|
-
self._executor.set_dynamic_input(obj)
|
|
1347
|
+
self._executor.set_dynamic_input(obj, *args)
|
|
1348
|
+
|
|
1349
|
+
def get_dynamic_input(self, *actual_args):
|
|
1350
|
+
"""
|
|
1351
|
+
Get dynamic shape arguments according to actual input arguments.
|
|
1352
|
+
|
|
1353
|
+
Args:
|
|
1354
|
+
actual_args(tuple): Actual input arguments of Function or Cell.
|
|
1355
|
+
|
|
1356
|
+
Return:
|
|
1357
|
+
dynamic_shape_args(tuple): Dynamic shape arguments of Function or Cell.
|
|
1358
|
+
"""
|
|
1359
|
+
return self._executor.get_dynamic_input(*actual_args)
|
|
1207
1360
|
|
|
1208
1361
|
def is_first_cell(self):
|
|
1209
1362
|
"""
|
|
@@ -1227,7 +1380,6 @@ class _PyNativeExecutor:
|
|
|
1227
1380
|
"""
|
|
1228
1381
|
self._executor.set_hook_changed(cell)
|
|
1229
1382
|
|
|
1230
|
-
|
|
1231
1383
|
def get_top_cell(self):
|
|
1232
1384
|
"""
|
|
1233
1385
|
Get the top cell object.
|
|
@@ -1237,7 +1389,6 @@ class _PyNativeExecutor:
|
|
|
1237
1389
|
"""
|
|
1238
1390
|
return self._top_cell
|
|
1239
1391
|
|
|
1240
|
-
|
|
1241
1392
|
def constant_folding(self, *args):
|
|
1242
1393
|
"""
|
|
1243
1394
|
Get value by infer value.
|
|
@@ -1311,11 +1462,18 @@ class _CellGraphExecutor:
|
|
|
1311
1462
|
"""
|
|
1312
1463
|
self._graph_executor.set_queue_name(queue_name)
|
|
1313
1464
|
|
|
1314
|
-
def
|
|
1465
|
+
def get_queue_name(self, dataset_phase):
|
|
1466
|
+
"""
|
|
1467
|
+
Get cached queue name for the graph loaded from compile cache.
|
|
1468
|
+
:return: cached queue name
|
|
1469
|
+
"""
|
|
1470
|
+
return self._graph_executor.get_queue_name(dataset_phase)
|
|
1471
|
+
|
|
1472
|
+
@staticmethod
|
|
1473
|
+
def _set_dataset_mode(obj):
|
|
1315
1474
|
"""set dataset mode."""
|
|
1316
|
-
# decide whether to sink based on
|
|
1317
|
-
if
|
|
1318
|
-
(args_list is not None and args_list == ()):
|
|
1475
|
+
# decide whether to sink based on the sink_mode flag which is set in connect_network_with_dataset
|
|
1476
|
+
if 'sink_mode' in obj.get_flags().keys() and obj.get_flags()['sink_mode'] is True:
|
|
1319
1477
|
_set_dataset_mode_config('sink')
|
|
1320
1478
|
else:
|
|
1321
1479
|
_set_dataset_mode_config('normal')
|
|
@@ -1333,7 +1491,7 @@ class _CellGraphExecutor:
|
|
|
1333
1491
|
def _set_compile_cache_dep_files(self, phase):
|
|
1334
1492
|
# If enable compile cache, get the dependency files list
|
|
1335
1493
|
enable_compile_cache = context.get_context("enable_compile_cache")
|
|
1336
|
-
if enable_compile_cache is
|
|
1494
|
+
if enable_compile_cache is not True and enable_compile_cache != "1":
|
|
1337
1495
|
enable_compile_cache = os.getenv('MS_COMPILER_CACHE_ENABLE')
|
|
1338
1496
|
if "train" in phase and (enable_compile_cache is True or enable_compile_cache == "1"):
|
|
1339
1497
|
self._graph_executor.set_compile_cache_dep_files(_get_compile_cache_dep_files())
|
|
@@ -1346,7 +1504,7 @@ class _CellGraphExecutor:
|
|
|
1346
1504
|
obj (Function/Cell): The function or cell instance need compile.
|
|
1347
1505
|
phase (str): The name of compile phase. Default: 'predict'.
|
|
1348
1506
|
do_convert (bool): When set to True, convert ME graph to GE graph after compiling graph.
|
|
1349
|
-
jit_config_dict (dict): Jit config for compile. Default: None
|
|
1507
|
+
jit_config_dict (dict): Jit config for compile. Default: ``None``.
|
|
1350
1508
|
args (tuple): Args of the Cell object.
|
|
1351
1509
|
kwargs (dict): Kwargs of the Cell object.
|
|
1352
1510
|
|
|
@@ -1358,15 +1516,18 @@ class _CellGraphExecutor:
|
|
|
1358
1516
|
if not hasattr(obj, obj.__parse_method__):
|
|
1359
1517
|
raise AttributeError(
|
|
1360
1518
|
'The class {} dose not have method {}'.format(obj.__class__.__name__, obj.__parse_method__))
|
|
1519
|
+
key_id = str(id(obj)) + str(obj.create_time)
|
|
1520
|
+
args = get_auto_dynamic_shape_args(args, key_id)
|
|
1361
1521
|
|
|
1362
1522
|
self.enable_tuple_broaden = False
|
|
1363
1523
|
if hasattr(obj, "enable_tuple_broaden"):
|
|
1364
1524
|
self.enable_tuple_broaden = obj.enable_tuple_broaden
|
|
1365
|
-
|
|
1525
|
+
logger.debug("Convert the network.", do_convert)
|
|
1366
1526
|
self._graph_executor.set_enable_tuple_broaden(self.enable_tuple_broaden)
|
|
1367
1527
|
key = self._graph_executor.generate_arguments_key(obj, args, kwargs, self.enable_tuple_broaden)
|
|
1368
1528
|
obj.arguments_key = str(key)
|
|
1369
1529
|
phase = phase + '.' + str(obj.create_time) + '.' + str(id(obj)) + '.' + obj.arguments_key
|
|
1530
|
+
update_auto_dynamic_shape_phase(args, key_id, phase)
|
|
1370
1531
|
|
|
1371
1532
|
if phase in obj.compile_cache and self.has_compiled(phase):
|
|
1372
1533
|
logger.debug("%r graph has existed.", phase)
|
|
@@ -1374,13 +1535,15 @@ class _CellGraphExecutor:
|
|
|
1374
1535
|
|
|
1375
1536
|
obj.check_names()
|
|
1376
1537
|
_check_full_batch()
|
|
1377
|
-
self._set_dataset_mode(
|
|
1538
|
+
self._set_dataset_mode(obj)
|
|
1378
1539
|
self._set_compile_cache_dep_files(phase)
|
|
1379
1540
|
|
|
1380
|
-
enable_ge = context.get_context("enable_ge")
|
|
1381
1541
|
self._graph_executor.set_weights_values(obj.parameters_dict())
|
|
1382
1542
|
if jit_config_dict:
|
|
1383
1543
|
self._graph_executor.set_jit_config(jit_config_dict)
|
|
1544
|
+
else:
|
|
1545
|
+
jit_config_dict = JitConfig().jit_config_dict
|
|
1546
|
+
self._graph_executor.set_jit_config(jit_config_dict)
|
|
1384
1547
|
result = self._graph_executor.compile(obj, args, kwargs, phase, self._use_vm_mode())
|
|
1385
1548
|
obj.compile_cache.add(phase)
|
|
1386
1549
|
if not result:
|
|
@@ -1397,20 +1560,11 @@ class _CellGraphExecutor:
|
|
|
1397
1560
|
elif 'skip_auto_parallel_compile' not in obj.get_flags().keys():
|
|
1398
1561
|
obj.parameter_layout_dict = self._graph_executor.get_parameter_layout(phase)
|
|
1399
1562
|
obj.parallel_parameter_name_list = self._graph_executor.get_parallel_parameter_name_list(phase)
|
|
1400
|
-
if _get_pipeline_stages() > 1 and (not hasattr(obj, "is_first_iteration") or not obj.is_first_iteration):
|
|
1401
|
-
obj.remove_redundant_parameters()
|
|
1402
|
-
|
|
1403
|
-
if not do_convert:
|
|
1404
|
-
return phase, True
|
|
1405
1563
|
|
|
1406
|
-
|
|
1407
|
-
if enable_ge:
|
|
1408
|
-
pass
|
|
1409
|
-
elif "export" in phase:
|
|
1564
|
+
if "export.air" in phase:
|
|
1410
1565
|
self._build_data_graph(obj, phase)
|
|
1411
1566
|
elif BROADCAST_PHASE not in phase and _get_parameter_broadcast():
|
|
1412
1567
|
_parameter_broadcast(obj)
|
|
1413
|
-
|
|
1414
1568
|
return phase, True
|
|
1415
1569
|
|
|
1416
1570
|
def _update_param_node_default_input(self, phase, replace):
|
|
@@ -1489,6 +1643,16 @@ class _CellGraphExecutor:
|
|
|
1489
1643
|
branch_control_input = _generate_branch_control_input(obf_random_seed)
|
|
1490
1644
|
return branch_control_input
|
|
1491
1645
|
|
|
1646
|
+
def _get_func_graph(self, obj, exec_id, use_prefix=False):
|
|
1647
|
+
"""Get func graph from pipeline."""
|
|
1648
|
+
if use_prefix:
|
|
1649
|
+
exec_id = exec_id + '.' + obj.arguments_key
|
|
1650
|
+
if self._graph_executor.has_compiled(exec_id) is False:
|
|
1651
|
+
return None
|
|
1652
|
+
if self.obfuscate_config is not None:
|
|
1653
|
+
raise ValueError('For get func graph, obfuscate_config is currently not supported now.')
|
|
1654
|
+
return self._graph_executor.get_func_graph(exec_id)
|
|
1655
|
+
|
|
1492
1656
|
def _get_func_graph_proto(self, obj, exec_id, ir_type="onnx_ir", use_prefix=False, incremental=False):
|
|
1493
1657
|
"""Get graph proto from pipeline."""
|
|
1494
1658
|
if use_prefix:
|
|
@@ -1530,6 +1694,10 @@ def ms_memory_recycle():
|
|
|
1530
1694
|
When train multi Neural network models in one process, memory used by MindSpore is very large,
|
|
1531
1695
|
this is because MindSpore cached runtime memory for every model.
|
|
1532
1696
|
To recycle these cached memory, users can call this function after training of one model.
|
|
1697
|
+
|
|
1698
|
+
Examples:
|
|
1699
|
+
>>> import mindspore as ms
|
|
1700
|
+
>>> ms.ms_memory_recycle()
|
|
1533
1701
|
"""
|
|
1534
1702
|
if ms_compile_cache:
|
|
1535
1703
|
_cell_graph_executor.del_net_res(None, ms_compile_cache)
|