mindspore 2.3.0rc1__cp38-none-any.whl → 2.3.0rc2__cp38-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +1 -1
- mindspore/_akg/akg/utils/tbe_codegen_utils.py +13 -3
- mindspore/_c_dataengine.cpython-38-aarch64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-38-aarch64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +20 -0
- mindspore/_extends/parse/parser.py +1 -1
- mindspore/_extends/parse/standard_method.py +6 -5
- mindspore/_mindspore_offline_debug.cpython-38-aarch64-linux-gnu.so +0 -0
- mindspore/amp.py +5 -5
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/boost_cell_wrapper.py +1 -1
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +4 -2
- mindspore/common/_register_for_recompute.py +48 -0
- mindspore/common/_stub_tensor.py +1 -0
- mindspore/common/api.py +56 -4
- mindspore/common/dtype.py +5 -3
- mindspore/common/dump.py +2 -2
- mindspore/common/hook_handle.py +51 -4
- mindspore/common/initializer.py +1 -1
- mindspore/common/jit_config.py +17 -6
- mindspore/common/parameter.py +7 -2
- mindspore/common/recompute.py +247 -0
- mindspore/common/sparse_tensor.py +2 -2
- mindspore/common/symbol.py +1 -1
- mindspore/common/tensor.py +74 -36
- mindspore/communication/__init__.py +3 -3
- mindspore/communication/management.py +30 -30
- mindspore/context.py +28 -15
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +2 -2
- mindspore/dataset/audio/transforms.py +51 -51
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +3 -3
- mindspore/dataset/engine/datasets_audio.py +14 -14
- mindspore/dataset/engine/datasets_standard_format.py +3 -3
- mindspore/dataset/engine/datasets_text.py +38 -38
- mindspore/dataset/engine/datasets_user_defined.py +3 -3
- mindspore/dataset/engine/datasets_vision.py +68 -68
- mindspore/dataset/text/__init__.py +3 -3
- mindspore/dataset/text/transforms.py +26 -26
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/vision/__init__.py +3 -3
- mindspore/dataset/vision/transforms.py +92 -92
- mindspore/dataset/vision/utils.py +1 -1
- mindspore/experimental/optim/adadelta.py +2 -2
- mindspore/experimental/optim/adagrad.py +2 -2
- mindspore/experimental/optim/adam.py +2 -2
- mindspore/experimental/optim/adamax.py +2 -2
- mindspore/experimental/optim/adamw.py +2 -2
- mindspore/experimental/optim/asgd.py +2 -2
- mindspore/experimental/optim/lr_scheduler.py +24 -20
- mindspore/experimental/optim/nadam.py +2 -2
- mindspore/experimental/optim/optimizer.py +1 -1
- mindspore/experimental/optim/radam.py +2 -2
- mindspore/experimental/optim/rmsprop.py +2 -2
- mindspore/experimental/optim/rprop.py +2 -2
- mindspore/experimental/optim/sgd.py +2 -2
- mindspore/hal/stream.py +2 -0
- mindspore/include/mindapi/base/types.h +5 -0
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6 -6
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/liblowlatency_collective.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/DeviceBin +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/PkgInspect +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/bin/op_man +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/device/ascend910b/bin/ascend910b.bin +101787 -98559
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_cann_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/host/libasdops_host.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/base/op_register.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/params/mix.h +8 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/params/norm.h +5 -3
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/params/reduce.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/rt/backend/backend.h +3 -3
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/rt/backend/rtbackend.h +3 -3
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/rt/base/types.h +0 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/rt/module/module.h +3 -3
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/include/asdops/utils/svector/svector.h +3 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/asdops/lib/libasdops_static.a +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/add/tiling/add_tiling.h +9 -9
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/apply_rotary_pos_emb_impl.h +2 -6
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_base.h +460 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_bf16.h +217 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_fp16.h +116 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_tiling.h +16 -24
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/apply_rotary_pos_emb/kernel/apply_rotary_pos_emb_value.h +27 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/asdop/asd_op_impl.h +0 -4
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention/FlashAttentionScore_impl.h → flash_attention_score/flash_attention_score_impl.h} +2 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention/bs_attention_tiling.h → flash_attention_score/flash_attention_score_tiling.h} +15 -19
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/gelu/tiling/gelu_tiling.h +7 -9
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/lccl/lccl_wrapper.h +58 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul/matmul_impl.h +19 -8
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{matmul → matmul_common}/pp_matmul_common_tiling.h +18 -8
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{matmul → matmul_common}/pp_matmul_info.h +7 -4
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{matmul → matmul_common}/tiling_data.h +44 -6
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_common/tiling_utils.h +65 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/matmul_stridedslice_fusion_impl.h +10 -6
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/op_param.h +4 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/kernel/paged_attention_mix_hwsync.h +41 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention/PagedAttention_impl.h → paged_attention/paged_attention_impl.h} +1 -1
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/paged_attention/paged_attention_tiling.h +63 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/add_param.h +2 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention_param.h → param/attention_param.h} +11 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/matmul_ext_param.h +37 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/param/sub_param.h +45 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/reshape_and_cache/reshape_and_cache_tiling.h +1 -2
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/rms_norm.h +23 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/rms_norm_base.h +175 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/rms_norm_normal.h +276 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/rms_norm_split_d.h +280 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/kernel/tiling_data.h +35 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/rms_norm/rms_norm_impl.h +45 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/kernel/sub_kernel.h +20 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_impl.h +47 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/sub/sub_tiling.h +25 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/tune_repo/matmul_table.h +323 -23
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/types.h +15 -4
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/utils/log/log_tiling.h +8 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libAdd_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libSub_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_layernorm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libadd_rms_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libapply_rotary_pos_emb_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libcast_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libgelu_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libmatmul_stridedslice_fusion_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libms_kernels_internal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libnot_equal_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/libreshape_and_cache_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/lib/librms_norm_impl.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_bf16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bnsd_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/flash_attention_score_fp16_bsh_tri_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_bf16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bnsd_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/paged_attention_fp16_bsh_full_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lcal.h +22 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lcal_comm.h +70 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lcal_types.h +103 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lccl.h +47 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lccl_wrapper.h +58 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/include/lcoc.h +154 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblcal.so +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/lccl/lib/liblccl_wrapper.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/log.py +2 -2
- mindspore/mint/__init__.py +457 -0
- mindspore/mint/nn/__init__.py +430 -0
- mindspore/mint/nn/functional.py +424 -0
- mindspore/mint/optim/__init__.py +24 -0
- mindspore/mint/optim/adamw.py +186 -0
- mindspore/multiprocessing/__init__.py +4 -0
- mindspore/nn/__init__.py +3 -0
- mindspore/nn/cell.py +51 -47
- mindspore/nn/extend/__init__.py +29 -0
- mindspore/nn/extend/basic.py +140 -0
- mindspore/nn/extend/embedding.py +143 -0
- mindspore/nn/extend/layer/__init__.py +27 -0
- mindspore/nn/extend/layer/normalization.py +107 -0
- mindspore/nn/extend/pooling.py +117 -0
- mindspore/nn/generator.py +297 -0
- mindspore/nn/layer/basic.py +109 -1
- mindspore/nn/layer/container.py +2 -2
- mindspore/nn/layer/conv.py +6 -6
- mindspore/nn/layer/embedding.py +1 -1
- mindspore/nn/layer/normalization.py +21 -43
- mindspore/nn/layer/padding.py +4 -0
- mindspore/nn/optim/ada_grad.py +2 -2
- mindspore/nn/optim/adadelta.py +1 -1
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +7 -7
- mindspore/nn/optim/adamax.py +2 -2
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +2 -2
- mindspore/nn/optim/ftrl.py +1 -1
- mindspore/nn/optim/lamb.py +3 -3
- mindspore/nn/optim/lars.py +1 -1
- mindspore/nn/optim/lazyadam.py +2 -2
- mindspore/nn/optim/momentum.py +2 -2
- mindspore/nn/optim/optimizer.py +2 -2
- mindspore/nn/optim/proximal_ada_grad.py +2 -2
- mindspore/nn/optim/rmsprop.py +2 -2
- mindspore/nn/optim/rprop.py +2 -2
- mindspore/nn/optim/sgd.py +2 -2
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/wrap/cell_wrapper.py +9 -9
- mindspore/nn/wrap/grad_reducer.py +5 -5
- mindspore/ops/_grad_experimental/grad_comm_ops.py +4 -2
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +41 -2
- mindspore/ops/_vmap/vmap_math_ops.py +27 -8
- mindspore/ops/_vmap/vmap_nn_ops.py +66 -8
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +73 -1
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +12 -3
- mindspore/ops/auto_generate/gen_arg_handler.py +24 -0
- mindspore/ops/auto_generate/gen_extend_func.py +274 -0
- mindspore/ops/auto_generate/gen_ops_def.py +889 -22
- mindspore/ops/auto_generate/gen_ops_prim.py +3541 -253
- mindspore/ops/auto_generate/pyboost_inner_prim.py +282 -0
- mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -1
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +9 -0
- mindspore/ops/extend/__init__.py +9 -1
- mindspore/ops/extend/array_func.py +134 -27
- mindspore/ops/extend/math_func.py +3 -3
- mindspore/ops/extend/nn_func.py +363 -2
- mindspore/ops/function/__init__.py +19 -2
- mindspore/ops/function/array_func.py +463 -439
- mindspore/ops/function/clip_func.py +7 -18
- mindspore/ops/function/grad/grad_func.py +5 -5
- mindspore/ops/function/linalg_func.py +4 -4
- mindspore/ops/function/math_func.py +260 -243
- mindspore/ops/function/nn_func.py +825 -62
- mindspore/ops/function/random_func.py +73 -4
- mindspore/ops/function/sparse_unary_func.py +1 -1
- mindspore/ops/function/vmap_func.py +1 -1
- mindspore/ops/functional.py +2 -2
- mindspore/ops/op_info_register.py +1 -31
- mindspore/ops/operations/__init__.py +2 -3
- mindspore/ops/operations/_grad_ops.py +2 -107
- mindspore/ops/operations/_inner_ops.py +5 -5
- mindspore/ops/operations/_sequence_ops.py +2 -2
- mindspore/ops/operations/array_ops.py +11 -233
- mindspore/ops/operations/comm_ops.py +32 -32
- mindspore/ops/operations/custom_ops.py +7 -89
- mindspore/ops/operations/manually_defined/ops_def.py +329 -4
- mindspore/ops/operations/math_ops.py +13 -163
- mindspore/ops/operations/nn_ops.py +9 -316
- mindspore/ops/operations/random_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +3 -3
- mindspore/ops/primitive.py +2 -2
- mindspore/ops_generate/arg_dtype_cast.py +12 -3
- mindspore/ops_generate/arg_handler.py +24 -0
- mindspore/ops_generate/gen_ops_inner_prim.py +2 -0
- mindspore/ops_generate/gen_pyboost_func.py +13 -6
- mindspore/ops_generate/pyboost_utils.py +2 -17
- mindspore/parallel/__init__.py +3 -2
- mindspore/parallel/_auto_parallel_context.py +106 -1
- mindspore/parallel/_parallel_serialization.py +34 -2
- mindspore/parallel/_utils.py +16 -0
- mindspore/parallel/algo_parameter_config.py +4 -4
- mindspore/parallel/checkpoint_transform.py +249 -77
- mindspore/parallel/cluster/process_entity/_api.py +1 -1
- mindspore/parallel/parameter_broadcast.py +1 -1
- mindspore/parallel/shard.py +1 -1
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +1 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +17 -5
- mindspore/profiler/parser/ascend_msprof_exporter.py +3 -3
- mindspore/profiler/parser/ascend_msprof_generator.py +10 -3
- mindspore/profiler/parser/ascend_op_generator.py +26 -9
- mindspore/profiler/parser/ascend_timeline_generator.py +7 -4
- mindspore/profiler/parser/profiler_info.py +11 -1
- mindspore/profiler/profiling.py +13 -5
- mindspore/rewrite/api/node.py +12 -12
- mindspore/rewrite/api/symbol_tree.py +11 -11
- mindspore/run_check/_check_version.py +1 -1
- mindspore/safeguard/rewrite_obfuscation.py +2 -2
- mindspore/train/amp.py +4 -4
- mindspore/train/anf_ir_pb2.py +8 -2
- mindspore/train/callback/_backup_and_restore.py +2 -2
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +2 -2
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_landscape.py +4 -4
- mindspore/train/callback/_loss_monitor.py +2 -2
- mindspore/train/callback/_on_request_exit.py +2 -2
- mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
- mindspore/train/callback/_summary_collector.py +2 -2
- mindspore/train/callback/_time_monitor.py +2 -2
- mindspore/train/dataset_helper.py +8 -3
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/mind_ir_pb2.py +22 -17
- mindspore/train/model.py +15 -15
- mindspore/train/serialization.py +18 -18
- mindspore/train/summary/summary_record.py +7 -7
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/version.py +1 -1
- {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +1 -1
- {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +309 -262
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/matmul_stridedslice/tiling_data.h +0 -59
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/FlashAttentionScore_bf16_BNSD_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/FlashAttentionScore_bf16_BSH_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/FlashAttentionScore_fp16_BNSD_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/FlashAttentionScore_fp16_BSH_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/PagedAttention_bf16_BNSD_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/PagedAttention_bf16_BSH_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/PagedAttention_fp16_BNSD_mix.o +0 -0
- mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/op_kernels/ascend910b/BSAttention/PagedAttention_fp16_BSH_mix.o +0 -0
- /mindspore/lib/plugin/ascend/ms_kernels_internal/internal_kernel/include/{attention/bs_attention_mix_hwsync.h → flash_attention_score/kernel/flash_attention_score_mix_hwsync.h} +0 -0
- {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
- {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
- {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0

mindspore/ops/auto_generate/pyboost_inner_prim.py
CHANGED

@@ -17,10 +17,36 @@ from mindspore.common._stub_tensor import _convert_stub
 from mindspore.ops.auto_generate.gen_arg_handler import *
 from mindspore._c_expression import ArgMaxWithValuePrim_
 from mindspore._c_expression import ArgMinWithValuePrim_
+from mindspore._c_expression import BatchMatMulPrim_
+from mindspore._c_expression import BatchNormGradExtPrim_
 from mindspore._c_expression import BroadcastToPrim_
 from mindspore._c_expression import ConcatPrim_
+from mindspore._c_expression import ConvolutionGradPrim_
+from mindspore._c_expression import ConvolutionPrim_
+from mindspore._c_expression import FFNExtPrim_
+from mindspore._c_expression import FlashAttentionScoreGradPrim_
+from mindspore._c_expression import FlashAttentionScorePrim_
+from mindspore._c_expression import GridSampler2DGradPrim_
+from mindspore._c_expression import GridSampler2DPrim_
+from mindspore._c_expression import GridSampler3DGradPrim_
+from mindspore._c_expression import GridSampler3DPrim_
+from mindspore._c_expression import MatMulPrim_
+from mindspore._c_expression import MaxPoolGradWithIndicesPrim_
+from mindspore._c_expression import MaxPoolGradWithMaskPrim_
+from mindspore._c_expression import MaxPoolWithIndicesPrim_
+from mindspore._c_expression import MaxPoolWithMaskPrim_
+from mindspore._c_expression import OneHotExtPrim_
+from mindspore._c_expression import QuantBatchMatmulPrim_
+from mindspore._c_expression import ReduceAllPrim_
 from mindspore._c_expression import ReduceAnyPrim_
+from mindspore._c_expression import ReverseV2Prim_
 from mindspore._c_expression import SoftmaxPrim_
+from mindspore._c_expression import StackExtPrim_
+from mindspore._c_expression import TrilPrim_
+from mindspore._c_expression import TriuPrim_
+from mindspore._c_expression import UpsampleTrilinear3DGradPrim_
+from mindspore._c_expression import UpsampleTrilinear3DPrim_
+from mindspore._c_expression import WeightQuantBatchMatmulPrim_
 
 
 class _PyboostArgMaxWithValuePrim(ArgMaxWithValuePrim_):
@@ -41,6 +67,24 @@ class _PyboostArgMinWithValuePrim(ArgMinWithValuePrim_):
 argmin_with_value_impl = _PyboostArgMinWithValuePrim()
 
 
+class _PyboostBatchMatMulPrim(BatchMatMulPrim_):
+    def __call__(self, x, y, transpose_a, transpose_b):
+
+        return _convert_stub(super().__call__(x, y, transpose_a, transpose_b))
+
+
+batch_mat_mul_impl = _PyboostBatchMatMulPrim()
+
+
+class _PyboostBatchNormGradExtPrim(BatchNormGradExtPrim_):
+    def __call__(self, dout, input, weight, running_mean, running_var, saved_mean, saved_rstd, training, eps):
+
+        return _convert_stub(super().__call__(dout, input, weight, running_mean, running_var, saved_mean, saved_rstd, training, eps))
+
+
+batch_norm_grad_ext_impl = _PyboostBatchNormGradExtPrim()
+
+
 class _PyboostBroadcastToPrim(BroadcastToPrim_):
     def __call__(self, input, shape):
 
@@ -59,6 +103,181 @@ class _PyboostConcatPrim(ConcatPrim_):
 concat_impl = _PyboostConcatPrim()
 
 
+class _PyboostConvolutionGradPrim(ConvolutionGradPrim_):
+    def __call__(self, dout, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, output_mask):
+        converted_stride = to_strides(stride)
+        converted_padding = to_2d_paddings(padding)
+        converted_dilation = to_dilations(dilation)
+        converted_output_padding = to_output_padding(output_padding)
+        return _convert_stub(super().__call__(dout, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, output_mask))
+
+
+convolution_grad_impl = _PyboostConvolutionGradPrim()
+
+
+class _PyboostConvolutionPrim(ConvolutionPrim_):
+    def __call__(self, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups):
+        converted_stride = to_strides(stride)
+        converted_padding = to_2d_paddings(padding)
+        converted_dilation = to_dilations(dilation)
+        converted_output_padding = to_output_padding(output_padding)
+        return _convert_stub(super().__call__(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups))
+
+
+convolution_impl = _PyboostConvolutionPrim()
+
+
+class _PyboostFFNExtPrim(FFNExtPrim_):
+    def __call__(self, x, weight1, weight2, expertTokens, bias1, bias2, scale, offset, deqScale1, deqScale2, antiquant_scale1, antiquant_scale2, antiquant_offset1, antiquant_offset2, activation, inner_precise):
+        converted_activation = str_to_enum(activation)
+        return _convert_stub(super().__call__(x, weight1, weight2, expertTokens, bias1, bias2, scale, offset, deqScale1, deqScale2, antiquant_scale1, antiquant_scale2, antiquant_offset1, antiquant_offset2, activation, inner_precise))
+
+
+ffn_ext_impl = _PyboostFFNExtPrim()
+
+
+class _PyboostFlashAttentionScoreGradPrim(FlashAttentionScoreGradPrim_):
+    def __call__(self, query, key, value, dy, pse_shift, drop_mask, padding_mask, atten_mask, softmax_max, softmax_sum, softmax_in, attention_in, prefix, actual_seq_qlen, actual_seq_kvlen, head_num, keep_prob, scale_value, pre_tokens, next_tokens, inner_precise, input_layout, sparse_mode):
+        converted_input_layout = str_to_enum(input_layout)
+        return _convert_stub(super().__call__(query, key, value, dy, pse_shift, drop_mask, padding_mask, atten_mask, softmax_max, softmax_sum, softmax_in, attention_in, prefix, actual_seq_qlen, actual_seq_kvlen, head_num, keep_prob, scale_value, pre_tokens, next_tokens, inner_precise, input_layout, sparse_mode))
+
+
+flash_attention_score_grad_impl = _PyboostFlashAttentionScoreGradPrim()
+
+
+class _PyboostFlashAttentionScorePrim(FlashAttentionScorePrim_):
+    def __call__(self, query, key, value, real_shift, drop_mask, padding_mask, attn_mask, prefix, actual_seq_qlen, actual_seq_kvlen, head_num, keep_prob, scale_value, pre_tokens, next_tokens, inner_precise, input_layout, sparse_mode):
+        converted_input_layout = str_to_enum(input_layout)
+        return _convert_stub(super().__call__(query, key, value, real_shift, drop_mask, padding_mask, attn_mask, prefix, actual_seq_qlen, actual_seq_kvlen, head_num, keep_prob, scale_value, pre_tokens, next_tokens, inner_precise, input_layout, sparse_mode))
+
+
+flash_attention_score_impl = _PyboostFlashAttentionScorePrim()
+
+
+class _PyboostGridSampler2DGradPrim(GridSampler2DGradPrim_):
+    def __call__(self, grad, input_x, grid, interpolation_mode, padding_mode, align_corners):
+        converted_interpolation_mode = str_to_enum(interpolation_mode)
+        converted_padding_mode = str_to_enum(padding_mode)
+        return _convert_stub(super().__call__(grad, input_x, grid, interpolation_mode, padding_mode, align_corners))
+
+
+grid_sampler_2d_grad_impl = _PyboostGridSampler2DGradPrim()
+
+
+class _PyboostGridSampler2DPrim(GridSampler2DPrim_):
+    def __call__(self, input_x, grid, interpolation_mode, padding_mode, align_corners):
+        converted_interpolation_mode = str_to_enum(interpolation_mode)
+        converted_padding_mode = str_to_enum(padding_mode)
+        return _convert_stub(super().__call__(input_x, grid, interpolation_mode, padding_mode, align_corners))
+
+
+grid_sampler_2d_impl = _PyboostGridSampler2DPrim()
+
+
+class _PyboostGridSampler3DGradPrim(GridSampler3DGradPrim_):
+    def __call__(self, grad, input_x, grid, interpolation_mode, padding_mode, align_corners):
+        converted_interpolation_mode = str_to_enum(interpolation_mode)
+        converted_padding_mode = str_to_enum(padding_mode)
+        return _convert_stub(super().__call__(grad, input_x, grid, interpolation_mode, padding_mode, align_corners))
+
+
+grid_sampler_3d_grad_impl = _PyboostGridSampler3DGradPrim()
+
+
+class _PyboostGridSampler3DPrim(GridSampler3DPrim_):
+    def __call__(self, input_x, grid, interpolation_mode, padding_mode, align_corners):
+        converted_interpolation_mode = str_to_enum(interpolation_mode)
+        converted_padding_mode = str_to_enum(padding_mode)
+        return _convert_stub(super().__call__(input_x, grid, interpolation_mode, padding_mode, align_corners))
+
+
+grid_sampler_3d_impl = _PyboostGridSampler3DPrim()
+
+
+class _PyboostMatMulPrim(MatMulPrim_):
+    def __call__(self, input, mat2, transpose_a, transpose_b):
+
+        return _convert_stub(super().__call__(input, mat2, transpose_a, transpose_b))
+
+
+matmul_impl = _PyboostMatMulPrim()
+
+
+class _PyboostMaxPoolGradWithIndicesPrim(MaxPoolGradWithIndicesPrim_):
+    def __call__(self, x, grad, argmax, kernel_size, strides, pads, dilation, ceil_mode, argmax_type):
+        converted_kernel_size = to_kernel_size(kernel_size)
+        converted_strides = to_strides(strides)
+        converted_pads = to_output_padding(pads)
+        converted_dilation = to_dilations(dilation)
+        return _convert_stub(super().__call__(x, grad, argmax, kernel_size, strides, pads, dilation, ceil_mode, argmax_type))
+
+
+max_pool_grad_with_indices_impl = _PyboostMaxPoolGradWithIndicesPrim()
+
+
+class _PyboostMaxPoolGradWithMaskPrim(MaxPoolGradWithMaskPrim_):
+    def __call__(self, x, grad, mask, kernel_size, strides, pads, dilation, ceil_mode, argmax_type):
+        converted_kernel_size = to_kernel_size(kernel_size)
+        converted_strides = to_strides(strides)
+        converted_pads = to_output_padding(pads)
+        converted_dilation = to_dilations(dilation)
+        return _convert_stub(super().__call__(x, grad, mask, kernel_size, strides, pads, dilation, ceil_mode, argmax_type))
+
+
+max_pool_grad_with_mask_impl = _PyboostMaxPoolGradWithMaskPrim()
+
+
+class _PyboostMaxPoolWithIndicesPrim(MaxPoolWithIndicesPrim_):
+    def __call__(self, x, kernel_size, strides, pads, dilation, ceil_mode, argmax_type):
+        converted_kernel_size = to_kernel_size(kernel_size)
+        converted_strides = to_strides(strides)
+        converted_pads = to_output_padding(pads)
+        converted_dilation = to_dilations(dilation)
+        return _convert_stub(super().__call__(x, kernel_size, strides, pads, dilation, ceil_mode, argmax_type))
+
+
+max_pool_with_indices_impl = _PyboostMaxPoolWithIndicesPrim()
+
+
+class _PyboostMaxPoolWithMaskPrim(MaxPoolWithMaskPrim_):
+    def __call__(self, x, kernel_size, strides, pads, dilation, ceil_mode, argmax_type):
+        converted_kernel_size = to_kernel_size(kernel_size)
+        converted_strides = to_strides(strides)
+        converted_pads = to_output_padding(pads)
+        converted_dilation = to_dilations(dilation)
+        return _convert_stub(super().__call__(x, kernel_size, strides, pads, dilation, ceil_mode, argmax_type))
+
+
+max_pool_with_mask_impl = _PyboostMaxPoolWithMaskPrim()
+
+
+class _PyboostOneHotExtPrim(OneHotExtPrim_):
+    def __call__(self, tensor, num_classes, on_value, off_value, axis):
+
+        return _convert_stub(super().__call__(tensor, num_classes, on_value, off_value, axis))
+
+
+one_hot_ext_impl = _PyboostOneHotExtPrim()
+
+
+class _PyboostQuantBatchMatmulPrim(QuantBatchMatmulPrim_):
+    def __call__(self, x1, x2, scale, offset, bias, transpose_x1, transpose_x2, dtype):
+
+        return _convert_stub(super().__call__(x1, x2, scale, offset, bias, transpose_x1, transpose_x2, dtype))
+
+
+quant_batch_matmul_impl = _PyboostQuantBatchMatmulPrim()
+
+
+class _PyboostReduceAllPrim(ReduceAllPrim_):
+    def __call__(self, input, axis, keep_dims):
+
+        return _convert_stub(super().__call__(input, axis, keep_dims))
+
+
+reduce_all_impl = _PyboostReduceAllPrim()
+
+
 class _PyboostReduceAnyPrim(ReduceAnyPrim_):
     def __call__(self, x, axis, keep_dims):
 
@@ -68,6 +287,15 @@ class _PyboostReduceAnyPrim(ReduceAnyPrim_):
 reduce_any_impl = _PyboostReduceAnyPrim()
 
 
+class _PyboostReverseV2Prim(ReverseV2Prim_):
+    def __call__(self, input, axis):
+
+        return _convert_stub(super().__call__(input, axis))
+
+
+reverse_v2_impl = _PyboostReverseV2Prim()
+
+
 class _PyboostSoftmaxPrim(SoftmaxPrim_):
     def __call__(self, input, axis):
 
@@ -75,3 +303,57 @@ class _PyboostSoftmaxPrim(SoftmaxPrim_):
 
 
 softmax_impl = _PyboostSoftmaxPrim()
+
+
+class _PyboostStackExtPrim(StackExtPrim_):
+    def __call__(self, tensors, dim):
+
+        return _convert_stub(super().__call__(tensors, dim))
+
+
+stack_ext_impl = _PyboostStackExtPrim()
+
+
+class _PyboostTrilPrim(TrilPrim_):
+    def __call__(self, input, diagonal):
+
+        return _convert_stub(super().__call__(input, diagonal))
+
+
+tril_impl = _PyboostTrilPrim()
+
+
+class _PyboostTriuPrim(TriuPrim_):
+    def __call__(self, input, diagonal):
+
+        return _convert_stub(super().__call__(input, diagonal))
+
+
+triu_impl = _PyboostTriuPrim()
+
+
+class _PyboostUpsampleTrilinear3DGradPrim(UpsampleTrilinear3DGradPrim_):
+    def __call__(self, dy, input_size, output_size, scales, align_corners):
+
+        return _convert_stub(super().__call__(dy, input_size, output_size, scales, align_corners))
+
+
+upsample_trilinear3d_grad_impl = _PyboostUpsampleTrilinear3DGradPrim()
+
+
+class _PyboostUpsampleTrilinear3DPrim(UpsampleTrilinear3DPrim_):
+    def __call__(self, x, output_size, scales, align_corners):
+
+        return _convert_stub(super().__call__(x, output_size, scales, align_corners))
+
+
+upsample_trilinear3d_impl = _PyboostUpsampleTrilinear3DPrim()
+
+
+class _PyboostWeightQuantBatchMatmulPrim(WeightQuantBatchMatmulPrim_):
+    def __call__(self, x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias, transpose_x, transpose_weight, antiquant_group_size):
+
+        return _convert_stub(super().__call__(x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias, transpose_x, transpose_weight, antiquant_group_size))
+
+
+weight_quant_batch_matmul_impl = _PyboostWeightQuantBatchMatmulPrim()
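
All of the added blocks above follow one generated pattern: a thin Python subclass of a C++ pyboost primitive whose __call__ optionally normalizes arguments with helpers from gen_arg_handler (str_to_enum, to_strides, to_kernel_size, and so on) and wraps the C++ result with _convert_stub. A schematic of the pattern, useful for reading these hunks (an illustrative, runnable sketch only; ExamplePrim_ and the local _convert_stub are dummies standing in for the real _c_expression classes and helper):

# Illustrative sketch of the generated pyboost wrapper pattern, not library code.
class ExamplePrim_:
    def __call__(self, *args):
        return args  # the real class runs the C++ kernel and returns its output

def _convert_stub(out):
    return out  # the real helper wraps C++ outputs for PyNative execution

class _PyboostExamplePrim(ExamplePrim_):
    def __call__(self, input, arg):
        # Some wrappers first normalize arguments here, e.g.
        # converted_arg = str_to_enum(arg), as in the FFN/FlashAttention blocks.
        return _convert_stub(super().__call__(input, arg))

# One module-level singleton per primitive, reused by the functional front end.
example_impl = _PyboostExamplePrim()
print(example_impl("x", 1))  # ('x', 1)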
mindspore/ops/composite/multitype_ops/_compile_utils.py
CHANGED

@@ -137,7 +137,8 @@ def data_update_by_ops(transfer_type, arg, data, new_index, origin_data, value=N
     elif transfer_type == ValueTransferType.kGatherND:
         if isinstance(new_index, list):
             new_index = handle_multi_dim_index_tensor(new_index, arg)
-
+        new_index = format_index_tensor(new_index, (None, F.shape(data)[:F.shape(new_index)[-1]]))
+        data = F.gather_nd(data, new_index)
     elif transfer_type == ValueTransferType.kTensorScatterUpdate:
         if isinstance(new_index, list):
             new_index = handle_multi_dim_index_tensor(new_index, arg)
mindspore/ops/composite/multitype_ops/_constexpr_utils.py
CHANGED

@@ -459,13 +459,22 @@ def tuple_index_type_cnt(types, op_name):
 def check_value_elements(types):
     """Judges the type of all elements of the tuple."""
     tensor_number = 0
+    last_type = None
+    mix_but_no_tensor = False
     for ele in types:
         if isinstance(ele, mstype.TensorType):
             tensor_number += 1
         elif isinstance(ele, (list, tuple)):
             return MIXED
 
+        if last_type is None:
+            last_type = type(ele)
+        elif not isinstance(ele, last_type):
+            mix_but_no_tensor = True
+
     if tensor_number == 0:
+        if mix_but_no_tensor:
+            return MIXED
         return NO_TENSOR
     if tensor_number == len(types):
         return ALL_TENSOR
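
The net effect of this hunk: an index tuple whose elements are heterogeneous non-tensor types (say, an int mixed with a slice) is now classified as MIXED instead of NO_TENSOR. A self-contained sketch of the updated logic, with the module constants and mstype.TensorType stubbed out for illustration (the final fall-through return is assumed, it lies outside this hunk):

# Standalone sketch of the updated check_value_elements; stubs, not library code.
NO_TENSOR, ALL_TENSOR, MIXED = "no_tensor", "all_tensor", "mixed"

class TensorType:  # stand-in for mstype.TensorType
    pass

def check_value_elements(types):
    """Judges the type of all elements of the tuple."""
    tensor_number = 0
    last_type = None
    mix_but_no_tensor = False
    for ele in types:
        if isinstance(ele, TensorType):
            tensor_number += 1
        elif isinstance(ele, (list, tuple)):
            return MIXED

        if last_type is None:
            last_type = type(ele)
        elif not isinstance(ele, last_type):
            mix_but_no_tensor = True  # heterogeneous non-tensor elements seen

    if tensor_number == 0:
        if mix_but_no_tensor:
            return MIXED  # new in this release: mixed scalar/slice tuples
        return NO_TENSOR
    if tensor_number == len(types):
        return ALL_TENSOR
    return MIXED  # tensors mixed with other element types (assumed fall-through)

print(check_value_elements((1, slice(0, 2))))  # mixed (was no_tensor before this change)
print(check_value_elements((1, 2, 3)))         # no_tensor, unchanged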
mindspore/ops/extend/__init__.py
CHANGED

@@ -33,13 +33,21 @@ from . import (
     nn_func,
 )
 
-from .array_func import gather, max, min
+from .array_func import gather, max, min, one_hot, narrow
 from .math_func import (
     baddbmm,
+    bmm,
     add,
     sub
 )
 
+from .nn_func import (
+    conv2d,
+    max_pool2d,
+    leaky_relu_ext,
+    batch_norm
+)
+
 __all__ = []
 __all__.extend(array_func.__all__)
 __all__.extend(math_func.__all__)
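
In short, mindspore.ops.extend now re-exports one_hot and narrow from array_func, bmm from math_func, and a first batch of nn_func ops. A quick import smoke test of the new surface (a sketch; assumes a build that contains this change):

# Sketch: the names re-exported by ops/extend/__init__.py as of 2.3.0rc2.
from mindspore.ops.extend import gather, max, min, one_hot, narrow
from mindspore.ops.extend import baddbmm, add, sub, bmm
from mindspore.ops.extend import conv2d, max_pool2d, leaky_relu_ext, batch_norm

print(one_hot, narrow, bmm, conv2d)  # all importable after this release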
mindspore/ops/extend/array_func.py
CHANGED

@@ -18,12 +18,54 @@
 Array Operators
 
 """
-
+from mindspore.common import Tensor
 from mindspore.ops.operations.array_ops import ArgMaxWithValue, ArgMinWithValue
 from mindspore.ops._primitive_cache import _get_cache_prim
-from mindspore.ops.auto_generate.gen_ops_prim import gather_d_op
+from mindspore.ops.auto_generate.gen_ops_prim import gather_d_op, slice_ext_op, OneHotExt
+from mindspore.ops.auto_generate.gen_ops_def import max_, min_
+from mindspore import _checkparam as validator
+
 
 # define Primitive global variables
+def narrow(input, dim, start, length):
+    """
+    Returns a narrowed tensor from input tensor, and
+    the dimension axis is input from start to start + length.
+
+    Args:
+        input (Tensor): the tensor to narrow.
+        dim (int): dimension along which to narrow.
+        start (int): the starting dimension.
+        length (int): the distance to the ending dimension.
+
+    Returns:
+        Tensor.
+
+        - output (Tensors) - The narrowed tensor.
+
+    Raises:
+        TypeError: If the input is not a tensor or tuple or list of tensors.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import ops
+        >>> from mindspore import Tensor
+        >>> x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mindspore.int32)
+        >>> output = ops.narrow(x, 0, 0, 2)
+        >>> print(output)
+        [[ 1 2 3]
+         [ 4 5 6]]
+        >>> output = ops.narrow(x, 1, 1, 2)
+        >>> print(output)
+        [[ 2 3]
+         [ 5 6]
+         [ 8 9]]
+    """
+    validator.check_value_type("input", input, Tensor, "narrow")
+    return slice_ext_op(input, dim, start, start+length, 1)
 
 
 def gather(input, dim, index):
@@ -75,25 +117,32 @@ def gather(input, dim, index):
     return gather_d_op(input, dim, index)
 
 
-def max(input, dim, keepdim=False):
+def max(input, dim=None, keepdim=False):
     """
-    Calculates the maximum value along with the given
+    Calculates the maximum value along with the given dimension for the input tensor.
 
     Args:
         input (Tensor): The input tensor, can be any dimension. Complex tensor is not supported for now.
-        dim (int): The dimension to reduce.
-        keepdim (bool): Whether to reduce dimension, if true, the output will keep same dimension
-            the output will reduce dimension if false. Default: ``False`` .
+        dim (int, optional): The dimension to reduce. Default: ``None`` .
+        keepdim (bool, optional): Whether to reduce dimension, if true, the output will keep same dimension
+            with the input, the output will reduce dimension if false. Default: ``False`` .
 
     Returns:
-
-
+        Tensor if `dim` is the default value ``None`` , the maximum value of input tensor, with the shape :math:`()` ,
+        and same dtype as `input`.
+
+        tuple (Tensor) if `dim` is not the default value ``None`` , tuple of 2 tensors, containing the maximum
+        value of the input tensor along the given dimension `dim` and the corresponding index:
 
-        - values (Tensor) - The maximum value of input tensor
-          is
-          ..., input_N)` . Otherwise, the shape is :math:`(input_1, input_2, ...,
-          ..., input_N)` .
-        - index (Tensor) - The index for the maximum value of the input tensor
+        - **values (Tensor)** - The maximum value of input tensor along the given dimension `dim`, with same dtype as
+          `input`. If `keepdim` is ``True`` , the shape of output tensors is :math:`(input_1, input_2, ...,
+          input_{axis-1}, 1, input_{axis+1}, ..., input_N)` . Otherwise, the shape is :math:`(input_1, input_2, ...,
+          input_{axis-1}, input_{axis+1}, ..., input_N)` .
+        - **index (Tensor)** - The index for the maximum value of the input tensor along the given dimension `dim`, with
+          the same shape as `values`.
+
+    Raises:
+        ValueError: If `dim` is the default value ``None`` and `keepdim` is not ``False`` .
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -108,30 +157,41 @@
     >>> print(output, index)
     [[3.2 0.4 0.4 2.9 4. ]] [[1 1 0 1 1]]
     """
+    if dim is None:
+        if keepdim is not False:
+            raise ValueError(f"For 'max', the `keepdim` must be False when the `dim` is None, but got {keepdim}")
+        return max_(input)
     argmax_with_value_op = _get_cache_prim(ArgMaxWithValue)(dim, keepdim)
     indices, values = argmax_with_value_op(input)
     return values, indices
 
 
-def min(input, dim, keepdim=False):
+def min(input, dim=None, keepdim=False):
     """
-    Calculates the minimum value along with the given
+    Calculates the minimum value along with the given dimension for the input tensor.
 
     Args:
         input (Tensor): The input tensor, can be any dimension. Complex tensor is not supported for now.
-        dim (int): The dimension to reduce.
-        keepdim (bool): Whether to reduce dimension, if true, the output will keep same dimension
-            the output will reduce dimension if false. Default: ``False`` .
+        dim (int, optional): The dimension to reduce. Default: ``None`` .
+        keepdim (bool, optional): Whether to reduce dimension, if true, the output will keep same dimension
+            with the input, the output will reduce dimension if false. Default: ``False`` .
 
     Returns:
-
-
+        Tensor if `dim` is the default value ``None`` , the minimum value of input tensor, with the shape :math:`()` ,
+        and same dtype as `input`.
+
+        tuple (Tensor) if `dim` is not the default value ``None`` , tuple of 2 tensors, containing the minimum value
+        of the input tensor along the given dimension `dim` and the corresponding index:
 
-        - values (Tensor) - The minimum value of input tensor
-          is
-          ..., input_N)` . Otherwise, the shape is :math:`(input_1, input_2, ...,
-          ..., input_N)` .
-        - index (Tensor) - The index for the minimum value of the input tensor
+        - **values (Tensor)** - The minimum value of input tensor along the given dimension `dim`, with same dtype as
+          `input`. If `keepdim` is ``True`` , the shape of output tensors is :math:`(input_1, input_2, ...,
+          input_{axis-1}, 1, input_{axis+1}, ..., input_N)` . Otherwise, the shape is :math:`(input_1, input_2, ...,
+          input_{axis-1}, input_{axis+1}, ..., input_N)` .
+        - **index (Tensor)** - The index for the minimum value of the input tensor along the given dimension `dim`,
+          with the same shape as `values`.
+
+    Raises:
+        ValueError: If `dim` is the default value ``None`` and `keepdim` is not ``False`` .
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -145,8 +205,55 @@ def min(input, dim, keepdim=False):
     >>> print(output, index)
     [0.0] [0]
     """
+    if dim is None:
+        if keepdim is not False:
+            raise ValueError(f"For 'min', the `keepdim` must be False when the `dim` is None, but got {keepdim}")
+        return min_(input)
     argmin_with_value_op = _get_cache_prim(ArgMinWithValue)(dim, keepdim)
     indices, values = argmin_with_value_op(input)
     return values, indices
 
-
+
+def one_hot(tensor, num_classes):
+    r"""
+    Computes a one-hot tensor.
+
+    The locations represented by tensor in `tensor` take value `1`, while all
+    other locations take value `0`.
+
+    Args:
+        tensor (Tensor): A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
+            Data type must be int32 or int64.
+        num_classes (Union[int, Tensor]): A scalar defining the depth of the one-hot dimension.
+
+    Returns:
+        Tensor, one-hot tensor.
+
+    Raises:
+        TypeError: If `num_classes` is not an int.
+        TypeError: If dtype of `tensor` is not int32 or int64.
+        ValueError: If `num_classes` is less than 0.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> import mindspore.ops as ops
+        >>> from mindspore import Tensor
+        >>> tensor = Tensor(np.array([0, 1, 2]), mindspore.int32)
+        >>> num_classes = 3
+        >>> output = ops.extend.one_hot(tensor, num_classes)
+        >>> print(output)
+        [[1. 0. 0.]
+         [0. 1. 0.]
+         [0. 0. 1.]]
+    """
+    on_value = Tensor(1, dtype=tensor.dtype)
+    off_value = Tensor(0, dtype=tensor.dtype)
+    onehot = _get_cache_prim(OneHotExt)(-1)
+    return onehot(tensor, num_classes, on_value, off_value)
+
+
+__all__ = ['gather', 'max', 'min', 'one_hot']
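
Worth noting in the hunks above: dim is now optional in extend.max/extend.min, and with dim=None they dispatch to the global reductions max_/min_ from gen_ops_def (keepdim must stay False on that path), while narrow is a thin wrapper over slice_ext_op. A doctest-style sketch of the new paths (assumes a build that contains this change):

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor
>>> from mindspore.ops.extend import max, min, narrow
>>> x = Tensor(np.array([[1., 2., 3.], [4., 5., 6.]]), mindspore.float32)
>>> out = max(x)                   # dim=None: single global maximum (6.0)
>>> values, index = max(x, dim=1)  # with dim: (values, index) pair
>>> out = min(x)                   # dim=None: single global minimum (1.0)
>>> y = narrow(x, 0, 1, 1)         # row slice [1, 2): slice_ext_op(x, 0, 1, 2, 1)
>>> # max(x, keepdim=True) raises ValueError: keepdim must be False when dim is None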
mindspore/ops/extend/math_func.py
CHANGED

@@ -1,4 +1,4 @@
-# Copyright
+# Copyright 2023 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ Math Operators with better performance
 """
 
 from mindspore.ops import auto_generate as P
-from mindspore.ops.auto_generate.gen_ops_def import add_ext as add, sub_ext as sub
+from mindspore.ops.auto_generate.gen_ops_def import add_ext as add, sub_ext as sub, bmm_ext as bmm
 
 
 # define Primitive global variables
@@ -73,4 +73,4 @@ def baddbmm(input, batch1, batch2, beta=1, alpha=1):
     return P.baddbmm(input, batch1, batch2, beta, alpha)
 
 
-__all__ = ['baddbmm', 'add', 'sub']
+__all__ = ['baddbmm', 'add', 'sub', 'bmm']