mindspore 2.7.0rc1__cp311-cp311-win_amd64.whl → 2.7.1__cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +5 -2
- mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +2 -2
- mindspore/_extends/builtin_operations.py +3 -3
- mindspore/_extends/parallel_compile/akg_compiler/custom.py +1109 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/__init__.py +3 -3
- mindspore/_extends/parse/compile_config.py +24 -1
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +6 -3
- mindspore/_extends/parse/parser.py +28 -22
- mindspore/_extends/parse/resources.py +1 -1
- mindspore/_extends/parse/standard_method.py +23 -2
- mindspore/_extends/parse/trope.py +2 -1
- mindspore/_extends/pijit/pijit_func_white_list.py +9 -27
- mindspore/amp.py +0 -18
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/base.py +29 -2
- mindspore/common/__init__.py +18 -12
- mindspore/common/_decorator.py +3 -2
- mindspore/common/_grad_function.py +3 -1
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +371 -96
- mindspore/common/_utils.py +7 -43
- mindspore/common/api.py +434 -135
- mindspore/common/dtype.py +98 -57
- mindspore/common/dump.py +7 -108
- mindspore/common/dynamic_shape/__init__.py +0 -0
- mindspore/common/{auto_dynamic_shape.py → dynamic_shape/auto_dynamic_shape.py} +15 -23
- mindspore/common/dynamic_shape/enable_dynamic.py +197 -0
- mindspore/common/file_system.py +59 -9
- mindspore/common/hook_handle.py +82 -3
- mindspore/common/jit_config.py +5 -1
- mindspore/common/jit_trace.py +27 -12
- mindspore/common/lazy_inline.py +5 -3
- mindspore/common/np_dtype.py +3 -3
- mindspore/common/parameter.py +17 -127
- mindspore/common/recompute.py +4 -13
- mindspore/common/tensor.py +50 -217
- mindspore/communication/_comm_helper.py +11 -1
- mindspore/communication/comm_func.py +138 -4
- mindspore/communication/management.py +85 -1
- mindspore/config/op_info.config +0 -15
- mindspore/context.py +20 -106
- mindspore/dataset/__init__.py +1 -1
- mindspore/dataset/audio/transforms.py +1 -1
- mindspore/dataset/core/config.py +35 -1
- mindspore/dataset/engine/datasets.py +338 -319
- mindspore/dataset/engine/datasets_user_defined.py +38 -22
- mindspore/dataset/engine/datasets_vision.py +1 -1
- mindspore/dataset/engine/validators.py +1 -15
- mindspore/dataset/transforms/c_transforms.py +2 -2
- mindspore/dataset/transforms/transforms.py +3 -3
- mindspore/dataset/vision/__init__.py +1 -1
- mindspore/dataset/vision/py_transforms.py +8 -8
- mindspore/dataset/vision/transforms.py +17 -5
- mindspore/dataset/vision/utils.py +632 -21
- mindspore/device_context/ascend/op_tuning.py +35 -1
- mindspore/dnnl.dll +0 -0
- mindspore/{profiler/common/validator → graph}/__init__.py +9 -1
- mindspore/graph/custom_pass.py +55 -0
- mindspore/include/api/cell.h +28 -4
- mindspore/include/api/cfg.h +24 -7
- mindspore/include/api/context.h +1 -0
- mindspore/include/api/delegate.h +0 -2
- mindspore/include/api/dual_abi_helper.h +100 -19
- mindspore/include/api/graph.h +14 -1
- mindspore/include/api/kernel.h +16 -3
- mindspore/include/api/kernel_api.h +9 -1
- mindspore/include/api/metrics/accuracy.h +9 -0
- mindspore/include/api/model.h +5 -1
- mindspore/include/api/model_group.h +4 -0
- mindspore/include/api/model_parallel_runner.h +2 -0
- mindspore/include/api/status.h +48 -10
- mindspore/include/api/types.h +6 -1
- mindspore/include/dataset/constants.h +9 -0
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/__init__.py +3 -3
- mindspore/mindrecord/common/exceptions.py +1 -0
- mindspore/mindrecord/config.py +1 -1
- mindspore/{parallel/mpi → mindrecord/core}/__init__.py +4 -1
- mindspore/mindrecord/{shardheader.py → core/shardheader.py} +2 -1
- mindspore/mindrecord/{shardindexgenerator.py → core/shardindexgenerator.py} +1 -1
- mindspore/mindrecord/{shardreader.py → core/shardreader.py} +2 -1
- mindspore/mindrecord/{shardsegment.py → core/shardsegment.py} +2 -2
- mindspore/mindrecord/{shardutils.py → core/shardutils.py} +1 -1
- mindspore/mindrecord/{shardwriter.py → core/shardwriter.py} +1 -1
- mindspore/mindrecord/filereader.py +4 -4
- mindspore/mindrecord/filewriter.py +5 -5
- mindspore/mindrecord/mindpage.py +2 -2
- mindspore/mindrecord/tools/cifar10.py +4 -3
- mindspore/mindrecord/tools/cifar100.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +1 -1
- mindspore/mindrecord/tools/cifar10_to_mr.py +6 -6
- mindspore/mindrecord/tools/csv_to_mr.py +1 -1
- mindspore/mindrecord/tools/imagenet_to_mr.py +1 -1
- mindspore/mindrecord/tools/mnist_to_mr.py +1 -1
- mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -1
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_cluster.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_hardware_abstract.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_ops_host.dll → mindspore_ops_cpu.dll} +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mindspore_runtime_utils.dll +0 -0
- mindspore/mindspore_tools.dll +0 -0
- mindspore/mint/__init__.py +15 -10
- mindspore/mint/distributed/__init__.py +4 -0
- mindspore/mint/distributed/distributed.py +392 -69
- mindspore/mint/nn/__init__.py +2 -16
- mindspore/mint/nn/functional.py +4 -110
- mindspore/mint/nn/layer/__init__.py +0 -2
- mindspore/mint/nn/layer/_functions.py +1 -2
- mindspore/mint/nn/layer/activation.py +0 -6
- mindspore/mint/nn/layer/basic.py +0 -47
- mindspore/mint/nn/layer/conv.py +10 -10
- mindspore/mint/nn/layer/normalization.py +11 -16
- mindspore/mint/nn/layer/pooling.py +0 -4
- mindspore/nn/__init__.py +1 -3
- mindspore/nn/cell.py +231 -239
- mindspore/nn/layer/activation.py +4 -2
- mindspore/nn/layer/basic.py +56 -14
- mindspore/nn/layer/container.py +16 -0
- mindspore/nn/layer/embedding.py +4 -169
- mindspore/nn/layer/image.py +1 -1
- mindspore/nn/layer/normalization.py +2 -1
- mindspore/nn/layer/thor_layer.py +4 -85
- mindspore/nn/optim/ada_grad.py +0 -1
- mindspore/nn/optim/adafactor.py +0 -1
- mindspore/nn/optim/adam.py +32 -127
- mindspore/nn/optim/adamax.py +0 -1
- mindspore/nn/optim/asgd.py +0 -1
- mindspore/nn/optim/ftrl.py +8 -102
- mindspore/nn/optim/lamb.py +1 -4
- mindspore/nn/optim/lars.py +0 -3
- mindspore/nn/optim/lazyadam.py +25 -218
- mindspore/nn/optim/momentum.py +5 -43
- mindspore/nn/optim/optimizer.py +6 -55
- mindspore/nn/optim/proximal_ada_grad.py +0 -1
- mindspore/nn/optim/rmsprop.py +0 -1
- mindspore/nn/optim/rprop.py +0 -1
- mindspore/nn/optim/sgd.py +0 -1
- mindspore/nn/optim/tft_wrapper.py +2 -4
- mindspore/nn/optim/thor.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +7 -8
- mindspore/nn/probability/bijector/gumbel_cdf.py +2 -2
- mindspore/nn/probability/bijector/power_transform.py +20 -21
- mindspore/nn/probability/bijector/scalar_affine.py +5 -5
- mindspore/nn/probability/bijector/softplus.py +13 -14
- mindspore/nn/probability/distribution/_utils/utils.py +2 -2
- mindspore/nn/wrap/cell_wrapper.py +39 -5
- mindspore/nn/wrap/grad_reducer.py +4 -89
- mindspore/numpy/array_creations.py +4 -4
- mindspore/numpy/fft.py +9 -9
- mindspore/numpy/utils_const.py +1 -1
- mindspore/{nn/reinforcement → onnx}/__init__.py +5 -8
- mindspore/onnx/onnx_export.py +137 -0
- mindspore/opencv_core4110.dll +0 -0
- mindspore/opencv_imgcodecs4110.dll +0 -0
- mindspore/{opencv_imgproc452.dll → opencv_imgproc4110.dll} +0 -0
- mindspore/ops/__init__.py +2 -0
- mindspore/ops/_grad_experimental/grad_comm_ops.py +38 -2
- mindspore/ops/_grad_experimental/grad_inner_ops.py +0 -9
- mindspore/ops/_op_impl/aicpu/__init__.py +0 -10
- mindspore/ops/_op_impl/cpu/__init__.py +1 -5
- mindspore/ops/_op_impl/cpu/{buffer_append.py → joinedstr_op.py} +8 -8
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +28 -24
- mindspore/ops/auto_generate/gen_extend_func.py +6 -11
- mindspore/ops/auto_generate/gen_ops_def.py +385 -154
- mindspore/ops/auto_generate/gen_ops_prim.py +5676 -5167
- mindspore/ops/communication.py +97 -0
- mindspore/ops/composite/__init__.py +5 -2
- mindspore/ops/composite/base.py +16 -2
- mindspore/ops/composite/multitype_ops/__init__.py +3 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +150 -8
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/composite/multitype_ops/add_impl.py +7 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +27 -0
- mindspore/ops/function/__init__.py +2 -0
- mindspore/ops/function/array_func.py +24 -18
- mindspore/ops/function/comm_func.py +3883 -0
- mindspore/ops/function/debug_func.py +7 -6
- mindspore/ops/function/grad/grad_func.py +4 -12
- mindspore/ops/function/math_func.py +89 -86
- mindspore/ops/function/nn_func.py +92 -313
- mindspore/ops/function/random_func.py +9 -18
- mindspore/ops/functional.py +4 -1
- mindspore/ops/functional_overload.py +377 -30
- mindspore/ops/operations/__init__.py +2 -5
- mindspore/ops/operations/_custom_ops_utils.py +7 -9
- mindspore/ops/operations/_inner_ops.py +12 -50
- mindspore/ops/operations/_rl_inner_ops.py +0 -933
- mindspore/ops/operations/array_ops.py +5 -50
- mindspore/ops/operations/comm_ops.py +95 -17
- mindspore/ops/operations/custom_ops.py +237 -22
- mindspore/ops/operations/debug_ops.py +33 -35
- mindspore/ops/operations/manually_defined/ops_def.py +39 -318
- mindspore/ops/operations/math_ops.py +5 -5
- mindspore/ops/operations/nn_ops.py +3 -3
- mindspore/ops/operations/sparse_ops.py +0 -83
- mindspore/ops/primitive.py +4 -27
- mindspore/ops/tensor_method.py +88 -10
- mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +5 -5
- mindspore/ops_generate/aclnn/gen_aclnn_implement.py +8 -8
- mindspore/ops_generate/api/functions_cc_generator.py +53 -4
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +25 -11
- mindspore/ops_generate/common/gen_constants.py +11 -10
- mindspore/ops_generate/common/op_proto.py +18 -1
- mindspore/ops_generate/common/template.py +102 -245
- mindspore/ops_generate/common/template_utils.py +212 -0
- mindspore/ops_generate/gen_custom_ops.py +69 -0
- mindspore/ops_generate/op_def/ops_def_cc_generator.py +78 -7
- mindspore/ops_generate/op_def_py/base_op_prim_py_generator.py +360 -0
- mindspore/ops_generate/op_def_py/custom_op_prim_py_generator.py +140 -0
- mindspore/ops_generate/op_def_py/op_def_py_generator.py +54 -7
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -312
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +74 -17
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +22 -5
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +0 -16
- mindspore/ops_generate/pyboost/op_template_parser.py +3 -2
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +21 -5
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +2 -2
- mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +30 -10
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +10 -3
- mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +19 -9
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +71 -28
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +10 -9
- mindspore/ops_generate/pyboost/pyboost_utils.py +27 -16
- mindspore/ops_generate/resources/yaml_loader.py +13 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +2 -2
- mindspore/parallel/_auto_parallel_context.py +5 -15
- mindspore/parallel/_cell_wrapper.py +1 -1
- mindspore/parallel/_parallel_serialization.py +4 -6
- mindspore/parallel/_ps_context.py +2 -2
- mindspore/parallel/_utils.py +34 -17
- mindspore/parallel/auto_parallel.py +23 -9
- mindspore/parallel/checkpoint_transform.py +20 -2
- mindspore/parallel/cluster/process_entity/_api.py +28 -33
- mindspore/parallel/cluster/process_entity/_utils.py +9 -5
- mindspore/parallel/cluster/run.py +5 -3
- mindspore/{experimental/llm_boost/ascend_native → parallel/distributed}/__init__.py +21 -22
- mindspore/parallel/distributed/distributed_data_parallel.py +393 -0
- mindspore/parallel/distributed/flatten_grad_buffer.py +295 -0
- mindspore/parallel/function/reshard_func.py +6 -5
- mindspore/parallel/nn/parallel_cell_wrapper.py +40 -3
- mindspore/parallel/nn/parallel_grad_reducer.py +0 -8
- mindspore/parallel/shard.py +7 -21
- mindspore/parallel/strategy.py +336 -0
- mindspore/parallel/transform_safetensors.py +127 -20
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +13 -9
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +1 -1
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +1 -1
- mindspore/profiler/common/constant.py +5 -0
- mindspore/profiler/common/file_manager.py +9 -0
- mindspore/profiler/common/msprof_cmd_tool.py +40 -4
- mindspore/profiler/common/path_manager.py +65 -24
- mindspore/profiler/common/profiler_context.py +27 -14
- mindspore/profiler/common/profiler_info.py +3 -3
- mindspore/profiler/common/profiler_meta_data.py +1 -0
- mindspore/profiler/common/profiler_op_analyse.py +10 -6
- mindspore/profiler/common/profiler_path_manager.py +13 -0
- mindspore/profiler/common/util.py +30 -3
- mindspore/profiler/dynamic_profiler.py +91 -46
- mindspore/profiler/envprofiler.py +30 -5
- mindspore/profiler/experimental_config.py +18 -2
- mindspore/profiler/platform/cpu_profiler.py +10 -4
- mindspore/profiler/platform/npu_profiler.py +34 -7
- mindspore/profiler/profiler.py +193 -145
- mindspore/profiler/profiler_action_controller.py +1 -1
- mindspore/profiler/profiler_interface.py +2 -2
- mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +108 -24
- mindspore/runtime/__init__.py +9 -6
- mindspore/runtime/executor.py +35 -0
- mindspore/runtime/memory.py +113 -0
- mindspore/runtime/thread_bind_core.py +1 -1
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/{experimental/llm_boost → tools}/__init__.py +5 -5
- mindspore/tools/data_dump.py +130 -0
- mindspore/tools/sdc_detect.py +91 -0
- mindspore/tools/stress_detect.py +63 -0
- mindspore/train/__init__.py +6 -6
- mindspore/train/_utils.py +8 -21
- mindspore/train/amp.py +6 -7
- mindspore/train/callback/_callback.py +2 -1
- mindspore/train/callback/_checkpoint.py +1 -17
- mindspore/train/callback/_flops_collector.py +10 -6
- mindspore/train/callback/_train_fault_tolerance.py +72 -25
- mindspore/train/data_sink.py +5 -9
- mindspore/train/dataset_helper.py +5 -5
- mindspore/train/model.py +41 -230
- mindspore/train/serialization.py +160 -401
- mindspore/train/train_thor/model_thor.py +2 -2
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +6 -3
- mindspore/utils/dlpack.py +92 -0
- mindspore/utils/dryrun.py +1 -1
- mindspore/utils/runtime_execution_order_check.py +10 -0
- mindspore/utils/sdc_detect.py +14 -12
- mindspore/utils/stress_detect.py +43 -0
- mindspore/utils/utils.py +152 -16
- mindspore/version.py +1 -1
- {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/METADATA +3 -2
- {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/RECORD +330 -344
- mindspore/_extends/remote/kernel_build_server_ascend.py +0 -75
- mindspore/communication/_hccl_management.py +0 -297
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -207
- mindspore/experimental/llm_boost/ascend_native/llm_boost.py +0 -52
- mindspore/experimental/llm_boost/atb/__init__.py +0 -23
- mindspore/experimental/llm_boost/atb/boost_base.py +0 -385
- mindspore/experimental/llm_boost/atb/llama_boost.py +0 -137
- mindspore/experimental/llm_boost/atb/qwen_boost.py +0 -124
- mindspore/experimental/llm_boost/register.py +0 -130
- mindspore/experimental/llm_boost/utils.py +0 -31
- mindspore/include/OWNERS +0 -7
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/nn/optim/_dist_optimizer_registry.py +0 -111
- mindspore/nn/reinforcement/_batch_read_write.py +0 -142
- mindspore/nn/reinforcement/_tensors_queue.py +0 -152
- mindspore/nn/reinforcement/tensor_array.py +0 -145
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/ops/_op_impl/aicpu/priority_replay_buffer.py +0 -113
- mindspore/ops/_op_impl/aicpu/reservoir_replay_buffer.py +0 -96
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +0 -42
- mindspore/ops/_op_impl/cpu/buffer_get.py +0 -28
- mindspore/ops/_op_impl/cpu/buffer_sample.py +0 -28
- mindspore/ops/_op_impl/cpu/priority_replay_buffer.py +0 -42
- mindspore/ops/operations/_tensor_array.py +0 -359
- mindspore/ops/operations/rl_ops.py +0 -288
- mindspore/parallel/_offload_context.py +0 -275
- mindspore/parallel/_recovery_context.py +0 -115
- mindspore/parallel/_transformer/__init__.py +0 -35
- mindspore/parallel/_transformer/layers.py +0 -765
- mindspore/parallel/_transformer/loss.py +0 -251
- mindspore/parallel/_transformer/moe.py +0 -693
- mindspore/parallel/_transformer/op_parallel_config.py +0 -222
- mindspore/parallel/_transformer/transformer.py +0 -3124
- mindspore/parallel/mpi/_mpi_config.py +0 -116
- mindspore/profiler/common/validator/validate_path.py +0 -84
- mindspore/train/memory_profiling_pb2.py +0 -298
- mindspore/utils/hooks.py +0 -81
- /mindspore/common/{_auto_dynamic.py → dynamic_shape/_auto_dynamic.py} +0 -0
- {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/WHEEL +0 -0
- {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/top_level.txt +0 -0
mindspore/ops/operations/sparse_ops.py
CHANGED

@@ -2606,89 +2606,6 @@ class RaggedTensorToTensor(Primitive):
         self.add_prim_attr("num_row_partition_tensors", self.num_row_partition_tensors)


-class SparseCross(Primitive):
-    """
-    Generates sparse cross from a list of sparse and dense tensors.
-
-    Args:
-        hashed_output (bool): If true, returns the hash of the cross instead of the string. This will allow us
-            avoiding string manipulations.
-        num_buckets (int): An int that is >= 0. It is used if "hashed_output" is true.output = hashed_value%num_buckets
-            if num_buckets > 0 else "hashed_value".
-        hash_key (int): Specify the hash_key that will be used by the "FingerprintCat64" function to combine the
-            crosses fingerprints.
-        out_type (mindspore.dtype): The output data type. Defaults to "int64".
-        internal_type (mindspore.dtype): An type int64.
-
-    Inputs:
-        - **indices** (list(Tensor)) - A list of Tensor objects with type int64. 2-D.
-          Indices of each input SparseTensor.
-        - **values** (list(Tensor)) - A list of Tensor objects with types from: int64.
-          1-D. values of each SparseTensor.
-        - **shapes** (list(Tensor)) - A list with the same length as indices of Tensor objects with type int64.
-          1-D. Shapes of each SparseTensor.
-        - **dense_inputs** (list(Tensor)) - A list of Tensor objects with types from: int64.
-          2-D. Columns represented by dense Tensor.
-
-    Outputs:
-        - **output_indices** (Tensor) - A Tensor of type int64. 2-D. Indices of the concatenated SparseTensor.
-        - **output_values** (Tensor) - A Tensor of type "out_type". 1-D.
-          Non-empty values of the concatenated or hashed SparseTensor.
-        - **output_shape** (Tensor) - A Tensor of type int64. 1-D. Shape of the concatenated SparseTensor.
-
-    Raises:
-        TypeError: The indices shape rank is not equal to the shape rank.
-        TypeError: The indices element number is not equal to the value element number.
-        TypeError: The indices shape rank should be 2.
-        TypeError: The denses shape rank should be 2.
-        TypeError: The shapes rank should be 2.
-
-    Supported Platforms:
-        ``CPU``
-
-    Examples:
-        >>> from mindspore.ops.operations.sparse_ops import SparseCross
-        >>> indice1 = Tensor([[0,0],[1,0],[1,1]], dtype=mstype.int64)
-        >>> value1 = Tensor([1, 2, 3], dtype=mstype.int64)
-        >>> shape1 = Tensor([2, 2], dtype=mstype.int64)
-        >>> dense1 = Tensor([[1],[2]], dtype=mstype.int64)
-        >>> indice2 = Tensor([[0,0],[1,0],[1,1]], dtype=mstype.int64)
-        >>> value2 = Tensor([1, 2, 3], dtype=mstype.int64)
-        >>> shape2 = Tensor([2, 2], dtype=mstype.int64)
-        >>> dense2 = Tensor([[1],[2]], dtype=mstype.int64)
-        >>> indices = [indice1, indice2]
-        >>> values = [value1, value2]
-        >>> shapes = [shape1, shape2]
-        >>> dense_inputs = [dense1, dense2]
-        >>> hashed_output=True
-        >>> hash_key= 2
-        >>> out_type= mstype.int64
-        >>> internal_type = mstype.int64
-        >>> num_buckets=0
-        >>> sparse_cross = SparseCross(hashed_output, hash_key, out_type, internal_type, num_buckets)
-        >>> out = sparse_cross(indices, values, shapes, dense_inputs)
-        >>> print(out)
-        (Tensor(shape=[5, 2], dtype=Int64, value=
-        [[0, 0],
-        [1, 0],
-        [1, 1],
-        [1, 2],
-        [1, 3]]), Tensor(shape=[5], dtype=Int64, value= [1350190460805457680, 6319552725219729347,
-        4652439303631496997, 7670687697825594049, 174086171018132662]), Tensor(shape=[2], dtype=Int64, value= [2, 4]))
-    """
-
-    @prim_attr_register
-    def __init__(self, hashed_output, hash_key, out_type, internal_type, num_buckets=0):
-        """Initialize SparseCross."""
-        self.init_prim_io_names(inputs=["indices", "values", "shapes", "dense_inputs"],
-                                outputs=["output_indices", "output_values", "output_shape"])
-        validator.check_value_type("hashed_output", hashed_output, [bool], self.name)
-        validator.check_value_type("hash_key", hash_key, [int], self.name)
-        validator.check_value_type("out_type", out_type, [mstype.Type], self.name)
-        validator.check_value_type("internal_type", internal_type, [mstype.Type], self.name)
-        validator.check_value_type("num_buckets", num_buckets, [int], self.name)
-
-
 class RaggedTensorToSparse(Primitive):
     r"""
     Converts a RaggedTensor into a SparseTensor with the same values.
mindspore/ops/primitive.py
CHANGED

@@ -20,7 +20,7 @@ import copy
 import numpy as np
 from mindspore.common.api import _wrap_func
 from mindspore.log import _LogActionOnce
-from mindspore import
+from mindspore import log as logger
 from mindspore.parallel._utils import _is_in_auto_parallel_mode, _is_in_data_parallel_mode, \
     _is_in_hybrid_parallel_mode, SUPPORTED_TUPLE_IN_TUPLE_STRATEGY
 from mindspore.parallel._ps_context import _is_ps_mode, _is_role_sched

@@ -214,22 +214,6 @@ class Primitive(Primitive_):
         if in_strategy is None and out_strategy is not None:
             raise ValueError(f'The out_strategy of {self.name} is {out_strategy}, need to set in_strategy,'
                              f' but got none')
-        if not _is_in_auto_parallel_mode():
-            mode = context.get_auto_parallel_context("parallel_mode")
-            if in_strategy is not None:
-                logger.warning(f"The in_strategy/in_layout of the operator in your network "
-                               f"will not take effect in {mode} mode. "
-                               f"This means the the shard function called in the network is ignored. \n"
-                               f"If you want to enable it, please use semi auto or auto parallel mode by "
-                               f"context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL "
-                               f"or context.set_auto_parallel_context(parallel_mode=ParallelMode.AUTO_PARALLEL)")
-            if out_strategy is not None:
-                logger.warning(f"The out_strategy/out_layout of the operator in your network "
-                               f"will not take effect in {mode} mode."
-                               f" This means the the shard function called in the network is ignored. \n"
-                               f"If you want to enable it, please use semi auto or auto parallel mode by "
-                               f"context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL "
-                               f"or context.set_auto_parallel_context(parallel_mode=ParallelMode.AUTO_PARALLEL)")

     def del_prim_attr(self, name):
         """

@@ -458,7 +442,7 @@ class Primitive(Primitive_):

         - If the computation involves something like randomization or global variable, the equivalence
           is not guaranteed currently.
-
+        - Should only be used in Graph mode or in gradient functions that are decorated by @jit.

         Args:
             mode (bool): Specifies whether the primitive is recomputed. Default: ``True`` .

@@ -466,7 +450,7 @@ class Primitive(Primitive_):
         Examples:
             >>> import numpy as np
             >>> import mindspore as ms
-            >>> from mindspore import Tensor, ops, nn
+            >>> from mindspore import Tensor, ops, nn, jit
             >>> class NetRecompute(nn.Cell):
             ...     def __init__(self):
             ...         super(NetRecompute,self).__init__()

@@ -481,6 +465,7 @@ class Primitive(Primitive_):
             ...         super(GradNet,self).__init__()
             ...         self.network = network
             ...         self.grad = ops.GradOperation()
+            ...     @jit
             ...     def construct(self, x):
             ...         g_out = self.grad(self.network)(x)
             ...         return g_out

@@ -492,8 +477,6 @@ class Primitive(Primitive_):
             >>> print(a)
             [0. 0.5]
         """
-        if context.get_context("mode") == context.PYNATIVE_MODE:
-            raise TypeError("Recompute is not supported in pynative mode currently.")
         Validator.check_bool(mode)
         self.add_prim_attr("recompute", mode)
         return self

@@ -510,8 +493,6 @@ class Primitive(Primitive_):
         Args:
             backward_prefetch(Union[str, int]): Specifies whether the activation is prefetched in backward pass.
         """
-        if context.get_context("mode") == context.PYNATIVE_MODE:
-            raise ValueError("Offload is not supported in pynative mode currently.")
         self.add_prim_attr("offload", True)
         if isinstance(backward_prefetch, str):
             Validator.check_string(backward_prefetch, ['Auto'], 'backward_prefetch', 'Primitive._offload')

@@ -551,10 +532,6 @@ class Primitive(Primitive_):
         Validator.check_non_negative_int(rank_id, "rank_id", "Primitive.place")
         Validator.check_string(role, "MS_WORKER", "role", "Primitive.place")

-        if context.get_context("mode") == context.PYNATIVE_MODE:
-            raise RuntimeError("You are calling Primitive.place in pynative mode."
-                               "It's only supported in graph mode. Please switch to graph mode.")
-
         # Get the execution context and check whether calling of this 'place' method is valid.
         # This is because placing operators to arbitrary processes while other distributed training mode
         # is enabled is very unpredictable and may cause fatal error.
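As a side note on the place API exercised in the last hunk: a minimal usage sketch, assuming a graph-mode distributed job whose worker processes are already launched (the cluster setup is not shown). The argument order and accepted values follow the validators in the context above (role must be the string "MS_WORKER", rank_id a non-negative int); this sketch is illustrative, not the library's documented example.

# Hedged sketch: pin one operator instance to a specific worker process.
from mindspore import ops

matmul = ops.MatMul()
matmul.place("MS_WORKER", 0)  # run this MatMul on worker 0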
mindspore/ops/tensor_method.py
CHANGED

@@ -24,8 +24,7 @@ from mindspore.ops.composite.multitype_ops._compile_utils import (
     sequence_to_tensor, _tensor_sub, _tensor_pow, _tensor_div, _tensor_floordiv, _tensor_mod
 )
 from mindspore.ops.auto_generate.gen_ops_prim import (
-    inplace_scatter_src_op, inplace_scatter_src_reduce_op, inplace_scatter_value_op, inplace_scatter_value_reduce_op,
-    inplace_copy_op
+    inplace_scatter_src_op, inplace_scatter_src_reduce_op, inplace_scatter_value_op, inplace_scatter_value_reduce_op
 )
 from mindspore.ops.auto_generate.gen_ops_prim import (
     floor_div_op, floor_div_scalar_op

@@ -240,6 +239,7 @@ from mindspore.ops.function.array_func import tensor_scatter_add
 from mindspore.ops.auto_generate import select, select_ext_view
 # 94 sigmoid
 from mindspore.ops.auto_generate import sigmoid
+from mindspore.ops.auto_generate import inplace_sigmoid as sigmoid_
 # 95 sin
 from mindspore.ops.auto_generate import sin
 # 96 size

@@ -367,7 +367,7 @@ from mindspore.ops.auto_generate import acos_ext, acosh_ext, asin_ext, asinh_ext
 from mindspore.ops.function.math_func import median

 # 156
-
+from mindspore.ops.function.math_func import permute
 # 157
 from mindspore.ops.auto_generate import xlogy_op

@@ -442,6 +442,9 @@ from mindspore.ops.auto_generate.gen_ops_prim import inplace_exp_op
 # 1030 log_
 from mindspore.ops.auto_generate.gen_ops_prim import inplace_log_op

+# 1031 masked_scatter
+from mindspore.ops.auto_generate import masked_scatter
+
 from .._checkparam import check_axis_in_range
 from ..ops.composite.multitype_ops import _compile_utils as compile_utils

@@ -1038,6 +1041,10 @@ def tensor_sigmoid(input):
     return sigmoid(input)


+def tensor_sigmoid_(input):
+    return sigmoid_(input)
+
+
 # 95 sin
 def tensor_sin(input):
     return sin(input)

@@ -1135,6 +1142,7 @@ def deprecated_tensor_sum(input, axis=None, dtype=None, keepdims=False, initial=

 # 105 swapaxes

+
 # 106 t
 def tensor_t(input):
     return t(input)

@@ -1163,6 +1171,7 @@ def deprecated_tensor_tile(input, reps):

 # 109 tolist

+
 # 110 topk
 def tensor_topk(input, k, dim=-1, largest=True, sorted=True):
     return topk(input, k, dim, largest, sorted)

@@ -1182,6 +1191,11 @@ def deprecated_tensor_transpose(input, *axes):
     return transpose(input, perm)


+def deprecated_tensor_permute(input, *axis):
+    perm = validator.check_transpose_axis(axis, input.ndim)
+    return permute(input, perm)
+
+
 # 112 tril
 def deprecated_tensor_tril(input, diagonal=0):
     return tril(input, diagonal)

@@ -1194,6 +1208,7 @@ def tensor_trunc(input):

 # 114 type

+
 # 115 type_as
 def deprecated_tensor_type_as(input, other):
     return input.astype(other.dtype)

@@ -1453,14 +1468,10 @@ def tensor_atanh(input):
     return F.atanh(input)


-def tensor_copy_(input, src):
+def tensor_copy_(input, src, non_blocking=False):
     raise ValueError("should not come here for copy_ method")


-def deprecated_tensor_copy_(input, src, non_blocking=False):
-    return inplace_copy_op(input, src)
-
-
 def tensor_tan(input):
     return F.tan(input)

@@ -1513,12 +1524,18 @@ def deprecated_tensor_logaddexp2(input, other):


 # 157
-def tensor_empty(*size, dtype=None, device=None):
+def tensor_empty(*size, dtype=None, device=None, pin_memory=False):
+    r"""
+    For details, please refer to :func:`mindspore.mint.empty`.
+    """
     logger.error(
         "This is a function for empty not should be called. Please check the implementation.")


-def tensor_empty_like(input, *, dtype=None, device=None):
+def tensor_empty_like(input, *, dtype=None, device=None, pin_memory=False):
+    """
+    For details, please refer to :func:`mindspore.mint.empty_like`.
+    """
     raise NotImplementedError(
         "This is a function for empty_like should not be called. Please check the implementation.")

@@ -1813,10 +1830,19 @@ def deprecated_tensor_var(input, axis=None, ddof=0, keepdims=False):
     return _tensor_div(x_sum, nums - ddof)


+# 1222
+def tensor_index_fill_(input, dim, index, value):
+    raise NotImplementedError('Tensor.index_fill_ only supports Ascend.')
+
+
 def tensor_kthvalue(input, k, dim=-1, keepdim=False):
     raise ValueError("should not come here for kthvalue py_method.")


+def tensor_index_copy_(input, dim, index, tensor):
+    raise NotImplementedError('Tensor.index_copy_ only supports Ascend.')
+
+
 def tensor_sub_empty_(input, other, alpha=1):
     raise ValueError("should not come here for sub_ method.")

@@ -1826,9 +1852,11 @@ def tensor_inplace_sub(input, other, *, alpha=1):
         return sub(input, other)
     return sub_ext(input, other, alpha=alpha)

+
 def tensor_new_full(input, size, fill_value, *, dtype=None):
     raise NotImplementedError("new_full method support Ascend only")

+
 def tensor_div_empty_(input, other, rounding_mode=None):
     raise ValueError("should not come here for div_ method.")

@@ -1860,10 +1888,18 @@ def all_gather_matmul(
     raise NotImplementedError('all_gather_matmul only supports Ascend.')


+def conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
+    raise NotImplementedError('conv1d only supports Ascend.')
+
+
 def conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
     raise NotImplementedError('conv3d only supports Ascend.')


+def tensor_remainder_(input, other):
+    return _tensor_mod(input, other)
+
+
 def tensor_floor_divide_(input, other):
     return _tensor_floordiv(input, other)

@@ -1908,6 +1944,10 @@ def tensor_gelu(input, *, approximate):
     return gelu(input, approximate)


+def tensor_bernoulli_(input, p, seed, offset):
+    raise RuntimeError("'bernoulli_' is not supported on this device.")
+
+
 def deprecated_pixel_shuffle(input, upscale_factor):
     return F.pixel_shuffle(input, upscale_factor)

@@ -1920,8 +1960,46 @@ def tensor_quant_matmul(x1, x2, scale, *, offset=None, pertoken_scale=None, bias
     raise NotImplementedError('quant_matmul only supports Ascend.')


+def tensor_index(input, value):
+    raise NotImplementedError("index only supports Ascend.")
+
+
 def tensor_gmm(x, weight, *, bias=None, group_list=None, group_type=0, group_list_type=0):
     raise NotImplementedError("gmm has not been implemented by python.")

+
 def raise_func(*args, **kwargs):
     raise NotImplementedError("this func has not been implemented.")
+
+
+def tensor_masked_scatter(input, mask, source):
+    return masked_scatter(input, mask, source)
+
+
+def tensor_inplace_masked_scatter(input, mask, source):
+    return F.inplace_masked_scatter(input, mask, source)
+
+
+def tensor_broadcast_to(x, shape):
+    return F.broadcast_to(x, shape)
+
+def tensor_squeeze(input, axis=None):
+    return F.squeeze(input, axis)
+
+
+def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
+    raise NotImplementedError('conv2d only supports Ascend.')
+
+
+def tensor_real(input):
+    r"""
+    For details, please refer to :func:`mindspore.ops.real`.
+    """
+    return ops.real(input)
+
+
+def tensor_imag(input):
+    r"""
+    For details, please refer to :func:`mindspore.ops.imag`.
+    """
+    return ops.imag(input)
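For illustration, a minimal sketch of the Tensor-level behavior behind two bindings registered above (tensor_sigmoid_ and tensor_remainder_). It assumes these surface as the in-place methods Tensor.sigmoid_ and Tensor.remainder_, which is how this registration table is normally consumed; that mapping is an assumption, not verified against the released 2.7.1 API.

# Hedged sketch of the in-place bindings added in tensor_method.py above
# (method names assumed from the registration table, not from released docs).
import numpy as np
import mindspore as ms
from mindspore import Tensor

x = Tensor(np.array([0.0, 1.0, 2.0]), ms.float32)
x.sigmoid_()        # in-place sigmoid, backed by the inplace_sigmoid primitive
y = Tensor(np.array([5.0, 7.0, 9.0]), ms.float32)
y.remainder_(2.0)   # in-place remainder; lowered to _tensor_mod in this table
print(x, y)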
mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py
CHANGED

@@ -30,7 +30,7 @@ from resources.resource_list import ResourceType


 ACLNN_REG_CODE = """
-#include "$ops_gen_kernel_path/ascend/
+#include "$ops_gen_kernel_path/ascend/aclnn/kernel_mod_impl/aclnn_kernel_mod.h"

 namespace mindspore {
 namespace kernel {

@@ -88,14 +88,14 @@ class AclnnKernelRegisterAutoCcGenerator(BaseGenerator):
         res_str = template.CC_LICENSE_STR + reg_code

         save_path = os.path.join(
-            work_path, f"{K.MS_OPS_KERNEL_PATH}/ascend/
+            work_path, f"{K.MS_OPS_KERNEL_PATH}/ascend/aclnn/kernel_mod_impl/auto_generate")
         file_name = "aclnn_kernel_register_auto.cc"
         gen_utils.save_file(save_path, file_name, res_str)


-def get_registed_ops(file_path=f'{K.MS_OPS_KERNEL_PATH}/ascend/
+def get_registed_ops(file_path=f'{K.MS_OPS_KERNEL_PATH}/ascend/aclnn/kernel_mod_impl/'):
     '''get registered ops by search files'''
-    # default search in 'ops/kernel/ascend/
+    # default search in 'ops/kernel/ascend/aclnn/kernel_mod_impl/'
     search_path = os.path.join(K.WORK_DIR, file_path)
     ret = []
     try:

@@ -117,7 +117,7 @@ def get_registed_ops(file_path=f'{K.MS_OPS_KERNEL_PATH}/ascend/opapi/'):

 registed_ops = get_registed_ops()
 manual_registed_ops = get_registed_ops(
-    f'{K.MS_OPS_KERNEL_PATH}/ascend/
+    f'{K.MS_OPS_KERNEL_PATH}/ascend/aclnn/kernel_mod_impl/customize/')


 def check_op_registed(op_name, manual=False):
mindspore/ops_generate/aclnn/gen_aclnn_implement.py
CHANGED

@@ -118,7 +118,7 @@ def gen_aclnn_kernel(op_proto: OpProto, need_update_shape=False, auto=False):
         logging.warning("Kernel {%s} is already registered.", op_name)
         return

-    aclnn_path = f'{MS_OPS_KERNEL_PATH}/ascend/
+    aclnn_path = f'{MS_OPS_KERNEL_PATH}/ascend/aclnn/kernel_mod_impl/customize/'
     # merge inner ops
     dispatch = op_proto.op_dispatch
     aclnn_name = ''.join(word.capitalize() for word in op_name.split('_'))

@@ -130,7 +130,7 @@ def gen_aclnn_kernel(op_proto: OpProto, need_update_shape=False, auto=False):
     if auto:
         auto_gen = "_auto_gen"
         kernelmod_name = aclnn_name + "Ascend"
-        aclnn_path = f'{MS_OPS_KERNEL_PATH}/ascend/
+        aclnn_path = f'{MS_OPS_KERNEL_PATH}/ascend/aclnn/kernel_mod_impl/aclnn_auto_gen/'
     pathlib.Path(os.path.join(K.WORK_DIR, aclnn_path)
                  ).mkdir(parents=True, exist_ok=True)
     if dispatch.ascend is None:

@@ -145,9 +145,9 @@ def gen_aclnn_kernel(op_proto: OpProto, need_update_shape=False, auto=False):
                          kernelmod_h_and_cc_path, need_update_shape)


-def get_registed_ops(file_path=f'{MS_OPS_KERNEL_PATH}/ascend/
+def get_registed_ops(file_path=f'{MS_OPS_KERNEL_PATH}/ascend/aclnn/kernel_mod_impl/'):
     '''get registered ops by search files'''
-    # default search in 'ops/kernel/ascend/
+    # default search in 'ops/kernel/ascend/aclnn/kernel_mod_impl/'
     search_path = os.path.join(K.WORK_DIR, file_path)
     ret = []
     try:

@@ -167,7 +167,7 @@ def get_registed_ops(file_path=f'{MS_OPS_KERNEL_PATH}/ascend/opapi/'):

 registed_ops = get_registed_ops()
 manual_registed_ops = get_registed_ops(
-    f'{MS_OPS_KERNEL_PATH}/ascend/
+    f'{MS_OPS_KERNEL_PATH}/ascend/aclnn/kernel_mod_impl/customize/')


 def check_op_registed(op_name, manual=False):

@@ -182,7 +182,7 @@ def generate_aclnn_reg_code(yaml_data):
     yaml_str = gen_utils.safe_load_yaml(ops_yaml_path)

     reg_code = f"""
-#include "{MS_OPS_KERNEL_PATH}/ascend/
+#include "{MS_OPS_KERNEL_PATH}/ascend/aclnn/kernel_mod_impl/aclnn_kernel_mod.h"

 namespace mindspore {{
 namespace kernel {{

@@ -224,9 +224,9 @@ def generate_aclnn_reg_file(work_path, yaml_str):
     Generate nnacl kernelmod register
     """
     tmp_register_file = work_path + \
-        f'{MS_OPS_KERNEL_PATH}/ascend/
+        f'{MS_OPS_KERNEL_PATH}/ascend/aclnn/kernel_mod_impl/tmp_aclnn_kernel_register.cc'
     register_file = work_path + \
-        f'{MS_OPS_KERNEL_PATH}/ascend/
+        f'{MS_OPS_KERNEL_PATH}/ascend/aclnn/kernel_mod_impl/aclnn_kernel_register_auto.cc'
     reg_code = generate_aclnn_reg_code(yaml_str)
     gen_utils.save_file(
         os.path.dirname(tmp_register_file), os.path.basename(tmp_register_file), gen_utils.cc_license_str + reg_code)
mindspore/ops_generate/api/functions_cc_generator.py
CHANGED

@@ -23,6 +23,7 @@ from common.template import Template
 import common.gen_constants as K
 from common.gen_utils import save_file
 from common.base_generator import BaseGenerator
+from pyboost.op_template_parser import OpTemplateParser
 from pyboost.pyboost_utils import is_optional_param, get_input_dtype, get_return_type


@@ -39,7 +40,7 @@ class FunctionsHeaderGenerator(BaseGenerator):
         self.function_interface_template = Template("${return_type} BACKEND_EXPORT ${op_name}(${input_args});")
         self.function_interface_template_comm = Template(
             "${return_type} BACKEND_EXPORT ${op_name}_inner(${input_args}," \
-            "CommHandlePtr comm_handle,
+            "CommHandlePtr comm_handle, device::DeviceType target);"
         )
         self.function_interface_template_comm_return_handle = Template(
             "${return_type_with_handle} BACKEND_EXPORT ${op_name}(${input_args});"

@@ -112,10 +113,15 @@ class FunctionsGenerator(BaseGenerator):
         """
         self.FUNCTIONS_CC_TEMPLATE = template.FUNCTIONS_CC_TEMPLATE
         self.FUNCTION_BODY_TEMPLATE = template.FUNCTION_BODY_TEMPLATE
+        self.FUNCTION_VIEW_BODY_TEMPLATE = template.FUNCTION_VIEW_BODY_TEMPLATE
+        self.FUNCTION_VIEW_CUSTOMIZE_BODY_TEMPLATE = template.FUNCTION_VIEW_CUSTOMIZE_BODY_TEMPLATE
         self.FUNCTION_COMM_BODY_TEMPLATE = template.FUNCTION_COMM_BODY_TEMPLATE
         self.pyboost_func_include_header_template = Template(
             f'#include "{K.MS_PYBOOST_BASE_PATH}/auto_generate/${{operator_name}}.h"\n'
         )
+        self.pyboost_view_func_include_header_template = Template(
+            f'#include "{K.MS_OPS_VIEW_PATH}/${{operator_name}}_strides_calc.h"\n'
+        )
         self.clone_inplace_input_template = Template(
             'GetCloneFunc()(op, prim::kPrim${class_name}, device_target, {${grad_args}});'
         )

@@ -140,9 +146,14 @@ class FunctionsGenerator(BaseGenerator):
         for op_proto in op_protos:
             if op_proto.op_dispatch is None:
                 continue
-
-            self.
-
+            if op_proto.op_view:
+                function_body, pyboost_func_include_header = self._get_function_view_body(op_proto)
+            else:
+                pyboost_func_include_header = self.pyboost_func_include_header_template.\
+                    replace(operator_name=op_proto.op_name)
+                function_body = self._get_function_body(op_proto)
+            func_include_headers_list.append(pyboost_func_include_header)
+            op_call_with_grad_list.append(function_body)
             ops_inc_head_set.add(
                 template.OP_DEF_INC_HEAD_TEMPLATE.replace(prefix_char=op_proto.op_class.name[0].lower()))
         pyboost_func_h_str = self.FUNCTIONS_CC_TEMPLATE.replace(op_call_with_grad=op_call_with_grad_list,

@@ -152,6 +163,44 @@ class FunctionsGenerator(BaseGenerator):
         file_name = "functions.cc"
         save_file(save_path, file_name, pyboost_func_h_str)

+    def _get_function_view_body(self, op_proto):
+        """
+        Get the function body for a given view operator prototype.
+
+        Args:
+            op_proto: The operator prototype.
+
+        Returns:
+            str: The generated function body.
+        """
+        function_body_template = self.FUNCTION_VIEW_BODY_TEMPLATE
+        pyboost_func_include_header = self.pyboost_view_func_include_header_template.\
+            replace(operator_name=op_proto.op_name)
+        if not op_proto.bprop_expander or op_proto.op_name in ["reshape"]:
+            function_body_template = self.FUNCTION_VIEW_CUSTOMIZE_BODY_TEMPLATE
+            pyboost_func_include_header = ""
+        op_parser = OpTemplateParser(op_proto)
+        input_args = self._get_input_args(op_proto, False)
+        input_args_with_type = self._get_input_args(op_proto, True)
+        call_args_with_type = op_parser.parse_call_args_with_types(True)
+        call_args = OpTemplateParser.parse_original_call_args(op_proto.op_args)
+        call_args_tensors = op_parser.get_call_args_tensor()
+        storage_calc_str = op_proto.op_class.name
+        return_values, _ = op_parser.generate_pyboost_outputs()
+        return_type_str = _get_return_type_str(op_proto)
+        function_body = function_body_template.replace(op_name=op_proto.op_name,
+                                                       class_name=op_proto.op_class.name,
+                                                       input_args_with_type=input_args_with_type,
+                                                       input_args=input_args,
+                                                       storage_calc=storage_calc_str,
+                                                       call_args_with_type=call_args_with_type,
+                                                       call_args=call_args,
+                                                       call_tensors=call_args_tensors,
+                                                       input=call_args[0],
+                                                       return_values=return_values,
+                                                       return_type=return_type_str)
+        return function_body, pyboost_func_include_header
+
     def _get_function_body(self, op_proto):
         """
         Get the function body for a given operator prototype.
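The generator code above is driven by ${...} placeholder templates filled through Template.replace(...). A rough stand-in using Python's standard string.Template is sketched below; the in-tree common.template.Template class is assumed to behave similarly for simple keyword substitution, and the filled-in values are hypothetical, chosen only to show the shape of the emitted C++ declaration.

# Hedged approximation of the placeholder-filling pattern used by
# FunctionsHeaderGenerator above; string.Template stands in for the in-tree
# common.template.Template, which this sketch does not reproduce exactly.
from string import Template

interface = Template("${return_type} BACKEND_EXPORT ${op_name}(${input_args});")
print(interface.substitute(
    return_type="tensor::TensorPtr",                 # hypothetical values,
    op_name="abs",                                   # for illustration only
    input_args="const tensor::TensorPtr &input"))
# -> tensor::TensorPtr BACKEND_EXPORT abs(const tensor::TensorPtr &input);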