mindspore-2.6.0rc1-cp311-cp311-win_amd64.whl → mindspore-2.7.0-cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +2 -2
- mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +42 -11
- mindspore/_extends/builtin_operations.py +3 -3
- mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
- mindspore/_extends/optimize/cell_utils.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/custom.py +1109 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/__init__.py +3 -3
- mindspore/_extends/parse/compile_config.py +44 -22
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -2
- mindspore/_extends/parse/parser.py +65 -84
- mindspore/_extends/parse/resources.py +39 -0
- mindspore/_extends/parse/standard_method.py +58 -14
- mindspore/_extends/parse/trope.py +8 -1
- mindspore/_extends/pijit/__init__.py +1 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +2 -5
- mindspore/amp.py +4 -22
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +4 -4
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +43 -12
- mindspore/common/_grad_function.py +2 -1
- mindspore/common/_pijit_context.py +28 -7
- mindspore/common/_stub_tensor.py +1 -209
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +178 -53
- mindspore/common/_utils.py +9 -1
- mindspore/common/api.py +377 -203
- mindspore/common/dtype.py +108 -57
- mindspore/common/dump.py +11 -16
- mindspore/common/dynamic_shape/__init__.py +0 -0
- mindspore/common/{auto_dynamic_shape.py → dynamic_shape/auto_dynamic_shape.py} +17 -23
- mindspore/common/dynamic_shape/enable_dynamic.py +197 -0
- mindspore/common/file_system.py +59 -9
- mindspore/common/generator.py +5 -3
- mindspore/common/hook_handle.py +33 -5
- mindspore/common/jit_config.py +1 -1
- mindspore/common/jit_trace.py +84 -105
- mindspore/common/np_dtype.py +3 -3
- mindspore/common/parameter.py +27 -29
- mindspore/common/recompute.py +5 -7
- mindspore/common/sparse_tensor.py +0 -3
- mindspore/common/symbol.py +0 -1
- mindspore/common/tensor.py +117 -131
- mindspore/communication/_comm_helper.py +46 -4
- mindspore/communication/management.py +79 -7
- mindspore/context.py +67 -55
- mindspore/dataset/__init__.py +1 -1
- mindspore/dataset/audio/transforms.py +1 -1
- mindspore/dataset/core/config.py +38 -4
- mindspore/dataset/engine/datasets.py +350 -322
- mindspore/dataset/engine/datasets_user_defined.py +70 -24
- mindspore/dataset/engine/iterators.py +2 -2
- mindspore/dataset/engine/obs/config_loader.py +2 -2
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
- mindspore/dataset/transforms/c_transforms.py +2 -2
- mindspore/dataset/transforms/py_transforms.py +7 -3
- mindspore/dataset/transforms/transforms.py +10 -6
- mindspore/dataset/vision/__init__.py +1 -1
- mindspore/dataset/vision/py_transforms.py +8 -8
- mindspore/dataset/vision/transforms.py +17 -5
- mindspore/dataset/vision/utils.py +632 -21
- mindspore/dataset/vision/validators.py +1 -0
- mindspore/device_context/ascend/device.py +1 -1
- mindspore/device_context/ascend/op_tuning.py +35 -1
- mindspore/device_context/gpu/__init__.py +2 -2
- mindspore/device_context/gpu/device.py +1 -1
- mindspore/device_context/gpu/op_precision.py +4 -2
- mindspore/device_context/gpu/op_tuning.py +6 -3
- mindspore/device_manager.py +16 -9
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -4
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/optim/adadelta.py +13 -20
- mindspore/experimental/optim/adagrad.py +15 -22
- mindspore/experimental/optim/adam.py +17 -24
- mindspore/experimental/optim/adamax.py +14 -22
- mindspore/experimental/optim/adamw.py +28 -34
- mindspore/experimental/optim/asgd.py +15 -25
- mindspore/experimental/optim/lr_scheduler.py +27 -45
- mindspore/experimental/optim/nadam.py +14 -24
- mindspore/experimental/optim/optimizer.py +13 -23
- mindspore/experimental/optim/radam.py +18 -24
- mindspore/experimental/optim/rmsprop.py +14 -25
- mindspore/experimental/optim/rprop.py +15 -26
- mindspore/experimental/optim/sgd.py +9 -19
- mindspore/hal/__init__.py +4 -4
- mindspore/hal/contiguous_tensors_handle.py +2 -2
- mindspore/hal/memory.py +27 -7
- mindspore/include/api/cell.h +65 -5
- mindspore/include/api/cfg.h +24 -7
- mindspore/include/api/context.h +1 -0
- mindspore/include/api/delegate.h +10 -2
- mindspore/include/api/dual_abi_helper.h +100 -19
- mindspore/include/api/graph.h +14 -1
- mindspore/include/api/kernel.h +16 -3
- mindspore/include/api/kernel_api.h +9 -1
- mindspore/include/api/metrics/accuracy.h +9 -0
- mindspore/include/api/model.h +8 -1
- mindspore/include/api/model_group.h +4 -0
- mindspore/include/api/model_parallel_runner.h +2 -0
- mindspore/include/api/status.h +48 -10
- mindspore/include/api/types.h +8 -3
- mindspore/include/c_api/model_c.h +0 -58
- mindspore/include/c_api/tensor_c.h +0 -26
- mindspore/include/dataset/constants.h +9 -0
- mindspore/include/dataset/vision_ascend.h +1 -1
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/tools/cifar10.py +61 -11
- mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mindspore_ops_host.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +6 -46
- mindspore/mint/distributed/__init__.py +5 -0
- mindspore/mint/distributed/distributed.py +429 -23
- mindspore/mint/nn/__init__.py +1 -1
- mindspore/mint/nn/functional.py +53 -6
- mindspore/mint/nn/layer/_functions.py +163 -294
- mindspore/mint/nn/layer/activation.py +8 -6
- mindspore/mint/nn/layer/conv.py +140 -104
- mindspore/mint/nn/layer/normalization.py +11 -25
- mindspore/mint/optim/adam.py +19 -18
- mindspore/mint/optim/adamw.py +14 -8
- mindspore/mint/optim/sgd.py +5 -5
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/cell.py +491 -623
- mindspore/nn/grad/cell_grad.py +11 -12
- mindspore/nn/layer/activation.py +36 -36
- mindspore/nn/layer/basic.py +74 -77
- mindspore/nn/layer/channel_shuffle.py +4 -4
- mindspore/nn/layer/combined.py +4 -2
- mindspore/nn/layer/conv.py +117 -110
- mindspore/nn/layer/dense.py +9 -7
- mindspore/nn/layer/embedding.py +50 -52
- mindspore/nn/layer/image.py +38 -40
- mindspore/nn/layer/math.py +111 -112
- mindspore/nn/layer/normalization.py +56 -44
- mindspore/nn/layer/pooling.py +58 -63
- mindspore/nn/layer/rnn_cells.py +33 -33
- mindspore/nn/layer/rnns.py +56 -56
- mindspore/nn/layer/thor_layer.py +74 -73
- mindspore/nn/layer/transformer.py +11 -1
- mindspore/nn/learning_rate_schedule.py +20 -20
- mindspore/nn/loss/loss.py +79 -81
- mindspore/nn/optim/adam.py +4 -6
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +2 -0
- mindspore/nn/optim/lamb.py +1 -3
- mindspore/nn/optim/optimizer.py +1 -1
- mindspore/nn/optim/tft_wrapper.py +2 -3
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/utils.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -1
- mindspore/nn/probability/distribution/poisson.py +2 -1
- mindspore/nn/sparse/sparse.py +3 -3
- mindspore/nn/wrap/cell_wrapper.py +73 -42
- mindspore/nn/wrap/grad_reducer.py +37 -52
- mindspore/nn/wrap/loss_scale.py +72 -74
- mindspore/numpy/array_creations.py +7 -7
- mindspore/numpy/fft.py +1 -1
- mindspore/numpy/math_ops.py +5 -5
- mindspore/numpy/utils_const.py +1 -1
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
- mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
- mindspore/ops/_grad_experimental/grad_inner_ops.py +0 -9
- mindspore/ops/_op_impl/cpu/__init__.py +1 -0
- mindspore/{experimental/es/__init__.py → ops/_op_impl/cpu/joinedstr_op.py} +12 -6
- mindspore/ops/_vmap/vmap_array_ops.py +31 -13
- mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +54 -13
- mindspore/ops/auto_generate/gen_extend_func.py +27 -145
- mindspore/ops/auto_generate/gen_ops_def.py +1027 -347
- mindspore/ops/auto_generate/gen_ops_prim.py +2341 -1117
- mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
- mindspore/ops/composite/__init__.py +10 -0
- mindspore/ops/composite/base.py +9 -5
- mindspore/ops/composite/multitype_ops/__init__.py +12 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +133 -109
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
- mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
- mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
- mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
- mindspore/ops/function/__init__.py +4 -1
- mindspore/ops/function/_add_attr_func.py +11 -6
- mindspore/ops/function/array_func.py +19 -102
- mindspore/ops/function/debug_func.py +8 -5
- mindspore/ops/function/grad/grad_func.py +5 -13
- mindspore/ops/function/math_func.py +77 -572
- mindspore/ops/function/nn_func.py +46 -94
- mindspore/ops/function/other_func.py +4 -1
- mindspore/ops/function/random_func.py +44 -5
- mindspore/ops/function/vmap_func.py +2 -1
- mindspore/ops/functional.py +4 -4
- mindspore/ops/functional_overload.py +594 -18
- mindspore/ops/op_info_register.py +21 -0
- mindspore/ops/operations/__init__.py +16 -11
- mindspore/ops/operations/_custom_ops_utils.py +689 -34
- mindspore/ops/operations/_inner_ops.py +14 -18
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +5 -51
- mindspore/ops/operations/comm_ops.py +186 -41
- mindspore/ops/operations/custom_ops.py +303 -177
- mindspore/ops/operations/debug_ops.py +59 -4
- mindspore/ops/operations/image_ops.py +13 -13
- mindspore/ops/operations/manually_defined/ops_def.py +27 -28
- mindspore/ops/operations/math_ops.py +8 -9
- mindspore/ops/operations/nn_ops.py +8 -40
- mindspore/ops/primitive.py +9 -20
- mindspore/ops/tensor_method.py +63 -15
- mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
- mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
- mindspore/ops_generate/api/functions_cc_generator.py +58 -10
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
- mindspore/ops_generate/common/base_generator.py +14 -0
- mindspore/ops_generate/common/gen_constants.py +8 -3
- mindspore/ops_generate/common/gen_utils.py +0 -19
- mindspore/ops_generate/common/op_proto.py +11 -4
- mindspore/ops_generate/common/template.py +88 -11
- mindspore/ops_generate/gen_ops.py +1 -1
- mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
- mindspore/ops_generate/op_def/ops_def_cc_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -16
- mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
- mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
- mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
- mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
- mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
- mindspore/parallel/_auto_parallel_context.py +16 -23
- mindspore/parallel/_cell_wrapper.py +113 -45
- mindspore/parallel/_parallel_serialization.py +4 -3
- mindspore/parallel/_ps_context.py +4 -6
- mindspore/parallel/_tensor.py +167 -12
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/transformer.py +17 -12
- mindspore/parallel/_utils.py +5 -11
- mindspore/parallel/auto_parallel.py +35 -14
- mindspore/parallel/checkpoint_convert.py +3 -3
- mindspore/parallel/checkpoint_transform.py +13 -7
- mindspore/parallel/cluster/process_entity/_api.py +88 -49
- mindspore/parallel/cluster/process_entity/_utils.py +95 -7
- mindspore/parallel/cluster/run.py +48 -7
- mindspore/parallel/function/__init__.py +8 -1
- mindspore/parallel/function/reshard_func.py +12 -12
- mindspore/parallel/nn/__init__.py +15 -2
- mindspore/parallel/nn/parallel_cell_wrapper.py +50 -14
- mindspore/parallel/nn/parallel_grad_reducer.py +7 -14
- mindspore/parallel/shard.py +10 -25
- mindspore/parallel/transform_safetensors.py +469 -174
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +12 -6
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
- mindspore/profiler/analysis/task_manager.py +1 -1
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +10 -9
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +43 -23
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
- mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
- mindspore/profiler/common/constant.py +16 -0
- mindspore/profiler/common/msprof_cmd_tool.py +2 -2
- mindspore/profiler/common/path_manager.py +9 -0
- mindspore/profiler/common/profiler_context.py +50 -29
- mindspore/profiler/common/profiler_info.py +0 -16
- mindspore/profiler/common/profiler_meta_data.py +1 -0
- mindspore/profiler/common/profiler_op_analyse.py +239 -0
- mindspore/profiler/common/profiler_output_path.py +23 -8
- mindspore/profiler/common/profiler_parameters.py +128 -35
- mindspore/profiler/dynamic_profile/__init__.py +0 -0
- mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
- mindspore/profiler/dynamic_profiler.py +374 -338
- mindspore/profiler/envprofiler.py +42 -12
- mindspore/profiler/experimental_config.py +112 -7
- mindspore/profiler/mstx.py +33 -12
- mindspore/profiler/platform/__init__.py +2 -3
- mindspore/profiler/platform/cpu_profiler.py +10 -4
- mindspore/profiler/platform/npu_profiler.py +30 -20
- mindspore/profiler/profiler.py +218 -154
- mindspore/profiler/profiler_action_controller.py +65 -77
- mindspore/profiler/profiler_interface.py +2 -2
- mindspore/profiler/schedule.py +10 -4
- mindspore/rewrite/common/config.py +1 -0
- mindspore/rewrite/common/namer.py +1 -0
- mindspore/rewrite/common/namespace.py +1 -0
- mindspore/rewrite/node/node.py +31 -11
- mindspore/rewrite/parsers/assign_parser.py +1 -1
- mindspore/rewrite/symbol_tree/symbol_tree.py +2 -2
- mindspore/run_check/_check_version.py +7 -10
- mindspore/runtime/__init__.py +8 -6
- mindspore/runtime/event.py +10 -4
- mindspore/runtime/executor.py +87 -45
- mindspore/runtime/memory.py +31 -32
- mindspore/runtime/thread_bind_core.py +299 -165
- mindspore/safeguard/rewrite_obfuscation.py +12 -13
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/_utils.py +17 -7
- mindspore/train/amp.py +43 -23
- mindspore/train/callback/__init__.py +5 -5
- mindspore/train/callback/_callback.py +2 -1
- mindspore/train/callback/_checkpoint.py +4 -14
- mindspore/train/callback/_flops_collector.py +11 -7
- mindspore/train/callback/_landscape.py +0 -1
- mindspore/train/callback/_train_fault_tolerance.py +98 -21
- mindspore/train/data_sink.py +15 -6
- mindspore/train/dataset_helper.py +14 -5
- mindspore/train/model.py +133 -69
- mindspore/train/serialization.py +168 -126
- mindspore/train/summary/summary_record.py +13 -2
- mindspore/train/train_thor/model_thor.py +2 -2
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +0 -6
- mindspore/utils/runtime_execution_order_check.py +163 -77
- mindspore/utils/sdc_detect.py +68 -0
- mindspore/utils/utils.py +14 -17
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/METADATA +5 -4
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/RECORD +403 -442
- mindspore/_deprecated/jit.py +0 -198
- mindspore/_extends/remote/kernel_build_server_ascend.py +0 -75
- mindspore/communication/_hccl_management.py +0 -297
- mindspore/experimental/es/embedding_service.py +0 -891
- mindspore/experimental/es/embedding_service_layer.py +0 -581
- mindspore/profiler/common/validator/__init__.py +0 -14
- mindspore/profiler/common/validator/validate_path.py +0 -84
- mindspore/profiler/parser/__init__.py +0 -14
- mindspore/profiler/parser/aicpu_data_parser.py +0 -272
- mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
- mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
- mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
- mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
- mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
- mindspore/profiler/parser/ascend_flops_generator.py +0 -116
- mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
- mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
- mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
- mindspore/profiler/parser/ascend_op_generator.py +0 -334
- mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
- mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
- mindspore/profiler/parser/base_timeline_generator.py +0 -483
- mindspore/profiler/parser/container.py +0 -229
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
- mindspore/profiler/parser/flops_parser.py +0 -531
- mindspore/profiler/parser/framework_enum.py +0 -111
- mindspore/profiler/parser/framework_parser.py +0 -464
- mindspore/profiler/parser/framework_struct.py +0 -61
- mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
- mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
- mindspore/profiler/parser/hccl_parser.py +0 -573
- mindspore/profiler/parser/hwts_log_parser.py +0 -122
- mindspore/profiler/parser/integrator.py +0 -526
- mindspore/profiler/parser/memory_usage_parser.py +0 -277
- mindspore/profiler/parser/minddata_analyzer.py +0 -800
- mindspore/profiler/parser/minddata_parser.py +0 -186
- mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
- mindspore/profiler/parser/op_intermediate_parser.py +0 -149
- mindspore/profiler/parser/optime_parser.py +0 -250
- mindspore/profiler/parser/profiler_info.py +0 -213
- mindspore/profiler/parser/step_trace_parser.py +0 -666
- mindspore/utils/hooks.py +0 -81
- /mindspore/common/{_auto_dynamic.py → dynamic_shape/_auto_dynamic.py} +0 -0
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/WHEEL +0 -0
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/entry_points.txt +0 -0
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0.dist-info}/top_level.txt +0 -0
@@ -30,7 +30,6 @@ from mindspore.ops.primitive import prim_attr_register, Primitive, PrimitiveWith
 from mindspore._checkparam import check_hook_fn
 from mindspore.ops import operations as P
 
-
 SUMMARY_TENSOR_CACHE = []
 
 
@@ -310,6 +309,8 @@ class TensorDump(Primitive):
         """Initialize TensorDump."""
         if security.enable_security():
             raise ValueError('The TensorDump is not supported, please without `-s on` and recompile source.')
+        if input_output not in ['in', 'out']:
+            raise ValueError(f"The 'input_output' argument should be one of ['in', 'out'], but got: {input_output}")
         self.add_prim_attr("side_effect_io", True)
         self.add_prim_attr("channel_name", "ms_tensor_dump")
 
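TensorDump now validates `input_output` at construction time instead of failing later. A minimal sketch of the new check (illustrative only; assumes a build with security disabled, since `-s on` forbids TensorDump entirely):

    from mindspore import ops

    ops.TensorDump(input_output='in')     # accepted
    ops.TensorDump(input_output='out')    # accepted
    ops.TensorDump(input_output='other')  # raises ValueError listing ['in', 'out']
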
@@ -396,6 +397,10 @@ class InsertGradientOf(Primitive):
     """
     Attaches callback to the graph node that will be invoked on the node's gradient.
 
+    .. warning::
+        In the callback, exercise caution when using side-effect operators,
+        such as the TensorDump operator, as current support is incomplete.
+
     Args:
         f (Function): MindSpore's Function. Callback function.
 
@@ -459,15 +464,65 @@ class InsertGradientOf(Primitive):
         self.f = f
 
 
+class DumpGradient(Primitive):
+    """
+    The `DumpGradient` Primitive is a hook, used to dump dout which pass to `x`.
+
+    Inputs:
+        - **path** (str) - The path of the file to be saved.
+        - **x** (Tensor) - Input Tensor of any dimension.
+        - **input_output** (str) - support value should be one of ['in', 'out'].
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore as ms
+        >>> from mindspore import ops
+        >>> from mindspore import Tensor
+        >>> ms.set_context(mode=ms.PYNATIVE_MODE)
+        >>> ms.set_device(device_target="Ascend")
+        >>> dg = ops.DumpGradient()
+        >>> def dout_dump_test(x, y):
+        ...     x = dg("x_dout.npy", x, 'out')
+        ...     print(f"x value is {x}")
+        ...     z = x * y
+        ...     return z
+        >>> ms_grad = ms.grad(dout_dump_test, grad_position=(0, 1))
+        >>> x_grad, y_grad = ms_grad(Tensor(1, ms.float32), Tensor(2, ms.float32))
+        >>> print(f"x grad is {x_grad}, y_grad is {y_grad}")
+        >>> x_grad_npy = np.load("x_dout.npy")
+        >>> print(f"load x_grad from npy, x_grad is {x_grad_npy}")
+        x value is 1.0
+        x grad is 2.0, y grad is 1.0
+        load x_grad from npy, x_grad is array(2., dtype=float32)
+    """
+
+    @prim_attr_register
+    def __init__(self):
+        pass
+
+    def __call__(self, path, x, input_output):
+        def _dump_hook(dout):
+            P.TensorDump()(path, dout)
+            return dout
+        x = P.InsertGradientOf(_dump_hook)(x)
+        return x
+
+
 class Morph(PrimitiveWithInfer):
     """
     The `Morph` Primitive is used to encapsulate a user-defined function `fn`, allowing it to be used as a custom
     Primitive.
-
-
-
+
+    The `Morph` Primitive is primarily designed for custom graph optimization in GRAPH mode. For example, it supports
+    encapsulation of irregular collective communications (such as :func:`mindspore.ops.AlltoAllV`) in distributed
+    auto-parallel training scenarios.
+
     When the `Morph` Primitive is applied to inputs, it is actually the encapsulated user-defined function `fn` that is
     applied to the inputs.
+
     The main difference between the `Morph` Primitive and :func:`mindspore.ops.Custom` is that the former is expanded
     and replaced by the user-defined `fn` before automatic differentiation, so there is no need to implement a backward
     function.
 
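The new `DumpGradient` is thin sugar over the two primitives above: its `__call__` registers an `InsertGradientOf` hook that `TensorDump`s the incoming gradient and returns it unchanged. The same pattern written out by hand (mirrors the `__call__` body shown in the hunk; values taken from the doctest):

    import mindspore as ms
    from mindspore import ops, Tensor

    def save_dout(dout):
        ops.TensorDump()("x_dout.npy", dout)    # side effect: persist the gradient
        return dout                             # must pass the gradient through unchanged

    def fn(x, y):
        x = ops.InsertGradientOf(save_dout)(x)  # hook the gradient flowing into x
        return x * y

    x_grad, y_grad = ms.grad(fn, grad_position=(0, 1))(Tensor(1.0), Tensor(2.0))
    # d(x*y)/dx = y = 2.0, so x_dout.npy ends up holding 2.0
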
@@ -102,19 +102,19 @@ class AdjustContrastv2(Primitive):
     ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-
-
-
-
-
-
-
-
-
-
-
-
-
+        >>> images = Tensor([[[1.0, 2.0, 3.0],
+        ...                   [4.0, 5.0, 6.0]],
+        ...                  [[7.0, 8.0, 9.0],
+        ...                   [10.0, 11.0, 12.0]]], mstype.float32)
+        >>> contrast_factor = Tensor(2., mstype.float32)
+        >>> adjustcontrastv2 = AdjustContrastv2()
+        >>> output = adjustcontrastv2(images, contrast_factor)
+        >>> print(output)
+        [[[-3.5 -2.5 -1.5]
+          [ 2.5  3.5  4.5]]
+        <BLANKLINE>
+         [[ 8.5  9.5 10.5]
+          [14.5 15.5 16.5]]]
     """
 
     @prim_attr_register
 
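The restored doctest is easy to verify by hand under the usual contrast-adjustment formula, out = (x - mean_c) * contrast_factor + mean_c with a per-channel spatial mean (the formula is assumed here; this hunk does not spell it out):

    import numpy as np

    images = np.array([[[1., 2., 3.], [4., 5., 6.]],
                       [[7., 8., 9.], [10., 11., 12.]]], dtype=np.float32)
    mean = images.mean(axis=(0, 1))      # per-channel mean: [5.5, 6.5, 7.5]
    print((images - mean) * 2.0 + mean)  # reproduces the docstring output
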
@@ -26,7 +26,6 @@ from mindspore.ops._primitive_cache import _get_cache_prim
 from mindspore.ops._utils import arg_handler as handler
 from mindspore.ops._utils.arg_dtype_cast import DtypeToEnum
 from mindspore.common import Tensor, CSRTensor, COOTensor
-from mindspore.common._stub_tensor import _convert_stub
 from mindspore._c_expression import typing
 from mindspore._c_expression import TensorPy as Tensor_
 from mindspore._c_expression import pyboost_cast, pyboost_tile, pyboost_zeros, pyboost_ones, pyboost_type_as
 
@@ -978,7 +977,7 @@ class ScalarToTensor(PrimitiveWithInfer):
     def __call__(self, x, dtype=mstype.float32):
         validator.check_value_type("x", x, [bool, int, float], self.name)
         validator.check_subclass("dtype", dtype, mstype.number, self.name)
-        data_type = mstype.
+        data_type = mstype._dtype_to_nptype(dtype)  # pylint:disable=protected-access
         return Tensor(np.array(x, data_type), dtype=dtype)
 
 
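The removed line is truncated in this view; the replacement routes the dtype lookup through the private `mstype._dtype_to_nptype` helper (the same substitution recurs in several hunks below). The op's observable behavior is unchanged:

    import mindspore as ms
    from mindspore import ops

    out = ops.ScalarToTensor()(3, ms.float32)  # Python scalar -> float32 Tensor
    print(out, out.dtype)                      # 3.0 Float32
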
@@ -1057,8 +1056,8 @@ class Tile(Primitive):
     def __call__(self, input, dims):
         # Add for jit context.
         if jit_context() and jit_context().compiled:
-            return
-        res =
+            return jit_context().default_output()
+        res = pyboost_tile(self, [input, dims])
         # Add for jit context.
         if jit_context():
             if validator.is_stub_tensor(res):
 
@@ -1066,7 +1065,6 @@ class Tile(Primitive):
             return jit_context().run_op(self, res, input, dims)
         return res
 
-    # pylint: disable=missing-docstring
     def check_elim(self, *args):
         base_tensor, dims = args
         if not isinstance(base_tensor, Tensor):
 
@@ -1151,7 +1149,7 @@ def scalar_cast(input_x, input_y):
     Args:
         input_x (scalar): The input scalar.
         input_y (mindspore.dtype): The type to be cast. Only constant value is allowed.
-            The value should only be mindspore.int64, mindspore.float64, or mindspore.bool
+            The value should only be mindspore.int64, mindspore.float64, or mindspore.bool.
 
     Returns:
         Scalar, the type is the same as the python type corresponding to `input_y`.
 
@@ -1230,11 +1228,11 @@ class Cast(Primitive):
     def __call__(self, input_x, dtype):
         # Add for jit context.
         if jit_context() and jit_context().compiled:
-            return
+            return jit_context().default_output()
         should_elim, output = self.check_elim(input_x, dtype)
         if should_elim:
             return output
-        res =
+        res = pyboost_cast(self, [input_x, dtype_to_type_id('Cast', 'dtype', dtype)])
         # Add for jit context.
         if jit_context():
             if validator.is_stub_tensor(res):
 
@@ -1293,7 +1291,7 @@ class TypeAs(Primitive):
     def __call__(self, input, other):
         if input.dtype == other.dtype:
             return input
-        return
+        return pyboost_type_as(self, [input, other])
 
 
 def to_sequence(val):
 
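As with `Tile` and `Cast` above, the truncated `return` now calls the pyboost entry point directly; given the `_convert_stub` import removed at the top of this file, the old code presumably wrapped these results in stub tensors. Usage is unchanged (assuming `TypeAs` is exported under `mindspore.ops`):

    import mindspore as ms
    from mindspore import ops, Tensor

    x = Tensor([1, 2], ms.int32)
    y = Tensor([0.0], ms.float32)
    print(ops.TypeAs()(x, y).dtype)  # Float32; returns x as-is when dtypes already match
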
@@ -1716,7 +1714,7 @@ def infer_value_for_Arange(start, end, step, dtype=None):
     if has_float:
         np_dtype = np.float32
     else:
-        np_dtype = mstype.
+        np_dtype = mstype._dtype_to_nptype(typing.type_id_to_type(dtype))  # pylint:disable=protected-access
     return Tensor(np.arange(start, end, step, dtype=np_dtype))
 
 
@@ -1740,7 +1738,7 @@ def _infer_value_for_ReduceExtand(input_x, axis, keep_dims, dtype, prim_name):
     else:
         axis = tuple(range(len(value.shape)))
     if dtype is not None:
-        np_dtype = mstype.
+        np_dtype = mstype._dtype_to_nptype(typing.type_id_to_type(dtype))  # pylint:disable=protected-access
         value = np_reduce_extand_func(value, axis, dtype=np_dtype, keepdims=keep_dims)
     else:
         value = np_reduce_extand_func(value, axis, keepdims=keep_dims)
 
@@ -1773,7 +1771,7 @@ def infer_value_for_Cast(x, dst_type_enum=None):
     if x is None or dst_type_enum is None:
         return None
     dst_type = typing.type_id_to_type(dst_type_enum)
-    src_type = mstype.
+    src_type = mstype._get_py_obj_dtype(x)  # pylint:disable=protected-access
     validator.check_subclass("input_x", src_type, [mstype.tensor_type, mstype.number], "Cast")
     validator.check_subclass("type", dst_type, mstype.number, "Cast")
 
@@ -1783,7 +1781,7 @@ def infer_value_for_Cast(x, dst_type_enum=None):
     dst_type = dst_type.element_type()
 
     value = None
-    np_dst_type = mstype.
+    np_dst_type = mstype._dtype_to_nptype(dst_type)  # pylint:disable=protected-access
     if isinstance(x, (int, float)):
         value = Tensor(np.array(x).astype(np_dst_type), dtype=dst_type)
     else:
 
@@ -2070,9 +2068,9 @@ class Ones(Primitive):
     def __call__(self, size, type=None):
         # Add for jit context.
         if jit_context() and jit_context().compiled:
-            return
-        res =
-                                  else handler.dtype_to_type_id('Ones', 'type', type)])
+            return jit_context().default_output()
+        res = pyboost_ones(self, [size, type if type is None \
+                                  else handler.dtype_to_type_id('Ones', 'type', type)])
         # Add for jit context.
         if jit_context():
             if validator.is_stub_tensor(res):
 
@@ -2130,9 +2128,9 @@ class Zeros(Primitive):
     def __call__(self, size, type=None):
         # Add for jit context.
         if jit_context() and jit_context().compiled:
-            return
-        res =
-                                   handler.dtype_to_type_id('Zeros', 'type', type)])
+            return jit_context().default_output()
+        res = pyboost_zeros(self, [size, type if type is None else \
+                                   handler.dtype_to_type_id('Zeros', 'type', type)])
         # Add for jit context.
         if jit_context():
             if validator.is_stub_tensor(res):
 
@@ -2217,7 +2215,8 @@ def flash_attention_score(query, key, value, head_num, real_shift=None, drop_mask
         keep_prob (double, optional): The keep probability of dropout. Value range is (0.0, 1.0]. When `keep_prob`
             is 1.0, `drop_mask` should be None.
             Default: ``1.0``.
-        scalar_value (double, optional): The scale
+        scalar_value (double, optional): The scale value indicating the scale coefficient, which is used as the
+            scalar of Muls in the calculation. Generally, the value is 1.0 / (D ** 0.5).
             Default: ``1.0``.
         pre_tokens (int, optional): Parameter for sparse computation, represents how many tokens are counted forward.
             When `sparse_mode` is set to 1, 2, 3, or 5, this parameter does not take effect.
 
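The completed sentence documents the conventional attention scaling factor. For a head dimension D, for example:

    import math

    head_dim = 128
    scalar_value = 1.0 / math.sqrt(head_dim)  # 1.0 / (D ** 0.5) ≈ 0.0884
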
@@ -2599,8 +2598,8 @@ def fused_infer_attention_score(query, key, value, *, pse_shift=None, atten_mask
         taking exp, and then the sum is computed to obtain softmax_sum. Finally, the log of softmax_sum is taken,
         and softmax_max is added to obtain softmax_lse. The softmax_lse is only calculated when softmax_lse_flag
         is True, and the shape would be :math:`(B, N, Q\_S, 1)`. If softmax_lse_flag is False, then a tensor with
-        shape :math:`(1)` filled with zeros would be returned. In
-
+        shape :math:`(1)` filled with zeros would be returned. In GE backend, please ensure that the softmax_lse_flag
+        is enabled before using softmax_lse; otherwise, an exception will occur.
 
     Constraints:
         - Full Inference Scenario (Q_S > 1):
 
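The paragraph above describes a numerically stable log-sum-exp. Restated as a small NumPy reference (for illustration only, not the kernel implementation):

    import numpy as np

    def softmax_lse(scores):
        m = scores.max(axis=-1, keepdims=True)              # softmax_max
        s = np.exp(scores - m).sum(axis=-1, keepdims=True)  # softmax_sum
        return np.log(s) + m                                # softmax_lse
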
@@ -2840,8 +2839,8 @@ class WhileLoop(Primitive):
             while cond_func(val):
                 val = loop_func(val)
         except Exception as e:
-            raise ValueError("Invalid loop_func, please check input arguments and
-                             return value, error info: {}"
+            raise ValueError(f"Invalid loop_func, please check input arguments and "
+                             f"return value, error info: {e}")
         return val
 
 
@@ -2936,8 +2935,8 @@ class Scan(Primitive):
                 ys.append(y)
                 i = i + 1
         except Exception as e:
-            raise ValueError("Invalid loop_func, please check input arguments and
-                             return value, error info: {}"
+            raise ValueError(f"Invalid loop_func, please check input arguments and "
+                             f"return value, error info: {e}")
         return carry, ys
 
 
@@ -3012,6 +3011,6 @@ class ForiLoop(Primitive):
             for i in range(lower, upper):
                 val = loop_func(i, val)
         except Exception as e:
-            raise ValueError("Invalid loop_func, please check input arguments and
-                             return value, error info: {}"
+            raise ValueError(f"Invalid loop_func, please check input arguments and "
+                             f"return value, error info: {e}")
         return val
 
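`WhileLoop`, `Scan`, and `ForiLoop` all receive the same fix: the removed lines are truncated in this view, and the replacement raises a proper f-string carrying the caught exception. For reference, the fallback semantics shown in these hunks boil down to an ordinary Python loop (argument order assumed from the loop body above):

    from mindspore import ops, Tensor

    # ForiLoop runs: for i in range(lower, upper): val = loop_func(i, val)
    out = ops.ForiLoop()(0, 5, lambda i, v: v + i, Tensor(0))
    print(out)  # 0 + 1 + 2 + 3 + 4 = 10
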
@@ -882,7 +882,7 @@ class Sub(_MathBinaryOp):
     Note:
         - When the two inputs have different shapes, they must be able to broadcast to a common shape.
         - The two inputs can not be bool type at the same time,
-          [True, Tensor(True
+          [True, Tensor(True), Tensor(np.array([True]))] are all considered bool type.
         - The two inputs comply with the implicit type conversion rules to make the data types
           consistent.
 
@@ -890,7 +890,7 @@ class Sub(_MathBinaryOp):
         - **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
           a bool or a tensor whose data type is
           `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-          `
+          `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         - **y** (Union[Tensor, number.Number, bool]) - The second input, when the first input is a Tensor,
           the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.
 
@@ -1231,7 +1231,7 @@ class Heaviside(Primitive):
             0, & \text { if x }<0 \\
             \text { values, } & \text { if x }==0 \\
             1, & \text { if x }>0
-            \end{array}\right
+            \end{array}\right.
 
     .. warning::
         This is an experimental API that is subject to change or deletion.
 
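Only the trailing LaTeX delimiter changes (`\right` needs its matching `.`); the piecewise definition itself is intact. A quick check of the three branches through the functional API:

    import mindspore as ms
    from mindspore import ops, Tensor

    x = Tensor([-1.5, 0.0, 2.0], ms.float32)
    values = Tensor([0.5], ms.float32)  # substituted where x == 0
    print(ops.heaviside(x, values))     # [0.  0.5 1. ]
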
@@ -1289,10 +1289,10 @@ class DivNoNan(Primitive):
         - **x1** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
           a bool or a tensor whose data type is
           `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-          `
+          `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         - **x2** (Union[Tensor, number.Number, bool]) - The second input is a number.Number or
-          a bool when the first input is a bool or a tensor whose data type is number or bool
-          When the first input is Scalar, the second input must be a Tensor whose data type is number or bool
+          a bool when the first input is a bool or a tensor whose data type is number or bool.
+          When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
 
     Outputs:
         Tensor, the shape is the same as the one after broadcasting,
 
@@ -3354,9 +3354,8 @@ class ComplexAbs(Primitive):
 
     Examples:
         >>> import mindspore
-        >>> import numpy as np
         >>> from mindspore import Tensor, ops
-        >>> x = Tensor(
+        >>> x = Tensor(3+4j, mindspore.complex64)
         >>> complex_abs = ops.ComplexAbs()
         >>> output = complex_abs(x)
         >>> print(output)
 
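The rewritten example drops an unused numpy import and completes the truncated Tensor construction. The printed value is |3 + 4j| = sqrt(3^2 + 4^2) = 5:

    import numpy as np
    print(abs(np.complex64(3 + 4j)))  # 5.0, matching the docstring
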
@@ -3630,7 +3629,7 @@ class MatrixSolveLs(Primitive):
         TypeError: If `l2_regularizer` is not float64.
         TypeError: If `fast` is not bool.
         ValueError: If dimensions of `matrix` or `rhs` is less than 2.
-        ValueError: If shape of `matrix`
+        ValueError: If shape of `matrix` does not match the shape of `rhs`.
 
     Supported Platforms:
         ``CPU``
 
@@ -32,7 +32,6 @@ from mindspore.ops.primitive import PrimitiveWithCheck
 from mindspore.ops.primitive import prim_attr_register
 from mindspore.run_check._check_version import AscendEnvChecker
 from mindspore._c_expression import pyboost_all_finite
-from mindspore.common._stub_tensor import _convert_stub
 from ..auto_generate import (CeLU, Flatten, LogSoftmax, LogSoftmaxExt, GLU, ReLU, ReLU6, Dense, Tanh,
                              Elu, Sigmoid, Softmax, SoftplusExt, HSwish, HSigmoid, AvgPool, BiasAdd,
                              NLLLoss, OneHot, GeLU, FastGeLU, PReLU, RmsNorm, IncreFlashAttention, MSELossExt,
 
@@ -42,7 +41,7 @@ from ..auto_generate import (CeLU, Flatten, LogSoftmax, LogSoftmaxExt, GLU, ReLU
                              UpsampleNearest3D, UpsampleTrilinear3D,
                              SoftMarginLoss, UpsampleBilinear2D, UpsampleLinear1D,
                              BinaryCrossEntropy, BCEWithLogitsLoss, SoftShrink, AdaptiveMaxPool2D,
-                             SmoothL1Loss)
+                             SmoothL1Loss, KvScaleCache)
 from .manually_defined import BatchNorm
 
 
@@ -6434,39 +6433,8 @@ class Conv3D(Primitive):
 
     Outputs:
         Tensor, the value that applied 3D convolution. The shape is :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`.
-
-        `pad_mode` is ``"same"``:
-
-        .. math::
-            \begin{array}{ll} \\
-                D_{out} = \left \lceil{\frac{D_{in}}{\text{stride[0]}}} \right \rceil \\
-                H_{out} = \left \lceil{\frac{H_{in}}{\text{stride[1]}}} \right \rceil \\
-                W_{out} = \left \lceil{\frac{W_{in}}{\text{stride[2]}}} \right \rceil \\
-            \end{array}
-
-        `pad_mode` is ``"valid"``:
-
-        .. math::
-            \begin{array}{ll} \\
-                D_{out} = \left \lfloor{\frac{D_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) }
-                {\text{stride[0]}} + 1} \right \rfloor \\
-                H_{out} = \left \lfloor{\frac{H_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) }
-                {\text{stride[1]}} + 1} \right \rfloor \\
-                W_{out} = \left \lfloor{\frac{W_{in} - \text{dilation[2]} \times (\text{kernel_size[2]} - 1) }
-                {\text{stride[2]}} + 1} \right \rfloor \\
-            \end{array}
-
-        `pad_mode` is ``"pad"``:
-
-        .. math::
-            \begin{array}{ll} \\
-                D_{out} = \left \lfloor{\frac{D_{in} + pad[0] + pad[1] - (\text{dilation[0]} - 1) \times
-                \text{kernel_size[0]} - 1 }{\text{stride[0]}} + 1} \right \rfloor \\
-                H_{out} = \left \lfloor{\frac{H_{in} + pad[2] + pad[3] - (\text{dilation[1]} - 1) \times
-                \text{kernel_size[1]} - 1 }{\text{stride[1]}} + 1} \right \rfloor \\
-                W_{out} = \left \lfloor{\frac{W_{in} + pad[4] + pad[5] - (\text{dilation[2]} - 1) \times
-                \text{kernel_size[2]} - 1 }{\text{stride[2]}} + 1} \right \rfloor \\
-            \end{array}
+        To see how different pad modes affect the output shape, please refer to
+        :class:`mindspore.nn.Conv3d` for more details.
 
     Raises:
         TypeError: If `out_channel` or `group` is not an int.
 
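The per-mode output-shape formulas now live only in the `mindspore.nn.Conv3d` docs. For one axis of the removed "valid" formula, the arithmetic reads:

    import math

    # D_out = floor((D_in - dilation * (kernel_size - 1)) / stride + 1)
    d_in, kernel_size, stride, dilation = 16, 3, 2, 1
    print(math.floor((d_in - dilation * (kernel_size - 1)) / stride + 1))  # 8
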
@@ -7151,8 +7119,8 @@ class Conv3DTranspose(Primitive):
         self.format = validator.check_string(data_format, ['NCDHW'], 'format', self.name)
         self.add_prim_attr('data_format', self.format)
 
-        self.output_padding = _check_3d_int_or_tuple('output_padding', output_padding, self.name,
-
+        self.output_padding = _check_3d_int_or_tuple('output_padding', output_padding, self.name, allow_five=False,
+                                                     ret_five=True, greater_zero=False, pad_value=0)
         output_padding_ = (self.output_padding[2], self.output_padding[3], self.output_padding[4])
         if self.pad_mode != 'pad' and output_padding_ != (0, 0, 0):
             raise ValueError(f"For '{self.name}', the 'output_padding' must be zero or (0, 0, 0) "
 
@@ -7402,8 +7370,8 @@ class ApplyAdagradDA(Primitive):
         >>> global_step = Tensor(2, mstype.int32)
         >>> output = net(grad, lr, l1, l2, global_step)
         >>> print(output)
-        [[-0.00073906
-         [-0.00059699
+        [[-0.00073906 -0.00136889]
+         [-0.00059699 -0.00142478]]
     """
 
     __mindspore_signature__ = (
 
@@ -9289,4 +9257,4 @@ class AllFinite(Primitive):
                              "in the current environment does not support AllFinite.")
 
     def __call__(self, *args):
-        return
+        return pyboost_all_finite(self, args)

mindspore/ops/primitive.py CHANGED
 
@@ -170,10 +170,13 @@ class Primitive(Primitive_):
                 raise TypeError(f'The element of strategy must be tuple/Layout type, but got:{type(in_ele)}')
             if isinstance(in_ele, tuple):
                 for in_value in in_ele:
-                    if not isinstance(in_value, int) and self.name not in SUPPORTED_TUPLE_IN_TUPLE_STRATEGY
+                    if not isinstance(in_value, int) and self.name not in SUPPORTED_TUPLE_IN_TUPLE_STRATEGY \
+                            and not self.attrs.get("self_define_shard", False):
                         raise TypeError(f'The {log_info}: {strategy} of {self.name} is not valid,'
                                         f' the value of strategy must be int type, but got:{type(in_value)}')
-                    if isinstance(in_value, Layout) and (
+                    if isinstance(in_value, Layout) and (
+                            self.name in SUPPORTED_TUPLE_IN_TUPLE_STRATEGY or self.attrs.get("self_define_shard",
+                                                                                             False)):
                         is_layout.append(True)
                         continue
                     is_layout.append(False)
 
@@ -315,7 +318,7 @@ class Primitive(Primitive_):
         out_is_layout = self._check_shard_strategy(out_strategy, "out_strategy")
         is_layout = in_is_layout if in_is_layout is not None else out_is_layout
         if out_is_layout is not None and is_layout != out_is_layout and \
-                self.name not in SUPPORTED_TUPLE_IN_TUPLE_STRATEGY:
+                self.name not in SUPPORTED_TUPLE_IN_TUPLE_STRATEGY and not self.attrs.get("self_define_shard", False):
             raise ValueError(f'The in_strategy type must equal to the out_strategy type, '
                              f'one using tuple(tuple) and the other using tuple(Layout) is not allowed.')
 
@@ -409,12 +412,6 @@ class Primitive(Primitive_):
             return output
         return _run_op(self, self.name, args)
 
-    def __getstate__(self):
-        return self.__dict__
-
-    def __setstate__(self, d):
-        self.__dict__.update(d)
-
     def __deepcopy__(self, memo):
         return type(self)(**self.init_attrs)
 
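Dropping `__getstate__`/`__setstate__` is behavior-preserving for ordinary pickling, because copying `__dict__` is exactly what Python does by default:

    import pickle

    class P:
        def __init__(self):
            self.attrs = {"name": "demo"}

    clone = pickle.loads(pickle.dumps(P()))  # default protocol round-trips __dict__
    print(clone.attrs)                       # {'name': 'demo'}
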
@@ -461,7 +458,7 @@ class Primitive(Primitive_):
 
         - If the computation involves something like randomization or global variable, the equivalence
           is not guaranteed currently.
-        -
+        - Should only be used in Graph mode or in gradient functions that are decorated by @jit.
 
         Args:
             mode (bool): Specifies whether the primitive is recomputed. Default: ``True`` .
 
@@ -469,7 +466,7 @@ class Primitive(Primitive_):
         Examples:
             >>> import numpy as np
             >>> import mindspore as ms
-            >>> from mindspore import Tensor, ops, nn
+            >>> from mindspore import Tensor, ops, nn, jit
             >>> class NetRecompute(nn.Cell):
             ...     def __init__(self):
             ...         super(NetRecompute,self).__init__()
 
@@ -484,6 +481,7 @@ class Primitive(Primitive_):
             ...         super(GradNet,self).__init__()
             ...         self.network = network
             ...         self.grad = ops.GradOperation()
+            ...     @jit
             ...     def construct(self, x):
             ...         g_out = self.grad(self.network)(x)
             ...         return g_out
 
@@ -495,8 +493,6 @@ class Primitive(Primitive_):
             >>> print(a)
             [0. 0.5]
         """
-        if context.get_context("mode") == context.PYNATIVE_MODE:
-            raise TypeError("Recompute is not supported in pynative mode currently.")
         Validator.check_bool(mode)
         self.add_prim_attr("recompute", mode)
         return self
 
@@ -513,8 +509,6 @@ class Primitive(Primitive_):
         Args:
             backward_prefetch(Union[str, int]): Specifies whether the activation is prefetched in backward pass.
         """
-        if context.get_context("mode") == context.PYNATIVE_MODE:
-            raise ValueError("Offload is not supported in pynative mode currently.")
         self.add_prim_attr("offload", True)
         if isinstance(backward_prefetch, str):
             Validator.check_string(backward_prefetch, ['Auto'], 'backward_prefetch', 'Primitive._offload')
 
@@ -554,10 +548,6 @@ class Primitive(Primitive_):
         Validator.check_non_negative_int(rank_id, "rank_id", "Primitive.place")
         Validator.check_string(role, "MS_WORKER", "role", "Primitive.place")
 
-        if context.get_context("mode") == context.PYNATIVE_MODE:
-            raise RuntimeError("You are calling Primitive.place in pynative mode."
-                               "It's only supported in graph mode. Please switch to graph mode.")
-
         # Get the execution context and check whether calling of this 'place' method is valid.
         # This is because placing operators to arbitrary processes while other distributed training mode
         # is enabled is very unpredictable and may cause fatal error.
 
@@ -1032,7 +1022,6 @@ def _run_op(obj, op_name, args):
     res = _pynative_executor.run_op_async(obj, op_name, args)
     # Add for jit context.
     if jit_context():
-        # todo support TensorPy
         return jit_context().run_op(obj, res, *args)
     return res