mindspore 2.6.0__cp310-cp310-win_amd64.whl → 2.7.0__cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +2 -2
- mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +42 -11
- mindspore/_extends/builtin_operations.py +3 -3
- mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
- mindspore/_extends/optimize/cell_utils.py +96 -0
- mindspore/_extends/parallel_compile/akg_compiler/custom.py +1109 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/__init__.py +3 -3
- mindspore/_extends/parse/compile_config.py +44 -22
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -2
- mindspore/_extends/parse/parser.py +64 -83
- mindspore/_extends/parse/resources.py +39 -0
- mindspore/_extends/parse/standard_method.py +47 -14
- mindspore/_extends/parse/trope.py +8 -1
- mindspore/_extends/pijit/__init__.py +1 -2
- mindspore/_extends/pijit/pijit_func_white_list.py +2 -5
- mindspore/amp.py +4 -22
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +4 -4
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +43 -12
- mindspore/common/_grad_function.py +2 -1
- mindspore/common/_pijit_context.py +28 -7
- mindspore/common/_stub_tensor.py +1 -209
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +177 -52
- mindspore/common/_utils.py +9 -1
- mindspore/common/api.py +338 -208
- mindspore/common/dtype.py +108 -57
- mindspore/common/dump.py +11 -16
- mindspore/common/dynamic_shape/__init__.py +0 -0
- mindspore/common/{auto_dynamic_shape.py → dynamic_shape/auto_dynamic_shape.py} +17 -23
- mindspore/common/dynamic_shape/enable_dynamic.py +197 -0
- mindspore/common/file_system.py +59 -9
- mindspore/common/generator.py +2 -3
- mindspore/common/hook_handle.py +33 -5
- mindspore/common/jit_config.py +1 -1
- mindspore/common/jit_trace.py +84 -105
- mindspore/common/np_dtype.py +3 -3
- mindspore/common/parameter.py +27 -29
- mindspore/common/recompute.py +5 -7
- mindspore/common/sparse_tensor.py +0 -3
- mindspore/common/symbol.py +0 -1
- mindspore/common/tensor.py +84 -133
- mindspore/communication/_comm_helper.py +46 -4
- mindspore/communication/management.py +79 -7
- mindspore/context.py +47 -38
- mindspore/dataset/__init__.py +1 -1
- mindspore/dataset/audio/transforms.py +1 -1
- mindspore/dataset/core/config.py +38 -4
- mindspore/dataset/engine/datasets.py +350 -322
- mindspore/dataset/engine/datasets_user_defined.py +69 -23
- mindspore/dataset/engine/iterators.py +2 -2
- mindspore/dataset/engine/obs/config_loader.py +2 -2
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
- mindspore/dataset/transforms/c_transforms.py +2 -2
- mindspore/dataset/transforms/py_transforms.py +7 -3
- mindspore/dataset/transforms/transforms.py +10 -6
- mindspore/dataset/vision/__init__.py +1 -1
- mindspore/dataset/vision/py_transforms.py +8 -8
- mindspore/dataset/vision/transforms.py +17 -5
- mindspore/dataset/vision/utils.py +632 -21
- mindspore/dataset/vision/validators.py +1 -0
- mindspore/device_context/ascend/device.py +1 -1
- mindspore/device_context/ascend/op_tuning.py +35 -1
- mindspore/device_context/gpu/__init__.py +2 -2
- mindspore/device_context/gpu/device.py +1 -1
- mindspore/device_context/gpu/op_precision.py +4 -2
- mindspore/device_context/gpu/op_tuning.py +6 -3
- mindspore/device_manager.py +16 -9
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +5 -4
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/optim/adadelta.py +13 -20
- mindspore/experimental/optim/adagrad.py +15 -22
- mindspore/experimental/optim/adam.py +17 -24
- mindspore/experimental/optim/adamax.py +14 -22
- mindspore/experimental/optim/adamw.py +28 -34
- mindspore/experimental/optim/asgd.py +15 -25
- mindspore/experimental/optim/lr_scheduler.py +27 -45
- mindspore/experimental/optim/nadam.py +14 -24
- mindspore/experimental/optim/optimizer.py +13 -23
- mindspore/experimental/optim/radam.py +18 -24
- mindspore/experimental/optim/rmsprop.py +14 -25
- mindspore/experimental/optim/rprop.py +15 -26
- mindspore/experimental/optim/sgd.py +9 -19
- mindspore/hal/__init__.py +4 -4
- mindspore/hal/contiguous_tensors_handle.py +2 -2
- mindspore/hal/memory.py +1 -0
- mindspore/include/api/cell.h +65 -5
- mindspore/include/api/cfg.h +24 -7
- mindspore/include/api/context.h +1 -0
- mindspore/include/api/delegate.h +10 -2
- mindspore/include/api/dual_abi_helper.h +100 -19
- mindspore/include/api/graph.h +14 -1
- mindspore/include/api/kernel.h +16 -3
- mindspore/include/api/kernel_api.h +9 -1
- mindspore/include/api/metrics/accuracy.h +9 -0
- mindspore/include/api/model.h +8 -1
- mindspore/include/api/model_group.h +4 -0
- mindspore/include/api/model_parallel_runner.h +2 -0
- mindspore/include/api/status.h +48 -10
- mindspore/include/api/types.h +8 -3
- mindspore/include/c_api/model_c.h +0 -58
- mindspore/include/c_api/tensor_c.h +0 -26
- mindspore/include/dataset/constants.h +9 -0
- mindspore/include/dataset/vision_ascend.h +1 -1
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/tools/cifar10.py +61 -11
- mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mindspore_ops_host.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +4 -44
- mindspore/mint/distributed/__init__.py +5 -0
- mindspore/mint/distributed/distributed.py +425 -19
- mindspore/mint/nn/__init__.py +1 -1
- mindspore/mint/nn/functional.py +53 -6
- mindspore/mint/nn/layer/_functions.py +163 -294
- mindspore/mint/nn/layer/activation.py +8 -6
- mindspore/mint/nn/layer/conv.py +125 -101
- mindspore/mint/nn/layer/normalization.py +11 -25
- mindspore/mint/optim/adam.py +19 -18
- mindspore/mint/optim/adamw.py +14 -8
- mindspore/mint/optim/sgd.py +5 -5
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/nn/cell.py +488 -620
- mindspore/nn/grad/cell_grad.py +11 -12
- mindspore/nn/layer/activation.py +36 -36
- mindspore/nn/layer/basic.py +74 -77
- mindspore/nn/layer/channel_shuffle.py +4 -4
- mindspore/nn/layer/combined.py +4 -2
- mindspore/nn/layer/conv.py +86 -85
- mindspore/nn/layer/dense.py +9 -7
- mindspore/nn/layer/embedding.py +50 -52
- mindspore/nn/layer/image.py +38 -40
- mindspore/nn/layer/math.py +111 -112
- mindspore/nn/layer/normalization.py +56 -44
- mindspore/nn/layer/pooling.py +58 -63
- mindspore/nn/layer/rnn_cells.py +33 -33
- mindspore/nn/layer/rnns.py +56 -56
- mindspore/nn/layer/thor_layer.py +74 -73
- mindspore/nn/layer/transformer.py +11 -1
- mindspore/nn/learning_rate_schedule.py +20 -20
- mindspore/nn/loss/loss.py +79 -81
- mindspore/nn/optim/adam.py +2 -4
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/lamb.py +1 -3
- mindspore/nn/optim/optimizer.py +1 -1
- mindspore/nn/optim/tft_wrapper.py +2 -3
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/_utils/utils.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -1
- mindspore/nn/probability/distribution/poisson.py +2 -1
- mindspore/nn/sparse/sparse.py +3 -3
- mindspore/nn/wrap/cell_wrapper.py +73 -42
- mindspore/nn/wrap/grad_reducer.py +37 -52
- mindspore/nn/wrap/loss_scale.py +72 -74
- mindspore/numpy/array_creations.py +7 -7
- mindspore/numpy/fft.py +1 -1
- mindspore/numpy/math_ops.py +1 -1
- mindspore/numpy/utils_const.py +1 -1
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
- mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
- mindspore/ops/_grad_experimental/grad_inner_ops.py +0 -9
- mindspore/ops/_op_impl/cpu/__init__.py +1 -0
- mindspore/{experimental/es/__init__.py → ops/_op_impl/cpu/joinedstr_op.py} +12 -6
- mindspore/ops/_vmap/vmap_array_ops.py +6 -13
- mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +29 -10
- mindspore/ops/auto_generate/gen_extend_func.py +5 -55
- mindspore/ops/auto_generate/gen_ops_def.py +753 -273
- mindspore/ops/auto_generate/gen_ops_prim.py +1687 -958
- mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
- mindspore/ops/composite/__init__.py +10 -0
- mindspore/ops/composite/base.py +9 -5
- mindspore/ops/composite/multitype_ops/__init__.py +12 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +132 -108
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
- mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
- mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
- mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
- mindspore/ops/function/__init__.py +4 -1
- mindspore/ops/function/_add_attr_func.py +11 -6
- mindspore/ops/function/array_func.py +17 -100
- mindspore/ops/function/debug_func.py +8 -5
- mindspore/ops/function/grad/grad_func.py +5 -13
- mindspore/ops/function/math_func.py +65 -399
- mindspore/ops/function/nn_func.py +44 -61
- mindspore/ops/function/other_func.py +4 -1
- mindspore/ops/function/random_func.py +31 -4
- mindspore/ops/functional.py +2 -3
- mindspore/ops/functional_overload.py +486 -18
- mindspore/ops/op_info_register.py +21 -0
- mindspore/ops/operations/__init__.py +5 -2
- mindspore/ops/operations/_custom_ops_utils.py +675 -8
- mindspore/ops/operations/_inner_ops.py +14 -18
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +4 -50
- mindspore/ops/operations/comm_ops.py +186 -41
- mindspore/ops/operations/custom_ops.py +244 -175
- mindspore/ops/operations/debug_ops.py +55 -4
- mindspore/ops/operations/image_ops.py +13 -13
- mindspore/ops/operations/manually_defined/ops_def.py +27 -28
- mindspore/ops/operations/math_ops.py +8 -9
- mindspore/ops/operations/nn_ops.py +6 -7
- mindspore/ops/primitive.py +9 -20
- mindspore/ops/tensor_method.py +52 -11
- mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
- mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
- mindspore/ops_generate/api/functions_cc_generator.py +58 -10
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
- mindspore/ops_generate/common/base_generator.py +14 -0
- mindspore/ops_generate/common/gen_constants.py +7 -2
- mindspore/ops_generate/common/gen_utils.py +0 -19
- mindspore/ops_generate/common/op_proto.py +11 -4
- mindspore/ops_generate/common/template.py +88 -11
- mindspore/ops_generate/gen_ops.py +1 -1
- mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
- mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -16
- mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
- mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
- mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
- mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
- mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
- mindspore/parallel/_auto_parallel_context.py +9 -17
- mindspore/parallel/_cell_wrapper.py +106 -40
- mindspore/parallel/_parallel_serialization.py +4 -3
- mindspore/parallel/_ps_context.py +4 -6
- mindspore/parallel/_tensor.py +167 -12
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/transformer.py +17 -12
- mindspore/parallel/_utils.py +5 -11
- mindspore/parallel/auto_parallel.py +33 -12
- mindspore/parallel/checkpoint_convert.py +3 -3
- mindspore/parallel/checkpoint_transform.py +5 -1
- mindspore/parallel/cluster/process_entity/_api.py +88 -49
- mindspore/parallel/cluster/process_entity/_utils.py +95 -7
- mindspore/parallel/cluster/run.py +48 -7
- mindspore/parallel/function/__init__.py +8 -1
- mindspore/parallel/function/reshard_func.py +7 -6
- mindspore/parallel/nn/__init__.py +15 -2
- mindspore/parallel/nn/parallel_cell_wrapper.py +50 -14
- mindspore/parallel/nn/parallel_grad_reducer.py +7 -14
- mindspore/parallel/shard.py +9 -23
- mindspore/parallel/transform_safetensors.py +468 -174
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +3 -0
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
- mindspore/profiler/analysis/task_manager.py +1 -1
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +10 -9
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +43 -23
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
- mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
- mindspore/profiler/common/constant.py +16 -0
- mindspore/profiler/common/msprof_cmd_tool.py +2 -2
- mindspore/profiler/common/path_manager.py +9 -0
- mindspore/profiler/common/profiler_context.py +50 -29
- mindspore/profiler/common/profiler_info.py +0 -16
- mindspore/profiler/common/profiler_meta_data.py +1 -0
- mindspore/profiler/common/profiler_op_analyse.py +239 -0
- mindspore/profiler/common/profiler_output_path.py +23 -8
- mindspore/profiler/common/profiler_parameters.py +128 -35
- mindspore/profiler/dynamic_profile/__init__.py +0 -0
- mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
- mindspore/profiler/dynamic_profiler.py +374 -338
- mindspore/profiler/envprofiler.py +42 -12
- mindspore/profiler/experimental_config.py +112 -7
- mindspore/profiler/mstx.py +33 -12
- mindspore/profiler/platform/__init__.py +2 -3
- mindspore/profiler/platform/cpu_profiler.py +10 -4
- mindspore/profiler/platform/npu_profiler.py +30 -20
- mindspore/profiler/profiler.py +218 -154
- mindspore/profiler/profiler_action_controller.py +65 -77
- mindspore/profiler/profiler_interface.py +2 -2
- mindspore/profiler/schedule.py +10 -4
- mindspore/rewrite/common/config.py +1 -0
- mindspore/rewrite/common/namer.py +1 -0
- mindspore/rewrite/common/namespace.py +1 -0
- mindspore/rewrite/node/node.py +31 -11
- mindspore/rewrite/parsers/assign_parser.py +1 -1
- mindspore/rewrite/symbol_tree/symbol_tree.py +2 -2
- mindspore/run_check/_check_version.py +7 -10
- mindspore/runtime/__init__.py +8 -6
- mindspore/runtime/event.py +10 -4
- mindspore/runtime/executor.py +87 -45
- mindspore/runtime/memory.py +22 -30
- mindspore/runtime/thread_bind_core.py +299 -165
- mindspore/safeguard/rewrite_obfuscation.py +12 -13
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/_utils.py +9 -5
- mindspore/train/amp.py +43 -23
- mindspore/train/callback/__init__.py +5 -5
- mindspore/train/callback/_callback.py +2 -1
- mindspore/train/callback/_checkpoint.py +4 -14
- mindspore/train/callback/_flops_collector.py +11 -7
- mindspore/train/callback/_landscape.py +0 -1
- mindspore/train/callback/_train_fault_tolerance.py +72 -18
- mindspore/train/data_sink.py +15 -6
- mindspore/train/dataset_helper.py +14 -5
- mindspore/train/model.py +49 -47
- mindspore/train/serialization.py +168 -126
- mindspore/train/summary/summary_record.py +13 -2
- mindspore/train/train_thor/model_thor.py +2 -2
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +0 -6
- mindspore/utils/runtime_execution_order_check.py +162 -78
- mindspore/utils/sdc_detect.py +68 -0
- mindspore/utils/utils.py +14 -17
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/METADATA +5 -4
- {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/RECORD +400 -439
- mindspore/_deprecated/jit.py +0 -198
- mindspore/_extends/remote/kernel_build_server_ascend.py +0 -75
- mindspore/communication/_hccl_management.py +0 -297
- mindspore/experimental/es/embedding_service.py +0 -891
- mindspore/experimental/es/embedding_service_layer.py +0 -581
- mindspore/profiler/common/validator/__init__.py +0 -14
- mindspore/profiler/common/validator/validate_path.py +0 -84
- mindspore/profiler/parser/__init__.py +0 -14
- mindspore/profiler/parser/aicpu_data_parser.py +0 -272
- mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
- mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
- mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
- mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
- mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
- mindspore/profiler/parser/ascend_flops_generator.py +0 -116
- mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
- mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
- mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
- mindspore/profiler/parser/ascend_op_generator.py +0 -334
- mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
- mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
- mindspore/profiler/parser/base_timeline_generator.py +0 -483
- mindspore/profiler/parser/container.py +0 -229
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
- mindspore/profiler/parser/flops_parser.py +0 -531
- mindspore/profiler/parser/framework_enum.py +0 -111
- mindspore/profiler/parser/framework_parser.py +0 -464
- mindspore/profiler/parser/framework_struct.py +0 -61
- mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
- mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
- mindspore/profiler/parser/hccl_parser.py +0 -573
- mindspore/profiler/parser/hwts_log_parser.py +0 -122
- mindspore/profiler/parser/integrator.py +0 -526
- mindspore/profiler/parser/memory_usage_parser.py +0 -277
- mindspore/profiler/parser/minddata_analyzer.py +0 -800
- mindspore/profiler/parser/minddata_parser.py +0 -186
- mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
- mindspore/profiler/parser/op_intermediate_parser.py +0 -149
- mindspore/profiler/parser/optime_parser.py +0 -250
- mindspore/profiler/parser/profiler_info.py +0 -213
- mindspore/profiler/parser/step_trace_parser.py +0 -666
- mindspore/utils/hooks.py +0 -81
- /mindspore/common/{_auto_dynamic.py → dynamic_shape/_auto_dynamic.py} +0 -0
- {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/WHEEL +0 -0
- {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/entry_points.txt +0 -0
- {mindspore-2.6.0.dist-info → mindspore-2.7.0.dist-info}/top_level.txt +0 -0
mindspore/nn/layer/normalization.py
CHANGED

@@ -21,7 +21,6 @@ import numbers
 import hashlib
 import numpy as np
 import mindspore.ops as ops
-from mindspore.ops import operations as P
 from mindspore.ops.operations import _inner_ops as inner
 from mindspore.common.parameter import Parameter
 from mindspore.common.initializer import initializer, Initializer
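A note on the pattern: most of this file's churn is a mechanical alias migration, in which primitives previously reached through the removed `P` alias are now spelled via the public `mindspore.ops` namespace. A minimal sketch of why the rewrite is behavior-preserving (assuming, as in current releases, that `mindspore.ops` re-exports the same primitive classes):

    import mindspore.ops as ops
    from mindspore.ops import operations as P  # the alias this file drops in 2.7.0

    # mindspore.ops re-exports the primitive classes defined under
    # mindspore.ops.operations, so both names resolve to one class object.
    assert ops.Shape is P.Shape
    assert ops.ReduceMean is P.ReduceMean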
@@ -98,35 +97,34 @@ class _BatchNorm(Cell):
 
         self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
 
-        self.shape =
-        self.reduce_mean =
-        self.square =
-        self.sqrt =
-        self.cast =
-        self.dtype =
-        self.reshape =
+        self.shape = ops.Shape()
+        self.reduce_mean = ops.ReduceMean(keep_dims=True)
+        self.square = ops.Square()
+        self.sqrt = ops.Sqrt()
+        self.cast = ops.Cast()
+        self.dtype = ops.DType()
+        self.reshape = ops.Reshape()
         self._target = context.get_context("device_target")
-        self.is_graph_mode = context.get_context("mode") == context.GRAPH_MODE
         self.momentum = 1.0 - momentum
 
-        self.bn_train =
-
-
-
+        self.bn_train = ops.BatchNorm(is_training=True,
+                                      epsilon=self.eps,
+                                      momentum=self.momentum,
+                                      data_format=self.format)
 
-        self.bn_infer =
+        self.bn_infer = ops.BatchNorm(is_training=False, epsilon=self.eps, data_format=self.format)
         if _is_in_auto_parallel_mode():
             data_parallel_strategy = ((1,), (1,))
             data_parallel_strategy_one = ((1,), ())
         else:
             data_parallel_strategy = None
             data_parallel_strategy_one = None
-        self.sub_mean =
-        self.sub_var =
-        self.mul_mean =
-        self.mul_var =
-        self.assign_sub_mean =
-        self.assign_sub_var =
+        self.sub_mean = ops.Sub().shard(data_parallel_strategy)
+        self.sub_var = ops.Sub().shard(data_parallel_strategy)
+        self.mul_mean = ops.Mul().shard(data_parallel_strategy_one)
+        self.mul_var = ops.Mul().shard(data_parallel_strategy_one)
+        self.assign_sub_mean = ops.AssignSub().shard(data_parallel_strategy)
+        self.assign_sub_var = ops.AssignSub().shard(data_parallel_strategy)
 
     @staticmethod
     @_primexpr
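The restored constructor shows why `_BatchNorm` keeps two `ops.BatchNorm` primitives: `is_training` is fixed at construction time, not passed per call. A hedged standalone sketch (shapes illustrative; the primitive returns five outputs and only the first is the normalized tensor):

    import numpy as np
    import mindspore as ms
    import mindspore.ops as ops

    bn_train = ops.BatchNorm(is_training=True, epsilon=1e-5, momentum=0.9, data_format="NCHW")
    x = ms.Tensor(np.random.randn(2, 3, 4, 4).astype(np.float32))
    scale = ms.Tensor(np.ones(3, dtype=np.float32))
    bias = ms.Tensor(np.zeros(3, dtype=np.float32))
    mean = ms.Tensor(np.zeros(3, dtype=np.float32))
    var = ms.Tensor(np.ones(3, dtype=np.float32))
    # y has the same shape as x; the remaining outputs carry batch statistics.
    y, _, _, _, _ = bn_train(x, scale, bias, mean, var)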
@@ -464,8 +462,8 @@ class BatchNorm3d(Cell):
                                       use_batch_statistics=use_batch_statistics,
                                       data_format="NCHW",
                                       dtype=dtype)
-        self.shape =
-        self.reshape =
+        self.shape = ops.Shape()
+        self.reshape = ops.Reshape()
 
     @staticmethod
     @_primexpr
@@ -519,16 +517,20 @@ class SyncBatchNorm(_BatchNorm):
             parameters. When set to ``False`` , :math:`\gamma` and :math:`\beta` are unlearnable parameters.
             Default: ``True`` .
         gamma_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initializer for the :math:`\gamma`
-            weight. The values of str refer to the function
+            weight. The values of str refer to the function :func:`mindspore.common.initializer.initializer`
+            including ``'zeros'`` , ``'ones'`` ,
             ``'xavier_uniform'`` , ``'he_uniform'`` , etc. Default: ``'ones'`` .
         beta_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initializer for the :math:`\beta` weight.
-            The values of str refer to the function
+            The values of str refer to the function :func:`mindspore.common.initializer.initializer`
+            including ``'zeros'`` , ``'ones'`` ,
             ``'xavier_uniform'`` , ``'he_uniform'`` , etc. Default: ``'zeros'`` .
         moving_mean_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initializer for the moving mean.
-            The values of str refer to the function
+            The values of str refer to the function :func:`mindspore.common.initializer.initializer`
+            including ``'zeros'`` , ``'ones'`` ,
             ``'xavier_uniform'`` , ``'he_uniform'`` , etc. Default: ``'zeros'`` .
         moving_var_init (Union[Tensor, str, Initializer, numbers.Number], optional): Initializer for the moving
-            variance. The values of str refer to the function
+            variance. The values of str refer to the function :func:`mindspore.common.initializer.initializer`
+            including ``'zeros'`` , ``'ones'`` ,
             ``'xavier_uniform'`` , ``'he_uniform'`` , etc. Default: ``'ones'`` .
         use_batch_statistics (bool, optional): If ``true`` , use the mean value and variance value of current batch
             data. If ``false`` , use the mean value and variance value of specified value. If ``None`` , training
@@ -651,9 +653,9 @@ class SyncBatchNorm(_BatchNorm):
             rank_list_name = '_'.join('%s' % id for id in sub_group)
             group_dict = _syncbatchnorm_group_dict()
             if rank_list_name not in group_dict:
-
-
-                hash_name =
+                sha256 = hashlib.sha256()
+                sha256.update(rank_list_name.encode('utf-8'))
+                hash_name = sha256.hexdigest()
                 self.group_name = str(self.group_device_num) + '_' + hash_name
                 group_dict[rank_list_name] = self.group_name
                 management.create_group(self.group_name, sub_group)
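The hunk above swaps the old digest for SHA-256 when deriving the SyncBatchNorm communication group name. Sketched standalone below; `group_device_num` is taken as `len(sub_group)` purely for illustration:

    import hashlib

    sub_group = [0, 1, 2, 3]
    rank_list_name = '_'.join('%s' % rank for rank in sub_group)  # "0_1_2_3"
    sha256 = hashlib.sha256()
    sha256.update(rank_list_name.encode('utf-8'))
    group_name = str(len(sub_group)) + '_' + sha256.hexdigest()
    # The same rank list always yields the same group name, so every rank
    # creates (or reuses) one identical communication group.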
@@ -705,10 +707,12 @@ class LayerNorm(Cell):
         begin_params_axis (int): The begin axis of the parameter input :math:`(\gamma, \beta)` to
             apply LayerNorm, the value should be in [-1, R). Default: ``-1`` .
         gamma_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the :math:`\gamma` weight.
-            The values of str refer to the function
+            The values of str refer to the function :func:`mindspore.common.initializer.initializer`
+            including ``'zeros'`` , ``'ones'`` ,
             ``'xavier_uniform'`` , ``'he_uniform'`` , etc. Default: ``'ones'`` .
         beta_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the :math:`\beta` weight.
-            The values of str refer to the function
+            The values of str refer to the function :func:`mindspore.common.initializer.initializer`
+            including ``'zeros'`` , ``'ones'`` ,
             ``'xavier_uniform'`` , ``'he_uniform'`` , etc. Default: ``'zeros'`` .
         epsilon (float): A value added to the denominator for numerical stability(:math:`\epsilon`). Default: ``1e-7`` .
         dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
@@ -766,9 +770,9 @@ class LayerNorm(Cell):
             gamma_init, normalized_shape, dtype=dtype), name="gamma")
         self.beta = Parameter(initializer(
             beta_init, normalized_shape, dtype=dtype), name="beta")
-        self.layer_norm =
-
-
+        self.layer_norm = ops.LayerNorm(begin_norm_axis=self.begin_norm_axis,
+                                        begin_params_axis=self.begin_params_axis,
+                                        epsilon=self.epsilon)
 
     def construct(self, input_x):
         y, _, _ = self.layer_norm(input_x, self.gamma.astype(input_x.dtype), self.beta.astype(input_x.dtype))
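As the unchanged `construct` line shows, `ops.LayerNorm` returns a triple `(y, mean, variance)` and the cell keeps only `y`. A minimal sketch, assuming a 2-D input normalized over the last axis:

    import numpy as np
    import mindspore as ms
    import mindspore.ops as ops

    layer_norm = ops.LayerNorm(begin_norm_axis=-1, begin_params_axis=-1, epsilon=1e-7)
    x = ms.Tensor(np.random.randn(2, 8).astype(np.float32))
    gamma = ms.Tensor(np.ones(8, dtype=np.float32))   # scale over the last axis
    beta = ms.Tensor(np.zeros(8, dtype=np.float32))   # shift over the last axis
    y, mean, variance = layer_norm(x, gamma, beta)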
@@ -911,9 +915,9 @@ class _InstanceNorm(Cell):
         self.beta = Parameter(initializer(
             beta_init, num_features, dtype=dtype), name="beta", requires_grad=affine)
 
-        self.shape =
+        self.shape = ops.Shape()
         self.momentum = momentum
-        self.instance_bn =
+        self.instance_bn = ops.InstanceNorm(epsilon=self.eps, momentum=self.momentum)
 
     def construct(self, x):
         self._check_input_dim(self.shape(x), self.cls_name)
@@ -968,10 +972,12 @@ class InstanceNorm1d(_InstanceNorm):
             running_mean and running_var computation. Default: ``0.1`` .
         affine (bool): A bool value. When set to True, gamma and beta can be learned. Default: ``True`` .
         gamma_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the gamma weight.
-            The values of str refer to the function
+            The values of str refer to the function :func:`mindspore.common.initializer.initializer`
+            including ``'zeros'`` , ``'ones'`` , etc.
             When initialized with Tensor, the shape should be :math:`(C)`. Default: ``'ones'`` .
         beta_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the beta weight.
-            The values of str refer to the function
+            The values of str refer to the function :func:`mindspore.common.initializer.initializer`
+            including ``'zeros'`` , ``'ones'`` , etc.
             When initialized with Tensor, the shape should be :math:`(C)`. Default: ``'zeros'`` .
         dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
 
@@ -1046,10 +1052,12 @@ class InstanceNorm2d(_InstanceNorm):
             running_mean and running_var computation. Default: ``0.1`` .
         affine (bool): A bool value. When set to ``True`` , gamma and beta can be learned. Default: ``True`` .
         gamma_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the gamma weight.
-            The values of str refer to the function
+            The values of str refer to the function :func:`mindspore.common.initializer.initializer`
+            including ``'zeros'`` , ``'ones'`` , etc.
             When initialized with Tensor, the shape should be :math:`(C)`. Default: ``'ones'`` .
         beta_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the beta weight.
-            The values of str refer to the function
+            The values of str refer to the function :func:`mindspore.common.initializer.initializer`
+            including ``'zeros'`` , ``'ones'`` , etc.
             When initialized with Tensor, the shape should be :math:`(C)`. Default: ``'zeros'`` .
         dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
 
@@ -1123,10 +1131,12 @@ class InstanceNorm3d(_InstanceNorm):
             running_mean and running_var computation. Default: ``0.1`` .
         affine (bool): A bool value. When set to ``True`` , gamma and beta can be learned. Default: ``True`` .
         gamma_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the gamma weight.
-            The values of str refer to the function
+            The values of str refer to the function :func:`mindspore.common.initializer.initializer`
+            including ``'zeros'`` , ``'ones'`` , etc.
             When initialized with Tensor, the shape should be :math:`(C)`. Default: ``'ones'`` .
         beta_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the beta weight.
-            The values of str refer to the function
+            The values of str refer to the function :func:`mindspore.common.initializer.initializer`
+            including ``'zeros'`` , ``'ones'`` , etc.
             When initialized with Tensor, the shape should be :math:`(C)`. Default: ``'zeros'`` .
         dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
 
@@ -1193,11 +1203,13 @@ class GroupNorm(Cell):
         affine (bool): A bool value, this layer will have learnable affine parameters when set to ``true`` .
             Default: ``True`` .
         gamma_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the gamma weight.
-            The values of str refer to the function
+            The values of str refer to the function :func:`mindspore.common.initializer.initializer`
+            including ``'zeros'`` , ``'ones'`` ,
             ``'xavier_uniform'`` , ``'he_uniform'`` , etc. Default: ``'ones'`` . If gamma_init is a Tensor, the shape
             must be :math:`(num\_channels)`.
         beta_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the beta weight.
-            The values of str refer to the function
+            The values of str refer to the function :func:`mindspore.common.initializer.initializer`
+            including ``'zeros'`` , ``'ones'`` ,
             ``'xavier_uniform'`` , ``'he_uniform'`` , etc. Default: ``'zeros'`` . If beta_init is a Tensor, the shape
             must be :math:`(num\_channels)`.
         dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .
mindspore/nn/layer/pooling.py
CHANGED
@@ -15,8 +15,6 @@
 """pooling"""
 from __future__ import absolute_import
 
-from mindspore.ops import operations as P
-from mindspore.ops import functional as F
 import mindspore.ops as ops
 from mindspore._checkparam import _check_3d_int_or_tuple
 from mindspore import _checkparam as validator
@@ -27,6 +25,7 @@ from mindspore.common import dtype as mstype
 from mindspore.nn.cell import Cell
 from mindspore._c_expression import MSContext
 from mindspore.ops.auto_generate import avg_pool1d_ext
+from mindspore.ops.function.nn_func import max_pool2d_ext
 
 
 __all__ = ['AvgPool3d', 'MaxPool3d', 'AvgPool2d', 'MaxPool2d', 'AvgPool1d', 'MaxPool1d', 'FractionalMaxPool2d',
@@ -413,13 +412,13 @@ class MaxPool3d(_PoolNd):
             if pad_mode.upper() != "PAD":
                 raise ValueError(f"For {self.cls_name}, the pad_mode must be 'pad' when dilation is not 1 "
                                  f"or return_indices is True, but got pad_mode:{pad_mode}.")
-            self.max_pool =
-
+            self.max_pool = ops.MaxPool3DWithArgmax(ksize=kernel_size, strides=stride, pads=padding,
+                                                    dilation=dilation, ceil_mode=ceil_mode)
         else:
             self.only_pad = False
             ceil_mode = None if not ceil_mode else True
-            self.max_pool =
-
+            self.max_pool = ops.MaxPool3D(kernel_size=kernel_size, strides=stride, pad_mode=pad_mode, pad_list=padding,
+                                          ceil_mode=ceil_mode)
 
     def construct(self, x):
         expand_batch = False
@@ -567,18 +566,18 @@ class MaxPool2d(_PoolNd):
                 stride = (1, self.stride, self.stride)
             self.padding = _check_maxpool_padding(padding, 2, self.cls_name)
             dilation = _cal_dilation(dilation, 2, self.cls_name)
-            self.max_pool =
-
+            self.max_pool = ops.MaxPool3DWithArgmax(ksize=kernel_size, strides=stride, pads=self.padding,
+                                                    dilation=dilation, ceil_mode=ceil_mode)
         else:
             self.use_pad = False
             if padding != 0 or dilation != 1 or return_indices or ceil_mode:
                 raise ValueError(f"For MaxPool2d, the parameter 'padding', 'dilation', 'return_indices', 'ceil_mode' "
                                  f"can not be set to non-default value when pad_mode is not 'pad', "
                                  f"but got pad_mode:{pad_mode}.")
-            self.max_pool =
-
-
-
+            self.max_pool = ops.MaxPool(kernel_size=self.kernel_size,
+                                        strides=self.stride,
+                                        pad_mode=self.pad_mode,
+                                        data_format=self.format)
 
     def construct(self, x):
         expand_batch = False
@@ -686,20 +685,16 @@ class MaxPool2dExt(Cell):
                  ceil_mode=False):
         """Initialize MaxPool2d."""
         super(MaxPool2dExt, self).__init__()
+        self.kernel_size = kernel_size
+        self.stride = stride if (stride is not None) else kernel_size
+        self.padding = padding
+        self.dilation = dilation
         self.return_indices = return_indices
-
-        if return_indices:
-            self.max_pool_func_ = ops.auto_generate.gen_ops_prim.MaxPoolWithIndices(kernel_size, strides, padding,
-                                                                                    dilation, ceil_mode)
-        else:
-            self.max_pool_func_ = ops.auto_generate.gen_ops_prim.MaxPoolWithMask(kernel_size, strides, padding,
-                                                                                 dilation, ceil_mode)
+        self.ceil_mode = ceil_mode
 
     def construct(self, input):
-
-
-            return out, indices
-        return out
+        return max_pool2d_ext(input, self.kernel_size, self.stride, self.padding,
+                              self.dilation, self.ceil_mode, self.return_indices)
 
 
 class MaxPool1d(_PoolNd):
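`MaxPool2dExt` no longer picks between `MaxPoolWithIndices` and `MaxPoolWithMask` at construction time; it just stores its arguments and defers to the functional `max_pool2d_ext`, which handles the `return_indices` split per call. A small usage sketch of the rewritten cell:

    import numpy as np
    import mindspore as ms
    from mindspore.nn.layer.pooling import MaxPool2dExt

    pool = MaxPool2dExt(kernel_size=2, stride=2, return_indices=True)
    x = ms.Tensor(np.random.randn(1, 3, 8, 8).astype(np.float32))
    out, indices = pool(x)  # with return_indices=False the call returns only out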
@@ -815,8 +810,8 @@ class MaxPool1d(_PoolNd):
             self.stride = (1, 1, stride)
             self.padding = _check_maxpool_padding(padding, 1, self.cls_name)
             dilation = _cal_dilation(dilation, 1, self.cls_name)
-            self.max_pool =
-
+            self.max_pool = ops.MaxPool3DWithArgmax(ksize=self.kernel_size, strides=self.stride, pads=self.padding,
+                                                    dilation=dilation, ceil_mode=ceil_mode)
 
         else:
             self.use_pad = False
@@ -824,13 +819,13 @@ class MaxPool1d(_PoolNd):
                 raise ValueError(f"For MaxPool1d, the parameter 'padding', 'dilation', 'return_indices', 'ceil_mode' "
                                  f"can not be set to non-default value when pad_mode is not 'pad', "
                                  f"but got pad_mode:{pad_mode}.")
-            self.max_pool =
-
-
-            self.shape =
-            self.reduce_mean =
-            self.expand =
-            self.squeeze =
+            self.max_pool = ops.MaxPool(kernel_size=self.kernel_size,
+                                        strides=self.stride,
+                                        pad_mode=self.pad_mode)
+            self.shape = ops.shape
+            self.reduce_mean = ops.ReduceMean(keep_dims=True)
+            self.expand = ops.ExpandDims()
+            self.squeeze = ops.Squeeze(2)
 
     def construct(self, x):
         expand_batch = False
@@ -1010,8 +1005,8 @@ class AvgPool3d(_PoolNd):
         if divisor_override is not None and divisor_override <= 0:
             raise ValueError(f"For '{self.cls_name}', the 'divisor_override' must be > 0, but got {divisor_override}.")
         divisor_override = 0 if divisor_override is None else divisor_override
-        self.avg_pool =
-
+        self.avg_pool = ops.AvgPool3D(self.kernel_size, self.stride, pad_mode, padding, ceil_mode, count_include_pad,
+                                      divisor_override)
 
     def construct(self, x):
         expand_batch = False
@@ -1272,15 +1267,15 @@ class AvgPool2d(_PoolNd):
                 stride = (1,) + self.stride
             elif isinstance(self.stride, int):
                 stride = (1, self.stride, self.stride)
-            self.avg_pool =
-
-
+            self.avg_pool = ops.AvgPool3D(kernel_size=kernel_size, strides=stride, pad_mode=pad_mode, pad=padding,
+                                          ceil_mode=ceil_mode,
+                                          count_include_pad=count_include_pad, divisor_override=divisor_override)
         else:
             self.is_expand = False
-            self.avg_pool =
-
-
-
+            self.avg_pool = ops.AvgPool(kernel_size=self.kernel_size,
+                                        strides=self.stride,
+                                        pad_mode=self.pad_mode,
+                                        data_format=self.format)
 
     def construct(self, x):
         expand_batch = False
@@ -1396,21 +1391,21 @@ class AvgPool1d(_PoolNd):
             self.is_expand_3d = True
             kernel_size = (1, 1, self.kernel_size)
             stride = (1, 1, self.stride)
-            self.avg_pool =
-
-
+            self.avg_pool = ops.AvgPool3D(kernel_size=kernel_size, strides=stride, pad_mode=pad_mode, pad=padding,
+                                          ceil_mode=ceil_mode,
+                                          count_include_pad=count_include_pad)
         else:
             self.is_expand_3d = False
             self.kernel_size = (1, self.kernel_size)
             self.stride = (1, self.stride)
-            self.avg_pool =
-
-
-            self.shape =
-            self.reduce_mean =
-            self.slice =
-            self.expand =
-            self.squeeze =
+            self.avg_pool = ops.AvgPool(kernel_size=self.kernel_size,
+                                        strides=self.stride,
+                                        pad_mode=self.pad_mode)
+            self.shape = ops.shape
+            self.reduce_mean = ops.ReduceMean(keep_dims=True)
+            self.slice = ops.Slice()
+            self.expand = ops.ExpandDims()
+            self.squeeze = ops.Squeeze(2)
 
     def construct(self, x):
         expand_batch = False
@@ -1510,11 +1505,11 @@ class AdaptiveAvgPool1d(Cell):
         super(AdaptiveAvgPool1d, self).__init__()
         validator.check_value_type('output_size', output_size, [int], self.cls_name)
         validator.check_int(output_size, 1, validator.GE, "output_size", self.cls_name)
-        self.shape =
-        self.expand =
-        self.squeeze =
+        self.shape = ops.shape
+        self.expand = ops.ExpandDims()
+        self.squeeze = ops.Squeeze(2)
         self.output_size = output_size
-        self.dtype =
+        self.dtype = ops.DType()
 
     def construct(self, input):
         _adaptive_shape_check(self.shape(input), self.output_size, self.cls_name)
@@ -1528,7 +1523,7 @@ class AdaptiveAvgPool1d(Cell):
         kernel_size = (1, kernel_size)
 
         input = self.expand(input, 2)
-        avg_pool =
+        avg_pool = ops.AvgPool(kernel_size=kernel_size, strides=stride)
         input = avg_pool(input)
         input = self.squeeze(input)
 
@@ -1591,7 +1586,7 @@ class AdaptiveAvgPool2d(Cell):
     def __init__(self, output_size):
         """Initialize AdaptiveAvgPool2d."""
         super(AdaptiveAvgPool2d, self).__init__()
-        self.adaptive_avgpool2d =
+        self.adaptive_avgpool2d = ops.AdaptiveAvgPool2D(output_size)
 
     def construct(self, input):
         return self.adaptive_avgpool2d(input)
@@ -1728,11 +1723,11 @@ class AdaptiveMaxPool1d(Cell):
         super(AdaptiveMaxPool1d, self).__init__()
         validator.check_int(output_size, 1, validator.GE, "output_size", self.cls_name)
         validator.check_value_type('output_size', output_size, [int], self.cls_name)
-        self.expand =
-        self.squeeze =
+        self.expand = ops.ExpandDims()
+        self.squeeze = ops.Squeeze(2)
         self.output_size = output_size
-        self.shape =
-        self.dtype =
+        self.shape = ops.shape
+        self.dtype = ops.DType()
 
     def construct(self, x):
         _adaptive_shape_check(self.shape(x), self.output_size, self.cls_name)
@@ -1745,7 +1740,7 @@ class AdaptiveMaxPool1d(Cell):
         stride = (1, width // self.output_size)
         kernel_size = (1, kernel_size)
 
-        max_pool =
+        max_pool = ops.MaxPool(kernel_size=kernel_size, strides=stride)
         x = self.expand(x, 2)
         x = max_pool(x)
         x = self.squeeze(x)
mindspore/nn/layer/rnn_cells.py
CHANGED
@@ -19,7 +19,7 @@ from functools import wraps
 import math
 import numpy as np
 
-import mindspore.ops as
+import mindspore.ops as ops
 import mindspore.common.dtype as mstype
 from mindspore import log as logger
 from mindspore.common.tensor import Tensor
@@ -40,17 +40,17 @@ def _check_input_dtype(input_dtype, param_name, allow_dtypes, cls_name):
 @constexpr(check=False)
 def _check_is_tensor(param_name, input_data, cls_name):
     """Internal function, used to check whether the input data is Tensor."""
-    if input_data is not None and not isinstance(
+    if input_data is not None and not isinstance(ops.typeof(input_data), mstype.TensorType):
         raise TypeError(f"For '{cls_name}', the '{param_name}' must be '{mstype.TensorType}', "
-                        f"but got '{
+                        f"but got '{ops.typeof(input_data)}'")
 
 
 @constexpr
 def _check_is_tuple(param_name, input_data, cls_name):
     """Internal function, used to check whether the input data is Tensor."""
-    if input_data is not None and not isinstance(
+    if input_data is not None and not isinstance(ops.typeof(input_data), mstype.Tuple):
         raise TypeError(f"For '{cls_name}', the '{param_name}' must be '{mstype.Tuple}', "
-                        f"but got '{
+                        f"but got '{ops.typeof(input_data)}'")
 
 
 @constexpr
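The restored checks compare the result of `ops.typeof` against MindSpore metatypes instead of calling Python `isinstance` on the value itself, which keeps them usable under `@constexpr`. A quick sketch of what the comparison sees (illustrative; it mirrors the check bodies above):

    import mindspore as ms
    import mindspore.ops as ops
    import mindspore.common.dtype as mstype

    t = ms.Tensor([1.0])
    print(isinstance(ops.typeof(t), mstype.TensorType))  # True: tensors map to a TensorType
    print(isinstance(ops.typeof((1, 2)), mstype.Tuple))  # True: tuples map to mstype.Tuple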
@@ -78,41 +78,41 @@ def _check_lstmcell_init(func):
 def _rnn_tanh_cell(inputs, hidden, w_ih, w_hh, b_ih, b_hh):
     """RNN cell function with tanh activation"""
     if b_ih is None:
-        igates =
-        hgates =
+        igates = ops.MatMul(False, True)(inputs, w_ih)
+        hgates = ops.MatMul(False, True)(hidden, w_hh)
     else:
-        igates =
-        hgates =
-    return
+        igates = ops.MatMul(False, True)(inputs, w_ih) + b_ih
+        hgates = ops.MatMul(False, True)(hidden, w_hh) + b_hh
+    return ops.Tanh()(igates + hgates)
 
 
 def _rnn_relu_cell(inputs, hidden, w_ih, w_hh, b_ih, b_hh):
     """RNN cell function with relu activation"""
     if b_ih is None:
-        igates =
-        hgates =
+        igates = ops.MatMul(False, True)(inputs, w_ih)
+        hgates = ops.MatMul(False, True)(hidden, w_hh)
     else:
-        igates =
-        hgates =
-    return
+        igates = ops.MatMul(False, True)(inputs, w_ih) + b_ih
+        hgates = ops.MatMul(False, True)(hidden, w_hh) + b_hh
+    return ops.ReLU()(igates + hgates)
 
 
 def _lstm_cell(inputs, hidden, w_ih, w_hh, b_ih, b_hh):
     """LSTM cell function"""
     hx, cx = hidden
     if b_ih is None:
-        gates =
+        gates = ops.MatMul(False, True)(inputs, w_ih) + ops.MatMul(False, True)(hx, w_hh)
     else:
-        gates =
-    ingate, forgetgate, cellgate, outgate =
+        gates = ops.MatMul(False, True)(inputs, w_ih) + ops.MatMul(False, True)(hx, w_hh) + b_ih + b_hh
+    ingate, forgetgate, cellgate, outgate = ops.Split(1, 4)(gates)
 
-    ingate =
-    forgetgate =
-    cellgate =
-    outgate =
+    ingate = ops.Sigmoid()(ingate)
+    forgetgate = ops.Sigmoid()(forgetgate)
+    cellgate = ops.Tanh()(cellgate)
+    outgate = ops.Sigmoid()(outgate)
 
     cy = (forgetgate * cx) + (ingate * cellgate)
-    hy = outgate *
+    hy = outgate * ops.Tanh()(cy)
 
     return hy, cy
 
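For reference, the restored `_lstm_cell` is the standard LSTM update. With `ops.MatMul(False, True)` computing `x @ W.T` and the gate tensor split into four chunks along axis 1, it corresponds to:

    \begin{aligned}
    (a_i, a_f, a_g, a_o) &= \operatorname{split}_4\!\left(x W_{ih}^{\top} + h W_{hh}^{\top} + b_{ih} + b_{hh}\right),\\
    c' &= \sigma(a_f) \odot c + \sigma(a_i) \odot \tanh(a_g),\\
    h' &= \sigma(a_o) \odot \tanh(c').
    \end{aligned}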
@@ -120,17 +120,17 @@ def _lstm_cell(inputs, hidden, w_ih, w_hh, b_ih, b_hh):
 def _gru_cell(inputs, hidden, w_ih, w_hh, b_ih, b_hh):
     """GRU cell function"""
     if b_ih is None:
-        gi =
-        gh =
+        gi = ops.MatMul(False, True)(inputs, w_ih)
+        gh = ops.MatMul(False, True)(hidden, w_hh)
     else:
-        gi =
-        gh =
-    i_r, i_i, i_n =
-    h_r, h_i, h_n =
-
-    resetgate =
-    inputgate =
-    newgate =
+        gi = ops.MatMul(False, True)(inputs, w_ih) + b_ih
+        gh = ops.MatMul(False, True)(hidden, w_hh) + b_hh
+    i_r, i_i, i_n = ops.Split(1, 3)(gi)
+    h_r, h_i, h_n = ops.Split(1, 3)(gh)
+
+    resetgate = ops.Sigmoid()(i_r + h_r)
+    inputgate = ops.Sigmoid()(i_i + h_i)
+    newgate = ops.Tanh()(i_n + resetgate * h_n)
 
     hy = newgate + inputgate * (hidden - newgate)
 
     return hy
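`_gru_cell` likewise matches the standard GRU equations, with the hidden-side bias kept inside the reset gate's product:

    \begin{aligned}
    (i_r, i_i, i_n) &= \operatorname{split}_3\!\left(x W_{ih}^{\top} + b_{ih}\right), \qquad
    (h_r, h_i, h_n) = \operatorname{split}_3\!\left(h W_{hh}^{\top} + b_{hh}\right),\\
    r &= \sigma(i_r + h_r), \qquad z = \sigma(i_i + h_i), \qquad n = \tanh(i_n + r \odot h_n),\\
    h' &= n + z \odot (h - n) = (1 - z) \odot n + z \odot h.
    \end{aligned}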