mindspore 2.6.0rc1__cp39-cp39-win_amd64.whl → 2.7.0rc1__cp39-cp39-win_amd64.whl
This diff reflects the contents of two publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +1 -1
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +40 -9
- mindspore/{_deprecated → _extends/optimize}/__init__.py +9 -3
- mindspore/_extends/optimize/cell_utils.py +96 -0
- mindspore/_extends/parse/__init__.py +2 -2
- mindspore/_extends/parse/compile_config.py +44 -22
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -1
- mindspore/_extends/parse/parser.py +37 -62
- mindspore/_extends/parse/resources.py +39 -0
- mindspore/_extends/parse/standard_method.py +43 -13
- mindspore/_extends/parse/trope.py +8 -1
- mindspore/_extends/pijit/__init__.py +1 -2
- mindspore/amp.py +4 -4
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/adasum.py +1 -1
- mindspore/boost/boost_cell_wrapper.py +4 -4
- mindspore/common/__init__.py +27 -2
- mindspore/common/_grad_function.py +2 -1
- mindspore/common/_pijit_context.py +28 -7
- mindspore/common/_stub_tensor.py +1 -209
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +77 -16
- mindspore/common/api.py +238 -113
- mindspore/common/dtype.py +21 -11
- mindspore/common/dump.py +10 -15
- mindspore/common/generator.py +5 -3
- mindspore/common/hook_handle.py +11 -2
- mindspore/common/jit_config.py +1 -1
- mindspore/common/jit_trace.py +84 -105
- mindspore/common/parameter.py +26 -12
- mindspore/common/recompute.py +3 -3
- mindspore/common/sparse_tensor.py +0 -3
- mindspore/common/symbol.py +0 -1
- mindspore/common/tensor.py +81 -81
- mindspore/communication/_comm_helper.py +46 -4
- mindspore/communication/management.py +79 -7
- mindspore/context.py +58 -40
- mindspore/dataset/core/config.py +3 -3
- mindspore/dataset/engine/datasets.py +20 -7
- mindspore/dataset/engine/datasets_user_defined.py +33 -3
- mindspore/dataset/engine/iterators.py +2 -2
- mindspore/dataset/engine/obs/config_loader.py +2 -2
- mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +8 -0
- mindspore/dataset/transforms/py_transforms.py +7 -3
- mindspore/dataset/transforms/transforms.py +7 -3
- mindspore/dataset/vision/validators.py +1 -0
- mindspore/device_context/ascend/device.py +1 -1
- mindspore/device_context/gpu/__init__.py +2 -2
- mindspore/device_context/gpu/device.py +1 -1
- mindspore/device_context/gpu/op_precision.py +4 -2
- mindspore/device_context/gpu/op_tuning.py +6 -3
- mindspore/device_manager.py +16 -9
- mindspore/dnnl.dll +0 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +3 -7
- mindspore/experimental/llm_boost/atb/boost_base.py +2 -3
- mindspore/experimental/optim/adadelta.py +13 -20
- mindspore/experimental/optim/adagrad.py +15 -22
- mindspore/experimental/optim/adam.py +17 -24
- mindspore/experimental/optim/adamax.py +14 -22
- mindspore/experimental/optim/adamw.py +28 -34
- mindspore/experimental/optim/asgd.py +15 -25
- mindspore/experimental/optim/lr_scheduler.py +27 -45
- mindspore/experimental/optim/nadam.py +14 -24
- mindspore/experimental/optim/optimizer.py +13 -23
- mindspore/experimental/optim/radam.py +18 -24
- mindspore/experimental/optim/rmsprop.py +14 -25
- mindspore/experimental/optim/rprop.py +15 -26
- mindspore/experimental/optim/sgd.py +9 -19
- mindspore/hal/__init__.py +4 -4
- mindspore/hal/contiguous_tensors_handle.py +2 -2
- mindspore/hal/memory.py +27 -7
- mindspore/include/api/cell.h +37 -1
- mindspore/include/api/delegate.h +10 -0
- mindspore/include/api/model.h +3 -0
- mindspore/include/api/types.h +2 -2
- mindspore/include/c_api/model_c.h +0 -58
- mindspore/include/c_api/tensor_c.h +0 -26
- mindspore/include/dataset/vision_ascend.h +1 -1
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/tools/cifar10.py +60 -11
- mindspore/mindrecord/tools/cifar10_to_mr.py +5 -0
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mindspore_ops_host.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/__init__.py +6 -46
- mindspore/mint/distributed/__init__.py +1 -0
- mindspore/mint/distributed/distributed.py +212 -9
- mindspore/mint/nn/__init__.py +1 -1
- mindspore/mint/nn/functional.py +53 -6
- mindspore/mint/nn/layer/_functions.py +164 -294
- mindspore/mint/nn/layer/activation.py +8 -6
- mindspore/mint/nn/layer/conv.py +137 -101
- mindspore/mint/nn/layer/normalization.py +8 -22
- mindspore/mint/optim/adam.py +19 -18
- mindspore/mint/optim/adamw.py +14 -8
- mindspore/mint/optim/sgd.py +5 -5
- mindspore/nn/cell.py +328 -502
- mindspore/nn/grad/cell_grad.py +11 -12
- mindspore/nn/layer/activation.py +32 -34
- mindspore/nn/layer/basic.py +67 -64
- mindspore/nn/layer/channel_shuffle.py +4 -4
- mindspore/nn/layer/combined.py +4 -2
- mindspore/nn/layer/conv.py +117 -110
- mindspore/nn/layer/dense.py +9 -7
- mindspore/nn/layer/embedding.py +50 -52
- mindspore/nn/layer/image.py +37 -39
- mindspore/nn/layer/math.py +111 -112
- mindspore/nn/layer/normalization.py +56 -44
- mindspore/nn/layer/pooling.py +58 -63
- mindspore/nn/layer/rnn_cells.py +33 -33
- mindspore/nn/layer/rnns.py +56 -56
- mindspore/nn/layer/thor_layer.py +74 -73
- mindspore/nn/layer/transformer.py +11 -1
- mindspore/nn/learning_rate_schedule.py +20 -20
- mindspore/nn/loss/loss.py +79 -81
- mindspore/nn/optim/adam.py +3 -3
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +2 -0
- mindspore/nn/optim/optimizer.py +1 -1
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/probability/distribution/exponential.py +2 -1
- mindspore/nn/probability/distribution/poisson.py +2 -1
- mindspore/nn/sparse/sparse.py +3 -3
- mindspore/nn/wrap/cell_wrapper.py +34 -37
- mindspore/nn/wrap/grad_reducer.py +37 -37
- mindspore/nn/wrap/loss_scale.py +72 -74
- mindspore/numpy/array_creations.py +5 -5
- mindspore/numpy/fft.py +1 -1
- mindspore/numpy/math_ops.py +5 -5
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_grad_experimental/grad_comm_ops.py +51 -13
- mindspore/ops/_grad_experimental/grad_debug_ops.py +14 -0
- mindspore/ops/_vmap/vmap_array_ops.py +31 -13
- mindspore/ops/_vmap/vmap_nn_ops.py +8 -16
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +42 -11
- mindspore/ops/auto_generate/gen_extend_func.py +23 -141
- mindspore/ops/auto_generate/gen_ops_def.py +727 -321
- mindspore/ops/auto_generate/gen_ops_prim.py +1721 -984
- mindspore/ops/auto_generate/pyboost_inner_prim.py +31 -1
- mindspore/ops/composite/__init__.py +10 -0
- mindspore/ops/composite/base.py +8 -4
- mindspore/ops/composite/multitype_ops/__init__.py +12 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +133 -109
- mindspore/ops/composite/multitype_ops/add_impl.py +70 -2
- mindspore/ops/composite/multitype_ops/div_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/floordiv_impl.py +29 -0
- mindspore/ops/composite/multitype_ops/getitem_impl.py +11 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +5 -3
- mindspore/ops/composite/multitype_ops/mul_impl.py +49 -0
- mindspore/ops/composite/multitype_ops/setitem_impl.py +57 -0
- mindspore/ops/composite/multitype_ops/sub_impl.py +34 -0
- mindspore/ops/composite/multitype_ops/zeros_like_impl.py +14 -0
- mindspore/ops/function/__init__.py +3 -1
- mindspore/ops/function/_add_attr_func.py +11 -6
- mindspore/ops/function/array_func.py +9 -96
- mindspore/ops/function/debug_func.py +4 -3
- mindspore/ops/function/grad/grad_func.py +1 -1
- mindspore/ops/function/math_func.py +33 -540
- mindspore/ops/function/nn_func.py +28 -74
- mindspore/ops/function/other_func.py +4 -1
- mindspore/ops/function/random_func.py +44 -5
- mindspore/ops/function/vmap_func.py +2 -1
- mindspore/ops/functional.py +2 -3
- mindspore/ops/functional_overload.py +571 -6
- mindspore/ops/op_info_register.py +21 -0
- mindspore/ops/operations/__init__.py +16 -11
- mindspore/ops/operations/_custom_ops_utils.py +689 -34
- mindspore/ops/operations/_inner_ops.py +3 -6
- mindspore/ops/operations/_sequence_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +2 -2
- mindspore/ops/operations/comm_ops.py +185 -26
- mindspore/ops/operations/custom_ops.py +294 -174
- mindspore/ops/operations/debug_ops.py +59 -4
- mindspore/ops/operations/image_ops.py +13 -13
- mindspore/ops/operations/manually_defined/ops_def.py +15 -16
- mindspore/ops/operations/math_ops.py +3 -4
- mindspore/ops/operations/nn_ops.py +7 -39
- mindspore/ops/primitive.py +6 -10
- mindspore/ops/tensor_method.py +47 -8
- mindspore/ops_generate/api/cpp_create_prim_instance_helper_generator.py +1 -1
- mindspore/ops_generate/api/functional_map_cpp_generator.py +10 -9
- mindspore/ops_generate/api/functions_cc_generator.py +58 -10
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +1 -1
- mindspore/ops_generate/common/base_generator.py +14 -0
- mindspore/ops_generate/common/gen_constants.py +8 -3
- mindspore/ops_generate/common/gen_utils.py +0 -19
- mindspore/ops_generate/common/op_proto.py +11 -4
- mindspore/ops_generate/common/template.py +88 -11
- mindspore/ops_generate/gen_ops.py +1 -1
- mindspore/ops_generate/op_def/lite_ops_cpp_generator.py +4 -4
- mindspore/ops_generate/op_def/ops_def_cc_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_name_h_generator.py +0 -3
- mindspore/ops_generate/op_def/ops_primitive_h_generator.py +0 -4
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -2
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +49 -8
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +2 -2
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +31 -0
- mindspore/ops_generate/pyboost/op_template_parser.py +98 -72
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +70 -273
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +14 -6
- mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +316 -0
- mindspore/ops_generate/pyboost/pyboost_functions_py_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +5 -3
- mindspore/ops_generate/pyboost/pyboost_inner_prim_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_internal_functions_cpp_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_functions_h_generator.py +76 -0
- mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +125 -0
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +4 -3
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +348 -61
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_utils.py +118 -9
- mindspore/ops_generate/tensor_py_cc_generator.py +1 -24
- mindspore/parallel/_auto_parallel_context.py +11 -8
- mindspore/parallel/_cell_wrapper.py +113 -45
- mindspore/parallel/_parallel_serialization.py +1 -1
- mindspore/parallel/_ps_context.py +4 -6
- mindspore/parallel/_tensor.py +167 -12
- mindspore/parallel/_transformer/moe.py +1 -1
- mindspore/parallel/_transformer/transformer.py +13 -8
- mindspore/parallel/auto_parallel.py +14 -7
- mindspore/parallel/checkpoint_convert.py +3 -3
- mindspore/parallel/checkpoint_transform.py +11 -7
- mindspore/parallel/cluster/process_entity/_api.py +84 -48
- mindspore/parallel/cluster/process_entity/_utils.py +95 -7
- mindspore/parallel/cluster/run.py +43 -4
- mindspore/parallel/function/__init__.py +8 -1
- mindspore/parallel/function/reshard_func.py +6 -7
- mindspore/parallel/nn/__init__.py +15 -2
- mindspore/parallel/nn/parallel_cell_wrapper.py +9 -10
- mindspore/parallel/nn/parallel_grad_reducer.py +7 -6
- mindspore/parallel/shard.py +3 -4
- mindspore/parallel/transform_safetensors.py +463 -174
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/analysis/parser/timeline_assembly_factory/ascend_timeline_assembler.py +7 -7
- mindspore/profiler/analysis/parser/timeline_assembly_factory/base_timeline_assembler.py +3 -0
- mindspore/profiler/analysis/parser/timeline_assembly_factory/trace_view_container.py +12 -6
- mindspore/profiler/analysis/parser/timeline_creator/cpu_op_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/fwk_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_creator/msprof_timeline_creator.py +4 -4
- mindspore/profiler/analysis/parser/timeline_creator/scope_layer_timeline_creator.py +3 -3
- mindspore/profiler/analysis/parser/timeline_event/fwk_event.py +4 -1
- mindspore/profiler/analysis/parser/timeline_event/timeline_event_pool.py +2 -1
- mindspore/profiler/analysis/task_manager.py +1 -1
- mindspore/profiler/analysis/viewer/ascend_communication_viewer.py +5 -1
- mindspore/profiler/analysis/viewer/ascend_integrate_viewer.py +2 -1
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +42 -22
- mindspore/profiler/analysis/viewer/ascend_step_trace_time_viewer.py +3 -2
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +9 -5
- mindspore/profiler/analysis/viewer/ms_operator_details_viewer.py +132 -0
- mindspore/profiler/common/constant.py +16 -0
- mindspore/profiler/common/profiler_context.py +25 -27
- mindspore/profiler/common/profiler_info.py +0 -16
- mindspore/profiler/common/profiler_op_analyse.py +235 -0
- mindspore/profiler/common/profiler_output_path.py +23 -8
- mindspore/profiler/common/profiler_parameters.py +128 -35
- mindspore/profiler/dynamic_profile/__init__.py +0 -0
- mindspore/profiler/dynamic_profile/dynamic_monitor_proxy.py +39 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_config_context.py +666 -0
- mindspore/profiler/dynamic_profile/dynamic_profiler_utils.py +62 -0
- mindspore/profiler/dynamic_profiler.py +305 -314
- mindspore/profiler/envprofiler.py +12 -7
- mindspore/profiler/experimental_config.py +96 -6
- mindspore/profiler/mstx.py +33 -12
- mindspore/profiler/platform/__init__.py +2 -3
- mindspore/profiler/platform/npu_profiler.py +29 -19
- mindspore/profiler/profiler.py +35 -19
- mindspore/profiler/profiler_action_controller.py +64 -76
- mindspore/profiler/schedule.py +10 -4
- mindspore/rewrite/common/config.py +1 -0
- mindspore/rewrite/common/namer.py +1 -0
- mindspore/rewrite/common/namespace.py +1 -0
- mindspore/rewrite/node/node.py +31 -11
- mindspore/rewrite/parsers/assign_parser.py +1 -1
- mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +7 -10
- mindspore/runtime/__init__.py +5 -5
- mindspore/runtime/event.py +10 -4
- mindspore/runtime/executor.py +60 -45
- mindspore/runtime/memory.py +30 -32
- mindspore/runtime/thread_bind_core.py +298 -164
- mindspore/safeguard/rewrite_obfuscation.py +12 -13
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/_utils.py +14 -4
- mindspore/train/amp.py +43 -20
- mindspore/train/callback/__init__.py +5 -5
- mindspore/train/callback/_checkpoint.py +3 -6
- mindspore/train/callback/_flops_collector.py +1 -1
- mindspore/train/callback/_landscape.py +0 -1
- mindspore/train/callback/_train_fault_tolerance.py +97 -16
- mindspore/train/data_sink.py +11 -2
- mindspore/train/dataset_helper.py +9 -0
- mindspore/train/model.py +135 -55
- mindspore/train/serialization.py +133 -111
- mindspore/train/summary/summary_record.py +13 -2
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +3 -2
- mindspore/utils/dryrun.py +0 -6
- mindspore/utils/runtime_execution_order_check.py +163 -77
- mindspore/utils/sdc_detect.py +68 -0
- mindspore/utils/utils.py +6 -9
- mindspore/version.py +1 -1
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +5 -4
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +333 -371
- mindspore/_deprecated/jit.py +0 -198
- mindspore/experimental/es/__init__.py +0 -22
- mindspore/experimental/es/embedding_service.py +0 -891
- mindspore/experimental/es/embedding_service_layer.py +0 -581
- mindspore/profiler/parser/__init__.py +0 -14
- mindspore/profiler/parser/aicpu_data_parser.py +0 -272
- mindspore/profiler/parser/ascend_analysis/__init__.py +0 -14
- mindspore/profiler/parser/ascend_analysis/constant.py +0 -71
- mindspore/profiler/parser/ascend_analysis/file_manager.py +0 -180
- mindspore/profiler/parser/ascend_analysis/function_event.py +0 -185
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +0 -136
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +0 -131
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +0 -104
- mindspore/profiler/parser/ascend_analysis/path_manager.py +0 -313
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +0 -123
- mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +0 -86
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +0 -75
- mindspore/profiler/parser/ascend_cluster_generator.py +0 -116
- mindspore/profiler/parser/ascend_communicate_generator.py +0 -314
- mindspore/profiler/parser/ascend_flops_generator.py +0 -116
- mindspore/profiler/parser/ascend_fpbp_generator.py +0 -82
- mindspore/profiler/parser/ascend_hccl_generator.py +0 -271
- mindspore/profiler/parser/ascend_integrate_generator.py +0 -42
- mindspore/profiler/parser/ascend_memory_generator.py +0 -185
- mindspore/profiler/parser/ascend_msprof_exporter.py +0 -282
- mindspore/profiler/parser/ascend_msprof_generator.py +0 -187
- mindspore/profiler/parser/ascend_op_generator.py +0 -334
- mindspore/profiler/parser/ascend_steptrace_generator.py +0 -94
- mindspore/profiler/parser/ascend_timeline_generator.py +0 -545
- mindspore/profiler/parser/base_timeline_generator.py +0 -483
- mindspore/profiler/parser/container.py +0 -229
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +0 -697
- mindspore/profiler/parser/flops_parser.py +0 -531
- mindspore/profiler/parser/framework_enum.py +0 -111
- mindspore/profiler/parser/framework_parser.py +0 -464
- mindspore/profiler/parser/framework_struct.py +0 -61
- mindspore/profiler/parser/gpu_analysis/__init__.py +0 -14
- mindspore/profiler/parser/gpu_analysis/function_event.py +0 -44
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +0 -89
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +0 -72
- mindspore/profiler/parser/hccl_parser.py +0 -573
- mindspore/profiler/parser/hwts_log_parser.py +0 -122
- mindspore/profiler/parser/integrator.py +0 -526
- mindspore/profiler/parser/memory_usage_parser.py +0 -277
- mindspore/profiler/parser/minddata_analyzer.py +0 -800
- mindspore/profiler/parser/minddata_parser.py +0 -186
- mindspore/profiler/parser/minddata_pipeline_parser.py +0 -299
- mindspore/profiler/parser/op_intermediate_parser.py +0 -149
- mindspore/profiler/parser/optime_parser.py +0 -250
- mindspore/profiler/parser/profiler_info.py +0 -213
- mindspore/profiler/parser/step_trace_parser.py +0 -666
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.6.0rc1.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0
mindspore/nn/layer/conv.py
CHANGED
@@ -19,7 +19,7 @@ import math
 import numpy as np
 
 from mindspore import context
-from mindspore.ops import operations as P
+from mindspore import ops
 import mindspore.common.dtype as mstype
 from mindspore.common.parameter import Parameter
 from mindspore.common.initializer import initializer, HeUniform, Uniform, _calculate_fan_in_and_fan_out

@@ -272,20 +272,20 @@ class Conv2d(_Conv):
 
         .. math::
             \begin{array}{ll} \\
-                H_{out} = \left \lceil{\frac{H_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1)}
-                    {\text{stride[0]}}} \right \rceil \\
-                W_{out} = \left \lceil{\frac{W_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1)}
-                    {\text{stride[1]}}} \right \rceil \\
+                H_{out} = \left \lfloor{\frac{H_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) - 1}
+                    {\text{stride[0]}}} \right \rfloor + 1 \\
+                W_{out} = \left \lfloor{\frac{W_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) - 1}
+                    {\text{stride[1]}}} \right \rfloor + 1 \\
             \end{array}
 
         pad_mode is ``'pad'``:
 
         .. math::
             \begin{array}{ll} \\
-                H_{out} = \left \lfloor{\frac{H_{in} + padding[0] + padding[1] -
-                    \text{dilation[0]} \times (\text{kernel_size[0]} - 1) - 1}{\text{stride[0]}}} \right \rfloor + 1 \\
-                W_{out} = \left \lfloor{\frac{W_{in} + padding[2] + padding[3] -
-                    \text{dilation[1]} \times (\text{kernel_size[1]} - 1) - 1}{\text{stride[1]}}} \right \rfloor + 1 \\
+                H_{out} = \left \lfloor{\frac{H_{in} + padding[0] + padding[1] - \text{dilation[0]} \times
+                    (\text{kernel_size[0]} - 1) - 1}{\text{stride[0]}}} \right \rfloor + 1 \\
+                W_{out} = \left \lfloor{\frac{W_{in} + padding[2] + padding[3] - \text{dilation[1]} \times
+                    (\text{kernel_size[1]} - 1) - 1}{\text{stride[1]}}} \right \rfloor + 1 \\
             \end{array}
 
         Raises:

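Both corrected formulas reduce to floor((size + total_pad - dilation * (kernel - 1) - 1) / stride) + 1, with 'valid' being the total_pad = 0 case. A quick numeric check in plain Python (the helper name is ours, not MindSpore API):

    def conv_out_size(size_in, kernel, stride, dilation, total_pad=0):
        """floor((size_in + total_pad - dilation*(kernel - 1) - 1) / stride) + 1"""
        return (size_in + total_pad - dilation * (kernel - 1) - 1) // stride + 1

    # pad_mode='valid': H_in=32, kernel=3, stride=2, dilation=1 -> floor(29/2) + 1 = 15
    assert conv_out_size(32, 3, 2, 1) == 15
    # pad_mode='pad' with padding[0] + padding[1] = 2 -> floor(31/2) + 1 = 16
    assert conv_out_size(32, 3, 2, 1, total_pad=2) == 16
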
@@ -351,16 +351,16 @@ class Conv2d(_Conv):
                          bias_init,
                          data_format,
                          dtype=dtype)
-        self.conv2d = P.Conv2D(out_channel=self.out_channels,
-                               kernel_size=self.kernel_size,
-                               mode=1,
-                               pad_mode=self.pad_mode,
-                               pad=self.padding,
-                               stride=self.stride,
-                               dilation=self.dilation,
-                               group=self.group,
-                               data_format=self.data_format)
-        self.bias_add = P.BiasAdd(data_format=self.data_format)
+        self.conv2d = ops.Conv2D(out_channel=self.out_channels,
+                                 kernel_size=self.kernel_size,
+                                 mode=1,
+                                 pad_mode=self.pad_mode,
+                                 pad=self.padding,
+                                 stride=self.stride,
+                                 dilation=self.dilation,
+                                 group=self.group,
+                                 data_format=self.data_format)
+        self.bias_add = ops.BiasAdd(data_format=self.data_format)
 
     def construct(self, x):
         output = self.conv2d(x, self.weight)

@@ -476,19 +476,25 @@ class Conv1d(_Conv):
         pad_mode is ``'same'``:
 
         .. math::
-            L_{out} = \left \lceil{\frac{L_{in}}{\text{stride}}} \right \rceil
+            \begin{array}{ll} \\
+                L_{out} = \left \lceil{\frac{L_{in}}{\text{stride}}} \right \rceil \\
+            \end{array}
 
         pad_mode is ``'valid'``:
 
         .. math::
-            L_{out} = \left \lceil{\frac{L_{in} - \text{dilation} \times (\text{kernel_size} - 1)}
-                {\text{stride}}} \right \rceil
+            \begin{array}{ll} \\
+                L_{out} = \left \lfloor{\frac{L_{in} - \text{dilation} \times (\text{kernel_size} - 1) - 1}
+                    {\text{stride}}} \right \rfloor + 1 \\
+            \end{array}
 
         pad_mode is ``'pad'``:
 
         .. math::
-            L_{out} = \left \lfloor{\frac{L_{in} + 2 \times padding - \text{dilation} \times
-                (\text{kernel_size} - 1) - 1}{\text{stride}}} \right \rfloor + 1
+            \begin{array}{ll} \\
+                L_{out} = \left \lfloor{\frac{L_{in} + 2 \times {padding} - \text{dilation} \times
+                    (\text{kernel_size} - 1) - 1}{\text{stride}}} \right \rfloor + 1 \\
+            \end{array}
 
         Raises:
             TypeError: If `in_channels`, `out_channels`, `kernel_size`, `stride`, `padding` or `dilation` is not an int.

@@ -541,8 +547,8 @@ class Conv1d(_Conv):
             kernel_size = (1, kernel_size)
             stride = (1, stride)
             dilation = (1, dilation)
-        get_shape = P.Shape()
-        get_dtype = P.DType()
+        get_shape = ops.Shape()
+        get_dtype = ops.DType()
         if isinstance(weight_init, Tensor):
             weight_init_shape = get_shape(weight_init)
             Validator.check_equal_int(len(weight_init_shape), 3, 'weight_init_shape', self.cls_name)

@@ -566,18 +572,18 @@ class Conv1d(_Conv):
             dtype=dtype)
         self.padding = (0, 0, padding, padding)
         Validator.check_string(pad_mode, ['valid', 'same', 'pad'], 'pad_mode', self.cls_name)
-        self.conv2d = P.Conv2D(out_channel=self.out_channels,
-                               kernel_size=self.kernel_size,
-                               mode=1,
-                               pad_mode=self.pad_mode,
-                               pad=self.padding,
-                               stride=self.stride,
-                               dilation=self.dilation,
-                               group=self.group)
-        self.bias_add = P.BiasAdd()
-        self.expand_dims = P.ExpandDims()
-        self.squeeze = P.Squeeze(2)
-        self.shape = P.Shape()
+        self.conv2d = ops.Conv2D(out_channel=self.out_channels,
+                                 kernel_size=self.kernel_size,
+                                 mode=1,
+                                 pad_mode=self.pad_mode,
+                                 pad=self.padding,
+                                 stride=self.stride,
+                                 dilation=self.dilation,
+                                 group=self.group)
+        self.bias_add = ops.BiasAdd()
+        self.expand_dims = ops.ExpandDims()
+        self.squeeze = ops.Squeeze(2)
+        self.shape = ops.Shape()
 
     def construct(self, x):
         x = self.expand_dims(x, 2)

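As this hunk shows, Conv1d is realized with the 2-D primitives: the input gains a dummy height axis, ops.Conv2D runs with kernel (1, k), and ops.Squeeze(2) removes the axis again. A NumPy illustration of just the reshaping (ours; shapes are hypothetical and the conv step is elided):

    import numpy as np

    x = np.ones((8, 3, 100), dtype=np.float32)  # (N, C, L)
    x2d = np.expand_dims(x, 2)                  # (N, C, 1, L), mirrors self.expand_dims(x, 2)
    # ops.Conv2D with kernel_size=(1, k) would run here on the 4-D tensor
    y = x2d.squeeze(2)                          # back to (N, C, L), mirrors self.squeeze
    print(y.shape)  # (8, 3, 100)
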
@@ -727,24 +733,24 @@ class Conv3d(_Conv):
 
         .. math::
             \begin{array}{ll} \\
-                D_{out} = \left \lfloor{\frac{D_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) }
-                    {\text{stride[0]}}} \right \rfloor + 1 \\
-                H_{out} = \left \lfloor{\frac{H_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) }
-                    {\text{stride[1]}}} \right \rfloor + 1 \\
-                W_{out} = \left \lfloor{\frac{W_{in} - \text{dilation[2]} \times (\text{kernel_size[2]} - 1) }
-                    {\text{stride[2]}}} \right \rfloor + 1 \\
+                D_{out} = \left \lfloor{\frac{D_{in} - \text{dilation[0]} \times (\text{kernel_size[0]} - 1) - 1}
+                    {\text{stride[0]}}} \right \rfloor + 1 \\
+                H_{out} = \left \lfloor{\frac{H_{in} - \text{dilation[1]} \times (\text{kernel_size[1]} - 1) - 1}
+                    {\text{stride[1]}}} \right \rfloor + 1 \\
+                W_{out} = \left \lfloor{\frac{W_{in} - \text{dilation[2]} \times (\text{kernel_size[2]} - 1) - 1}
+                    {\text{stride[2]}}} \right \rfloor + 1 \\
             \end{array}
 
         pad_mode is ``'pad'`` :
 
         .. math::
             \begin{array}{ll} \\
-                D_{out} = \left \lfloor{\frac{D_{in} + padding[0] + padding[1] -
-                    \text{kernel_size[0]} - 1 }{\text{stride[0]}}} \right \rfloor + 1 \\
-                H_{out} = \left \lfloor{\frac{H_{in} + padding[2] + padding[3] -
-                    \text{kernel_size[1]} - 1 }{\text{stride[1]}}} \right \rfloor + 1 \\
-                W_{out} = \left \lfloor{\frac{W_{in} + padding[4] + padding[5] -
-                    \text{kernel_size[2]} - 1 }{\text{stride[2]}}} \right \rfloor + 1 \\
+                D_{out} = \left \lfloor{\frac{D_{in} + padding[0] + padding[1] - \text{dilation[0]} \times
+                    (\text{kernel_size[0]} - 1) - 1}{\text{stride[0]}}} \right \rfloor + 1 \\
+                H_{out} = \left \lfloor{\frac{H_{in} + padding[2] + padding[3] - \text{dilation[1]} \times
+                    (\text{kernel_size[1]} - 1) - 1}{\text{stride[1]}}} \right \rfloor + 1 \\
+                W_{out} = \left \lfloor{\frac{W_{in} + padding[4] + padding[5] - \text{dilation[2]} \times
+                    (\text{kernel_size[2]} - 1) - 1}{\text{stride[2]}}} \right \rfloor + 1 \\
             \end{array}
 
         Raises:

@@ -812,20 +818,20 @@ class Conv3d(_Conv):
             data_format,
             dtype=dtype)
         out_channels = self.out_channels // group
-        self.conv3d = P.Conv3D(out_channel=out_channels,
-                               kernel_size=self.kernel_size,
-                               mode=1,
-                               pad_mode=self.pad_mode,
-                               pad=self.padding,
-                               stride=self.stride,
-                               dilation=self.dilation,
-                               group=1,
-                               data_format=self.data_format)
-        self.bias_add = P.BiasAdd(data_format=self.data_format)
-        self.shape = P.Shape()
-        self.concat = P.Concat(1)
-        self.split_0 = P.Split(0, self.group)
-        self.split_1 = P.Split(1, self.group)
+        self.conv3d = ops.Conv3D(out_channel=out_channels,
+                                 kernel_size=self.kernel_size,
+                                 mode=1,
+                                 pad_mode=self.pad_mode,
+                                 pad=self.padding,
+                                 stride=self.stride,
+                                 dilation=self.dilation,
+                                 group=1,
+                                 data_format=self.data_format)
+        self.bias_add = ops.BiasAdd(data_format=self.data_format)
+        self.shape = ops.Shape()
+        self.concat = ops.Concat(1)
+        self.split_0 = ops.Split(0, self.group)
+        self.split_1 = ops.Split(1, self.group)
 
     def construct(self, x):
         if self.group == 1:

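The split/concat primitives serve the group > 1 branch of construct: input and weights are split along their channel axes, each slice goes through the group=1 Conv3D, and the partial outputs are concatenated on axis 1. A NumPy re-creation of the idea (ours, not the MindSpore implementation):

    import numpy as np

    def grouped_conv(x, weight, group, conv):
        """Emulate a grouped convolution with a group=1 conv callable."""
        x_parts = np.split(x, group, axis=1)       # split the (N, C, ...) input on C
        w_parts = np.split(weight, group, axis=0)  # split the (O, C/g, ...) weights on O
        outs = [conv(xp, wp) for xp, wp in zip(x_parts, w_parts)]
        return np.concatenate(outs, axis=1)        # stitch per-group outputs back on C
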
@@ -935,11 +941,12 @@ class Conv3dTranspose(_Conv):
             Initializer for more details. Default: ``None`` , bias will be initialized using Uniform.
         data_format (str, optional): The optional value for data format. Currently only support ``'NCDHW'`` .
             Default: ``'NCDHW'`` .
-        dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Default: ``mstype.float32`` .
+        dtype (:class:`mindspore.dtype`, optional): Dtype of Parameters. Should be the same as dtype of input.
+            Default: ``mstype.float32`` .
 
     Inputs:
         - **x** (Tensor) - Tensor of shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`.
-          Currently input data dtype only supports float16 and float32.
+          Currently input data dtype for Ascend only supports float16; for CPU/GPU only supports float16 and float32.
 
     Outputs:
         Tensor, the shape is :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`.

@@ -982,7 +989,7 @@ class Conv3dTranspose(_Conv):
         TypeError: If `in_channels`, `out_channels` or `group` is not an int.
         TypeError: If `kernel_size`, `stride`, `padding` , `dilation` or `output_padding`
             is neither an int nor a tuple of three.
-        TypeError: If input data type is not float16 or float32.
+        TypeError: If input data type is not supported. For CPU/GPU: not float16 or float32; for Ascend, not float16.
         ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.
         ValueError: If `padding` is less than 0.
         ValueError: If `pad_mode` is not one of ``'same'``, ``'valid'``, ``'pad'``.

@@ -997,9 +1004,9 @@ class Conv3dTranspose(_Conv):
         >>> import mindspore
         >>> from mindspore import Tensor, nn
         >>> import numpy as np
-        >>> x = Tensor(np.ones([32, 16, 10, 32, 32]), mindspore.float32)
+        >>> x = Tensor(np.ones([32, 16, 10, 32, 32]), mindspore.float16)
         >>> conv3d_transpose = nn.Conv3dTranspose(in_channels=16, out_channels=3, kernel_size=(4, 6, 2),
-        ...                                       pad_mode='pad')
+        ...                                       pad_mode='pad', dtype=mindspore.float16)
         >>> output = conv3d_transpose(x)
         >>> print(output.shape)
         (32, 3, 13, 37, 33)

@@ -1032,7 +1039,7 @@ class Conv3dTranspose(_Conv):
         if isinstance(padding, tuple):
             Validator.check_equal_int(len(padding), 6, 'padding size', self.cls_name)
         self.output_padding = _check_3d_int_or_tuple("output_padding", output_padding, self.cls_name,
-                                                     greater_zero=False)
+                                                     greater_zero=False, pad_value=0)
         super(Conv3dTranspose, self).__init__(
             in_channels,
             out_channels,

@@ -1048,19 +1055,19 @@ class Conv3dTranspose(_Conv):
             data_format,
             transposed=True,
             dtype=dtype)
-        self.conv3d_transpose = P.Conv3DTranspose(in_channel=self.in_channels,
-                                                  out_channel=self.out_channels,
-                                                  kernel_size=self.kernel_size,
-                                                  mode=1,
-                                                  pad_mode=self.pad_mode,
-                                                  pad=self.padding,
-                                                  stride=self.stride,
-                                                  dilation=self.dilation,
-                                                  group=self.group,
-                                                  output_padding=self.output_padding,
-                                                  data_format=self.data_format)
-        self.bias_add = P.BiasAdd(data_format=self.data_format)
-        self.shape = P.Shape()
+        self.conv3d_transpose = ops.Conv3DTranspose(in_channel=self.in_channels,
+                                                    out_channel=self.out_channels,
+                                                    kernel_size=self.kernel_size,
+                                                    mode=1,
+                                                    pad_mode=self.pad_mode,
+                                                    pad=self.padding,
+                                                    stride=self.stride,
+                                                    dilation=self.dilation,
+                                                    group=self.group,
+                                                    output_padding=self.output_padding,
+                                                    data_format=self.data_format)
+        self.bias_add = ops.BiasAdd(data_format=self.data_format)
+        self.shape = ops.Shape()
 
     def construct(self, x):
         output = self.conv3d_transpose(x, self.weight)

@@ -1219,7 +1226,7 @@ class Conv2dTranspose(_Conv):
         >>> output = net(x).shape
         >>> print(output)
         (1, 64, 19, 53)
-…
+    """
 
     def __init__(self,
                  in_channels,

@@ -1265,7 +1272,7 @@ class Conv2dTranspose(_Conv):
 
         self.in_channels = in_channels
         self.out_channels = out_channels
-        self.shape = P.Shape()
+        self.shape = ops.Shape()
         Validator.check_string(pad_mode, ['valid', 'same', 'pad'], 'pad_mode', self.cls_name)
         self.is_valid = self.pad_mode == 'valid'
         self.is_same = self.pad_mode == 'same'

@@ -1273,15 +1280,15 @@ class Conv2dTranspose(_Conv):
         self.output_padding = output_padding
 
         # cause Conv2DTranspose's out_channel refers to Conv2D's out_channel.
-        self.conv2d_transpose = P.Conv2DTranspose(out_channel=in_channels,
-                                                  kernel_size=kernel_size,
-                                                  mode=1,
-                                                  pad_mode=pad_mode,
-                                                  pad=padding,
-                                                  stride=stride,
-                                                  dilation=dilation,
-                                                  group=group)
-        self.bias_add = P.BiasAdd()
+        self.conv2d_transpose = ops.Conv2DTranspose(out_channel=in_channels,
+                                                    kernel_size=kernel_size,
+                                                    mode=1,
+                                                    pad_mode=pad_mode,
+                                                    pad=padding,
+                                                    stride=stride,
+                                                    dilation=dilation,
+                                                    group=group)
+        self.bias_add = ops.BiasAdd()
         if isinstance(self.padding, int):
             self.padding_top, self.padding_bottom, self.padding_left, self.padding_right = (self.padding,) * 4
         else:

@@ -1308,7 +1315,7 @@ class Conv2dTranspose(_Conv):
             if not self.is_pad and (self.output_padding[0] > 0 or self.output_padding[1] > 0):
                 raise ValueError("when output_padding is not zero, pad_mode must be 'pad'")
 
-            pad = P.Pad(paddings=((0, 0), (0, 0), (0, self.output_padding[0]), (0, self.output_padding[1])))
+            pad = ops.Pad(paddings=((0, 0), (0, 0), (0, self.output_padding[0]), (0, self.output_padding[1])))
             return pad(conv2d_trans_ret)
 
         if self.output_padding == 0:

@@ -1320,7 +1327,7 @@ class Conv2dTranspose(_Conv):
                 raise ValueError("output_padding must be in range of [0, max(stride_w, dilation_w)).")
             if not self.is_pad and self.output_padding > 0:
                 raise ValueError("when output_padding is not zero, pad_mode must be 'pad'")
-            pad = P.Pad(paddings=((0, 0), (0, 0), (0, self.output_padding), (0, self.output_padding)))
+            pad = ops.Pad(paddings=((0, 0), (0, 0), (0, self.output_padding), (0, self.output_padding)))
             return pad(conv2d_trans_ret)
 
 

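In both branches, output_padding grows only the bottom/right edges of the transposed-convolution result, which is exactly what the ((0, 0), (0, 0), (0, pad_h), (0, pad_w)) paddings express. A minimal sketch of that primitive in isolation (input shape is ours):

    import numpy as np
    import mindspore as ms
    from mindspore import ops

    x = ms.Tensor(np.ones((1, 1, 4, 4)), ms.float32)
    pad = ops.Pad(paddings=((0, 0), (0, 0), (0, 1), (0, 1)))  # one extra row and column
    print(pad(x).shape)  # (1, 1, 5, 5)
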
@@ -1444,8 +1451,8 @@ class Conv1dTranspose(_Conv):
             kernel_size = (1, kernel_size)
             stride = (1, stride)
             dilation = (1, dilation)
-        get_shape = P.Shape()
-        get_dtype = P.DType()
+        get_shape = ops.Shape()
+        get_dtype = ops.DType()
         if isinstance(weight_init, Tensor):
             weight_init_shape = get_shape(weight_init)
             Validator.check_equal_int(len(weight_init_shape), 3, 'weight_init_shape', self.cls_name)

@@ -1473,24 +1480,24 @@ class Conv1dTranspose(_Conv):
         self.padding = (0, 0, padding, padding)
         self.in_channels = in_channels
         self.out_channels = out_channels
-        self.shape = P.Shape()
+        self.shape = ops.Shape()
         Validator.check_string(pad_mode, ['valid', 'same', 'pad'], 'pad_mode', self.cls_name)
         self.is_valid = self.pad_mode == 'valid'
         self.is_same = self.pad_mode == 'same'
         self.is_pad = self.pad_mode == 'pad'
 
         # cause Conv2DBackpropInput's out_channel refers to Conv2D's out_channel.
-        self.conv2d_transpose = P.Conv2DBackpropInput(out_channel=in_channels,
-                                                      kernel_size=kernel_size,
-                                                      mode=1,
-                                                      pad_mode=pad_mode,
-                                                      pad=self.padding,
-                                                      stride=stride,
-                                                      dilation=dilation,
-                                                      group=group)
-        self.bias_add = P.BiasAdd()
-        self.expand_dims = P.ExpandDims()
-        self.squeeze = P.Squeeze(2)
+        self.conv2d_transpose = ops.Conv2DBackpropInput(out_channel=in_channels,
+                                                        kernel_size=kernel_size,
+                                                        mode=1,
+                                                        pad_mode=pad_mode,
+                                                        pad=self.padding,
+                                                        stride=stride,
+                                                        dilation=dilation,
+                                                        group=group)
+        self.bias_add = ops.BiasAdd()
+        self.expand_dims = ops.ExpandDims()
+        self.squeeze = ops.Squeeze(2)
 
     def shard(self, strategy):
         self.conv2d_transpose.shard(strategy)

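Across conv.py (and the layer files below) the 2.7.0rc1 change is mechanical: primitive construction moves from the mindspore.ops.operations alias to the flat mindspore.ops namespace. As far as we can tell from the diff, only the import path differs; both spellings construct the same primitives:

    # 2.6.0rc1 style
    from mindspore.ops import operations as P
    conv2d = P.Conv2D(out_channel=64, kernel_size=3)

    # 2.7.0rc1 style
    from mindspore import ops
    conv2d = ops.Conv2D(out_channel=64, kernel_size=3)
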
mindspore/nn/layer/dense.py
CHANGED
@@ -18,7 +18,7 @@ from __future__ import absolute_import
 
 import math
 
-…
+from mindspore import ops
 import mindspore.common.dtype as mstype
 from mindspore.common.tensor import Tensor
 from mindspore.common.initializer import initializer, Uniform

@@ -41,9 +41,9 @@ def check_dense_inputs_same_shape(input1, input2, prim_name=None):
 @constexpr(check=False)
 def _check_is_tensor(param_name, input_data, cls_name):
     """Internal function, used to check whether the input data is Tensor."""
-    if input_data is not None and not isinstance(…
+    if input_data is not None and not isinstance(ops.typeof(input_data), mstype.TensorType):
         raise TypeError(f"For '{cls_name}', the '{param_name}' must be '{mstype.TensorType}', "
-                        f"but got '{…
+                        f"but got '{ops.typeof(input_data)}'")
 
 
 @_primexpr

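For reference, ops.typeof returns the MindSpore type object of a value, and mstype.TensorType is the class those objects belong to for tensors, so the rewritten check accepts tensors and rejects everything else. A small sketch (ours):

    import numpy as np
    import mindspore as ms
    import mindspore.common.dtype as mstype
    from mindspore import Tensor, ops

    t = Tensor(np.ones((2, 2)), ms.float32)
    print(isinstance(ops.typeof(t), mstype.TensorType))  # True
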
@@ -73,9 +73,11 @@ class BiDense(Cell):
         in2_channels (int): The number of channels in the input2 space.
         out_channels (int): The number of channels in the output space.
         weight_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable weight_init parameter.
-            The values of str refer to the function …
+            The values of str refer to the function :func:`mindspore.common.initializer.initializer`.
+            Default: ``None`` .
         bias_init (Union[Tensor, str, Initializer, numbers.Number]): The trainable bias_init parameter.
-            The values of str refer to the function …
+            The values of str refer to the function :func:`mindspore.common.initializer.initializer`.
+            Default: ``None`` .
         has_bias (bool): Specifies whether the layer uses :math:`\text{bias}` vector. Default: ``True`` .
         dtype (:class:`mindspore.dtype`): Dtype of Parameters. Default: ``mstype.float32`` .

@@ -170,8 +172,8 @@ class BiDense(Cell):
                              f"be equal to 1, and the first dim must be equal to 'out_channels'. But got "
                              f"'bias_init': {bias_init}, 'out_channels': {out_channels}.")
             self.bias = Parameter(initializer(bias_init, [out_channels], dtype=dtype), name="bias")
-        self.bias_add = …
-        self.matmul = …
+        self.bias_add = ops.BiasAdd()
+        self.matmul = ops.MatMul()
 
     def construct(self, input1, input2):
         _check_is_tensor("input1", input1, self.cls_name)

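BiDense applies a bilinear transformation to its two inputs and adds the bias; with weight_init/bias_init left at their ``None`` defaults, the layer chooses its own initialization. A minimal usage sketch (shapes are ours):

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, nn

    net = nn.BiDense(in1_channels=20, in2_channels=30, out_channels=40)
    x1 = Tensor(np.random.randn(128, 20), ms.float32)
    x2 = Tensor(np.random.randn(128, 30), ms.float32)
    print(net(x1, x2).shape)  # (128, 40)
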
mindspore/nn/layer/embedding.py
CHANGED
@@ -19,8 +19,6 @@ import mindspore.common.dtype as mstype
 import mindspore.ops as ops
 from mindspore import log as logger
 from mindspore.common.tensor import Tensor
-from mindspore.ops import operations as P
-from mindspore.ops import functional as F
 from mindspore.common.parameter import Parameter
 from mindspore.common.parameter import _get_unique_parameter_key
 from mindspore.common.initializer import initializer, Normal

@@ -130,17 +128,17 @@ class Embedding(Cell):
         self.init_tensor = Tensor(self.init_tensor, init_tensor_type)
         self.embedding_table = Parameter(
             self.init_tensor, name='embedding_table')
-        self.expand = P.ExpandDims()
-        self.reshape_flat = P.Reshape()
+        self.expand = ops.ExpandDims()
+        self.reshape_flat = ops.Reshape()
         self.shp_flat = (-1,)
-        self.gather = P.Gather()
-        self.one_hot = P.OneHot()
+        self.gather = ops.Gather()
+        self.one_hot = ops.OneHot()
         self.on_value = Tensor(1.0, self.dtype)
         self.off_value = Tensor(0.0, self.dtype)
-        self.array_mul = P.MatMul()
-        self.reshape = P.Reshape()
-        self.get_shp = P.Shape()
-        self.concat = P.Concat()
+        self.array_mul = ops.MatMul()
+        self.reshape = ops.Reshape()
+        self.get_shp = ops.Shape()
+        self.concat = ops.Concat()
 
     def construct(self, ids):
         out_shape = self.get_shp(ids) + (self.embedding_size,)

@@ -311,9 +309,9 @@ class EmbeddingLookup(Cell):
 
     Note:
         When 'target' is set to 'CPU', this module will use
-        P.EmbeddingLookup().set_device('CPU') which
+        ops.EmbeddingLookup().set_device('CPU') which
         specified 'offset = 0' to lookup table.
-        When 'target' is set to 'DEVICE', this module will use P.Gather() which
+        When 'target' is set to 'DEVICE', this module will use ops.Gather() which
         specified 'axis = 0' to lookup table.
         In field slice mode, the manual_shapes must be given. It is a tuple ,where
         the element is vocab[i], vocab[i] is the row numbers for i-th part.

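The target argument only selects which primitive serves the lookup (ops.EmbeddingLookup on CPU, ops.Gather on device); the result is the same either way. A usage sketch (vocabulary size and ids are ours):

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, nn

    lookup = nn.EmbeddingLookup(vocab_size=2000, embedding_size=16, target='CPU')
    ids = Tensor(np.array([[1, 5], [3, 9]]), ms.int32)
    print(lookup(ids).shape)  # (2, 2, 16)
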
@@ -407,10 +405,10 @@ class EmbeddingLookup(Cell):
             raise ValueError(f"For '{self.cls_name}', 'sparse' must be True when 'target' is \"CPU\", "
                              f"but got 'sparse': {sparse} and 'target': {target}")
         if sparse:
-            self.gatherv2 = P.SparseGatherV2()
+            self.gatherv2 = ops.SparseGatherV2()
         else:
-            self.gatherv2 = P.Gather()
-        self.embeddinglookup = P.EmbeddingLookup().set_device('CPU')
+            self.gatherv2 = ops.Gather()
+        self.embeddinglookup = ops.EmbeddingLookup().set_device('CPU')
         self.is_ps_server = False
         enable_ps = _get_ps_context("enable_ps")
         if enable_ps:

@@ -422,13 +420,13 @@ class EmbeddingLookup(Cell):
         parallel_mode = _get_parallel_mode()
         is_auto_parallel = parallel_mode in (
             ParallelMode.SEMI_AUTO_PARALLEL, ParallelMode.AUTO_PARALLEL)
-        self.gather_revert = P.Gather()
-        self.reshape_first = P.Reshape()
-        self.reshape = P.Reshape()
-        self.unique = P.Unique()
-        self.shape = P.Shape()
+        self.gather_revert = ops.Gather()
+        self.reshape_first = ops.Reshape()
+        self.reshape = ops.Reshape()
+        self.unique = ops.Unique()
+        self.shape = ops.Shape()
         if is_auto_parallel:
-            self.unique = P.Unique().shard(((1,),))
+            self.unique = ops.Unique().shard(((1,),))
         if self.cache_enable and enable_ps:
             self._set_voacb_cache_enable_for_ps(
                 vocab_cache_size, embedding_size, vocab_size, param_init, dtype=dtype)

@@ -582,12 +580,12 @@ class EmbeddingLookup(Cell):
 
             # Add EmbeddingLookup ops on different servers.
             if self.target == 'CPU':
-                embedding_lookup = P.EmbeddingLookup().set_device('CPU')
+                embedding_lookup = ops.EmbeddingLookup().set_device('CPU')
             else:
                 if self.sparse:
-                    embedding_lookup = P.SparseGatherV2()
+                    embedding_lookup = ops.SparseGatherV2()
                 else:
-                    embedding_lookup = P.Gather()
+                    embedding_lookup = ops.Gather()
             embedding_lookup.add_prim_attr(
                 'offset', self.embedding_offset[i])
             embedding_lookup.add_prim_attr('rank_id', i)

@@ -596,7 +594,7 @@ class EmbeddingLookup(Cell):
 
         # For now unique operation is not applied,
        # so we need to reduce the lookup results from different servers with AddN.
-        self.reduce_lookup_result = P.AddN()
+        self.reduce_lookup_result = ops.AddN()
 
     def _do_server_embedding_lookup(self, indices):
         '''

@@ -647,7 +645,7 @@ class EmbeddingLookup(Cell):
         else:
             out = self.gatherv2(self.embedding_table, indices, 0)
         if self.max_norm is not None:
-            axis = _make_axis_range(F.rank(indices), F.rank(out))
+            axis = _make_axis_range(ops.rank(indices), ops.rank(out))
             clip_by_norm = ClipByNorm(axis)
             out = clip_by_norm(out, self.max_norm)
         return out

@@ -660,9 +658,9 @@ class MultiFieldEmbeddingLookup(EmbeddingLookup):
 
     Note:
         When 'target' is set to 'CPU', this module will use
-        P.EmbeddingLookup().set_device('CPU') which
+        ops.EmbeddingLookup().set_device('CPU') which
         specified 'offset = 0' to lookup table.
-        When 'target' is set to 'DEVICE', this module will use P.Gather() which
+        When 'target' is set to 'DEVICE', this module will use ops.Gather() which
         specified 'axis = 0' to lookup table.
         The vectors with the same field_ids will be combined by the `operator`, such as 'SUM', 'MAX' and
         'MEAN'. Ensure the input_values of the padded id is zero, so that they can be ignored. The final

@@ -753,29 +751,29 @@ class MultiFieldEmbeddingLookup(EmbeddingLookup):
             field_size, 'field_size', self.cls_name)
         self.operator = operator
 
-        self.mul = P.Mul()
-        self.inf_mask_mul = P.Mul()
-        self.bias_add = P.Add()
-        self.inf_add = P.Add()
+        self.mul = ops.Mul()
+        self.inf_mask_mul = ops.Mul()
+        self.bias_add = ops.Add()
+        self.inf_add = ops.Add()
         self.merge_op = None
-        self.count_op = P.UnsortedSegmentSum()
-        self.abs = P.Abs()
-        self.equal = P.Equal()
-        self.add = P.Add()
-        self.cast = P.Cast()
-        self.div_no_nan = P.DivNoNan()
-        self.expand = P.ExpandDims()
-        self.max_mask_mul = P.Mul()
-        self.max_no_equal = P.NotEqual()
+        self.count_op = ops.UnsortedSegmentSum()
+        self.abs = ops.Abs()
+        self.equal = ops.Equal()
+        self.add = ops.Add()
+        self.cast = ops.Cast()
+        self.div_no_nan = ops.DivNoNan()
+        self.expand = ops.ExpandDims()
+        self.max_mask_mul = ops.Mul()
+        self.max_no_equal = ops.NotEqual()
 
         Validator.check_string(
             operator, ['SUM', 'MAX', 'MEAN'], 'operator', self.cls_name)
         if operator == MultiFieldEmbeddingLookup.OPERATOR_SUM:
-            self.merge_op = P.UnsortedSegmentSum()
+            self.merge_op = ops.UnsortedSegmentSum()
         elif operator == MultiFieldEmbeddingLookup.OPERATOR_MAX:
-            self.merge_op = P.UnsortedSegmentMax()
+            self.merge_op = ops.UnsortedSegmentMax()
         else:
-            self.merge_op = P.UnsortedSegmentSum()
+            self.merge_op = ops.UnsortedSegmentSum()
 
 
         parallel_mode = _get_parallel_mode()

@@ -822,16 +820,16 @@ class MultiFieldEmbeddingLookup(EmbeddingLookup):
         self.negative_inf_value = -3.402823466E+38
 
     def construct(self, input_indices, input_values, field_ids):
-        _check_input_2d(F.shape(input_indices), "input_indices", self.cls_name)
-        _check_input_2d(F.shape(input_values), "input_values", self.cls_name)
-        _check_input_2d(F.shape(field_ids), "field_ids", self.cls_name)
-        _check_input_dtype(F.dtype(input_indices), "input_indices", [mstype.int32, mstype.int64], self.cls_name)
-        _check_input_dtype(F.dtype(input_values), "input_values", [mstype.float32], self.cls_name)
-        _check_input_dtype(F.dtype(field_ids), "field_ids", [mstype.int32], self.cls_name)
+        _check_input_2d(ops.shape(input_indices), "input_indices", self.cls_name)
+        _check_input_2d(ops.shape(input_values), "input_values", self.cls_name)
+        _check_input_2d(ops.shape(field_ids), "field_ids", self.cls_name)
+        _check_input_dtype(ops.dtype(input_indices), "input_indices", [mstype.int32, mstype.int64], self.cls_name)
+        _check_input_dtype(ops.dtype(input_values), "input_values", [mstype.float32], self.cls_name)
+        _check_input_dtype(ops.dtype(field_ids), "field_ids", [mstype.int32], self.cls_name)
 
         batch_size = self.shape(input_indices)[0]
         num_segments = batch_size * self.field_size
-        bias = F.tuple_to_array(F.make_range(0, num_segments, self.field_size))
+        bias = ops.tuple_to_array(ops.make_range(0, num_segments, self.field_size))
         bias = self.reshape(bias, (batch_size, -1))
         field_ids = self.bias_add(field_ids, bias)
 
@@ -848,7 +846,7 @@ class MultiFieldEmbeddingLookup(EmbeddingLookup):
         else:
            out = self.gatherv2(self.embedding_table, input_indices, 0)
        if self.max_norm is not None:
-            axis = _make_axis_range(F.rank(input_indices), F.rank(out))
+            axis = _make_axis_range(ops.rank(input_indices), ops.rank(out))
            clip_by_norm = ClipByNorm(axis)
            out = clip_by_norm(out, self.max_norm)
 
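The bias computed in construct is what lets a single UnsortedSegmentSum/Max reduce the whole batch at once: ops.make_range(0, num_segments, field_size) produces one offset per sample, so per-sample field ids become globally unique segment ids. Re-created in NumPy (values are ours):

    import numpy as np

    batch_size, field_size = 2, 3
    num_segments = batch_size * field_size
    bias = np.arange(0, num_segments, field_size).reshape(batch_size, -1)  # [[0], [3]]
    field_ids = np.array([[0, 1, 1, 2], [2, 0, 0, 1]])
    segment_ids = field_ids + bias  # row 0 keeps ids 0..2, row 1 shifts to 3..5
    print(segment_ids)
    # [[0 1 1 2]
    #  [3 4 4 5]]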