mindspore-2.7.0rc1-cp310-cp310-win_amd64.whl → mindspore-2.7.1-cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +5 -2
- mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +2 -2
- mindspore/_extends/builtin_operations.py +3 -3
- mindspore/_extends/parallel_compile/akg_compiler/custom.py +1109 -0
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/__init__.py +3 -3
- mindspore/_extends/parse/compile_config.py +24 -1
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +6 -3
- mindspore/_extends/parse/parser.py +28 -22
- mindspore/_extends/parse/resources.py +1 -1
- mindspore/_extends/parse/standard_method.py +23 -2
- mindspore/_extends/parse/trope.py +2 -1
- mindspore/_extends/pijit/pijit_func_white_list.py +9 -27
- mindspore/amp.py +0 -18
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/boost/base.py +29 -2
- mindspore/common/__init__.py +18 -12
- mindspore/common/_decorator.py +3 -2
- mindspore/common/_grad_function.py +3 -1
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +371 -96
- mindspore/common/_utils.py +7 -43
- mindspore/common/api.py +434 -135
- mindspore/common/dtype.py +98 -57
- mindspore/common/dump.py +7 -108
- mindspore/common/dynamic_shape/__init__.py +0 -0
- mindspore/common/{auto_dynamic_shape.py → dynamic_shape/auto_dynamic_shape.py} +15 -23
- mindspore/common/dynamic_shape/enable_dynamic.py +197 -0
- mindspore/common/file_system.py +59 -9
- mindspore/common/hook_handle.py +82 -3
- mindspore/common/jit_config.py +5 -1
- mindspore/common/jit_trace.py +27 -12
- mindspore/common/lazy_inline.py +5 -3
- mindspore/common/np_dtype.py +3 -3
- mindspore/common/parameter.py +17 -127
- mindspore/common/recompute.py +4 -13
- mindspore/common/tensor.py +50 -217
- mindspore/communication/_comm_helper.py +11 -1
- mindspore/communication/comm_func.py +138 -4
- mindspore/communication/management.py +85 -1
- mindspore/config/op_info.config +0 -15
- mindspore/context.py +20 -106
- mindspore/dataset/__init__.py +1 -1
- mindspore/dataset/audio/transforms.py +1 -1
- mindspore/dataset/core/config.py +35 -1
- mindspore/dataset/engine/datasets.py +338 -319
- mindspore/dataset/engine/datasets_user_defined.py +38 -22
- mindspore/dataset/engine/datasets_vision.py +1 -1
- mindspore/dataset/engine/validators.py +1 -15
- mindspore/dataset/transforms/c_transforms.py +2 -2
- mindspore/dataset/transforms/transforms.py +3 -3
- mindspore/dataset/vision/__init__.py +1 -1
- mindspore/dataset/vision/py_transforms.py +8 -8
- mindspore/dataset/vision/transforms.py +17 -5
- mindspore/dataset/vision/utils.py +632 -21
- mindspore/device_context/ascend/op_tuning.py +35 -1
- mindspore/dnnl.dll +0 -0
- mindspore/{profiler/common/validator → graph}/__init__.py +9 -1
- mindspore/graph/custom_pass.py +55 -0
- mindspore/include/api/cell.h +28 -4
- mindspore/include/api/cfg.h +24 -7
- mindspore/include/api/context.h +1 -0
- mindspore/include/api/delegate.h +0 -2
- mindspore/include/api/dual_abi_helper.h +100 -19
- mindspore/include/api/graph.h +14 -1
- mindspore/include/api/kernel.h +16 -3
- mindspore/include/api/kernel_api.h +9 -1
- mindspore/include/api/metrics/accuracy.h +9 -0
- mindspore/include/api/model.h +5 -1
- mindspore/include/api/model_group.h +4 -0
- mindspore/include/api/model_parallel_runner.h +2 -0
- mindspore/include/api/status.h +48 -10
- mindspore/include/api/types.h +6 -1
- mindspore/include/dataset/constants.h +9 -0
- mindspore/include/dataset/execute.h +2 -2
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/__init__.py +3 -3
- mindspore/mindrecord/common/exceptions.py +1 -0
- mindspore/mindrecord/config.py +1 -1
- mindspore/{parallel/mpi → mindrecord/core}/__init__.py +4 -1
- mindspore/mindrecord/{shardheader.py → core/shardheader.py} +2 -1
- mindspore/mindrecord/{shardindexgenerator.py → core/shardindexgenerator.py} +1 -1
- mindspore/mindrecord/{shardreader.py → core/shardreader.py} +2 -1
- mindspore/mindrecord/{shardsegment.py → core/shardsegment.py} +2 -2
- mindspore/mindrecord/{shardutils.py → core/shardutils.py} +1 -1
- mindspore/mindrecord/{shardwriter.py → core/shardwriter.py} +1 -1
- mindspore/mindrecord/filereader.py +4 -4
- mindspore/mindrecord/filewriter.py +5 -5
- mindspore/mindrecord/mindpage.py +2 -2
- mindspore/mindrecord/tools/cifar10.py +4 -3
- mindspore/mindrecord/tools/cifar100.py +1 -1
- mindspore/mindrecord/tools/cifar100_to_mr.py +1 -1
- mindspore/mindrecord/tools/cifar10_to_mr.py +6 -6
- mindspore/mindrecord/tools/csv_to_mr.py +1 -1
- mindspore/mindrecord/tools/imagenet_to_mr.py +1 -1
- mindspore/mindrecord/tools/mnist_to_mr.py +1 -1
- mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -1
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_cluster.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_hardware_abstract.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/{mindspore_ops_host.dll → mindspore_ops_cpu.dll} +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mindspore_runtime_utils.dll +0 -0
- mindspore/mindspore_tools.dll +0 -0
- mindspore/mint/__init__.py +15 -10
- mindspore/mint/distributed/__init__.py +4 -0
- mindspore/mint/distributed/distributed.py +392 -69
- mindspore/mint/nn/__init__.py +2 -16
- mindspore/mint/nn/functional.py +4 -110
- mindspore/mint/nn/layer/__init__.py +0 -2
- mindspore/mint/nn/layer/_functions.py +1 -2
- mindspore/mint/nn/layer/activation.py +0 -6
- mindspore/mint/nn/layer/basic.py +0 -47
- mindspore/mint/nn/layer/conv.py +10 -10
- mindspore/mint/nn/layer/normalization.py +11 -16
- mindspore/mint/nn/layer/pooling.py +0 -4
- mindspore/nn/__init__.py +1 -3
- mindspore/nn/cell.py +231 -239
- mindspore/nn/layer/activation.py +4 -2
- mindspore/nn/layer/basic.py +56 -14
- mindspore/nn/layer/container.py +16 -0
- mindspore/nn/layer/embedding.py +4 -169
- mindspore/nn/layer/image.py +1 -1
- mindspore/nn/layer/normalization.py +2 -1
- mindspore/nn/layer/thor_layer.py +4 -85
- mindspore/nn/optim/ada_grad.py +0 -1
- mindspore/nn/optim/adafactor.py +0 -1
- mindspore/nn/optim/adam.py +32 -127
- mindspore/nn/optim/adamax.py +0 -1
- mindspore/nn/optim/asgd.py +0 -1
- mindspore/nn/optim/ftrl.py +8 -102
- mindspore/nn/optim/lamb.py +1 -4
- mindspore/nn/optim/lars.py +0 -3
- mindspore/nn/optim/lazyadam.py +25 -218
- mindspore/nn/optim/momentum.py +5 -43
- mindspore/nn/optim/optimizer.py +6 -55
- mindspore/nn/optim/proximal_ada_grad.py +0 -1
- mindspore/nn/optim/rmsprop.py +0 -1
- mindspore/nn/optim/rprop.py +0 -1
- mindspore/nn/optim/sgd.py +0 -1
- mindspore/nn/optim/tft_wrapper.py +2 -4
- mindspore/nn/optim/thor.py +0 -2
- mindspore/nn/probability/bijector/bijector.py +7 -8
- mindspore/nn/probability/bijector/gumbel_cdf.py +2 -2
- mindspore/nn/probability/bijector/power_transform.py +20 -21
- mindspore/nn/probability/bijector/scalar_affine.py +5 -5
- mindspore/nn/probability/bijector/softplus.py +13 -14
- mindspore/nn/probability/distribution/_utils/utils.py +2 -2
- mindspore/nn/wrap/cell_wrapper.py +39 -5
- mindspore/nn/wrap/grad_reducer.py +4 -89
- mindspore/numpy/array_creations.py +4 -4
- mindspore/numpy/fft.py +9 -9
- mindspore/numpy/utils_const.py +1 -1
- mindspore/{nn/reinforcement → onnx}/__init__.py +5 -8
- mindspore/onnx/onnx_export.py +137 -0
- mindspore/opencv_core4110.dll +0 -0
- mindspore/opencv_imgcodecs4110.dll +0 -0
- mindspore/{opencv_imgproc452.dll → opencv_imgproc4110.dll} +0 -0
- mindspore/ops/__init__.py +2 -0
- mindspore/ops/_grad_experimental/grad_comm_ops.py +38 -2
- mindspore/ops/_grad_experimental/grad_inner_ops.py +0 -9
- mindspore/ops/_op_impl/aicpu/__init__.py +0 -10
- mindspore/ops/_op_impl/cpu/__init__.py +1 -5
- mindspore/ops/_op_impl/cpu/{buffer_append.py → joinedstr_op.py} +8 -8
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +28 -24
- mindspore/ops/auto_generate/gen_extend_func.py +6 -11
- mindspore/ops/auto_generate/gen_ops_def.py +385 -154
- mindspore/ops/auto_generate/gen_ops_prim.py +5676 -5167
- mindspore/ops/communication.py +97 -0
- mindspore/ops/composite/__init__.py +5 -2
- mindspore/ops/composite/base.py +16 -2
- mindspore/ops/composite/multitype_ops/__init__.py +3 -1
- mindspore/ops/composite/multitype_ops/_compile_utils.py +150 -8
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/composite/multitype_ops/add_impl.py +7 -0
- mindspore/ops/composite/multitype_ops/mod_impl.py +27 -0
- mindspore/ops/function/__init__.py +2 -0
- mindspore/ops/function/array_func.py +24 -18
- mindspore/ops/function/comm_func.py +3883 -0
- mindspore/ops/function/debug_func.py +7 -6
- mindspore/ops/function/grad/grad_func.py +4 -12
- mindspore/ops/function/math_func.py +89 -86
- mindspore/ops/function/nn_func.py +92 -313
- mindspore/ops/function/random_func.py +9 -18
- mindspore/ops/functional.py +4 -1
- mindspore/ops/functional_overload.py +377 -30
- mindspore/ops/operations/__init__.py +2 -5
- mindspore/ops/operations/_custom_ops_utils.py +7 -9
- mindspore/ops/operations/_inner_ops.py +12 -50
- mindspore/ops/operations/_rl_inner_ops.py +0 -933
- mindspore/ops/operations/array_ops.py +5 -50
- mindspore/ops/operations/comm_ops.py +95 -17
- mindspore/ops/operations/custom_ops.py +237 -22
- mindspore/ops/operations/debug_ops.py +33 -35
- mindspore/ops/operations/manually_defined/ops_def.py +39 -318
- mindspore/ops/operations/math_ops.py +5 -5
- mindspore/ops/operations/nn_ops.py +3 -3
- mindspore/ops/operations/sparse_ops.py +0 -83
- mindspore/ops/primitive.py +4 -27
- mindspore/ops/tensor_method.py +88 -10
- mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +5 -5
- mindspore/ops_generate/aclnn/gen_aclnn_implement.py +8 -8
- mindspore/ops_generate/api/functions_cc_generator.py +53 -4
- mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +25 -11
- mindspore/ops_generate/common/gen_constants.py +11 -10
- mindspore/ops_generate/common/op_proto.py +18 -1
- mindspore/ops_generate/common/template.py +102 -245
- mindspore/ops_generate/common/template_utils.py +212 -0
- mindspore/ops_generate/gen_custom_ops.py +69 -0
- mindspore/ops_generate/op_def/ops_def_cc_generator.py +78 -7
- mindspore/ops_generate/op_def_py/base_op_prim_py_generator.py +360 -0
- mindspore/ops_generate/op_def_py/custom_op_prim_py_generator.py +140 -0
- mindspore/ops_generate/op_def_py/op_def_py_generator.py +54 -7
- mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -312
- mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +74 -17
- mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +22 -5
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +0 -16
- mindspore/ops_generate/pyboost/op_template_parser.py +3 -2
- mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +21 -5
- mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +2 -2
- mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +30 -10
- mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +10 -3
- mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +1 -1
- mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +19 -9
- mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +71 -28
- mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +10 -9
- mindspore/ops_generate/pyboost/pyboost_utils.py +27 -16
- mindspore/ops_generate/resources/yaml_loader.py +13 -0
- mindspore/ops_generate/tensor_py_cc_generator.py +2 -2
- mindspore/parallel/_auto_parallel_context.py +5 -15
- mindspore/parallel/_cell_wrapper.py +1 -1
- mindspore/parallel/_parallel_serialization.py +4 -6
- mindspore/parallel/_ps_context.py +2 -2
- mindspore/parallel/_utils.py +34 -17
- mindspore/parallel/auto_parallel.py +23 -9
- mindspore/parallel/checkpoint_transform.py +20 -2
- mindspore/parallel/cluster/process_entity/_api.py +28 -33
- mindspore/parallel/cluster/process_entity/_utils.py +9 -5
- mindspore/parallel/cluster/run.py +5 -3
- mindspore/{experimental/llm_boost/ascend_native → parallel/distributed}/__init__.py +21 -22
- mindspore/parallel/distributed/distributed_data_parallel.py +393 -0
- mindspore/parallel/distributed/flatten_grad_buffer.py +295 -0
- mindspore/parallel/function/reshard_func.py +6 -5
- mindspore/parallel/nn/parallel_cell_wrapper.py +40 -3
- mindspore/parallel/nn/parallel_grad_reducer.py +0 -8
- mindspore/parallel/shard.py +7 -21
- mindspore/parallel/strategy.py +336 -0
- mindspore/parallel/transform_safetensors.py +127 -20
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +13 -9
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +1 -1
- mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +1 -1
- mindspore/profiler/common/constant.py +5 -0
- mindspore/profiler/common/file_manager.py +9 -0
- mindspore/profiler/common/msprof_cmd_tool.py +40 -4
- mindspore/profiler/common/path_manager.py +65 -24
- mindspore/profiler/common/profiler_context.py +27 -14
- mindspore/profiler/common/profiler_info.py +3 -3
- mindspore/profiler/common/profiler_meta_data.py +1 -0
- mindspore/profiler/common/profiler_op_analyse.py +10 -6
- mindspore/profiler/common/profiler_path_manager.py +13 -0
- mindspore/profiler/common/util.py +30 -3
- mindspore/profiler/dynamic_profiler.py +91 -46
- mindspore/profiler/envprofiler.py +30 -5
- mindspore/profiler/experimental_config.py +18 -2
- mindspore/profiler/platform/cpu_profiler.py +10 -4
- mindspore/profiler/platform/npu_profiler.py +34 -7
- mindspore/profiler/profiler.py +193 -145
- mindspore/profiler/profiler_action_controller.py +1 -1
- mindspore/profiler/profiler_interface.py +2 -2
- mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
- mindspore/run_check/_check_version.py +108 -24
- mindspore/runtime/__init__.py +9 -6
- mindspore/runtime/executor.py +35 -0
- mindspore/runtime/memory.py +113 -0
- mindspore/runtime/thread_bind_core.py +1 -1
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/{experimental/llm_boost → tools}/__init__.py +5 -5
- mindspore/tools/data_dump.py +130 -0
- mindspore/tools/sdc_detect.py +91 -0
- mindspore/tools/stress_detect.py +63 -0
- mindspore/train/__init__.py +6 -6
- mindspore/train/_utils.py +8 -21
- mindspore/train/amp.py +6 -7
- mindspore/train/callback/_callback.py +2 -1
- mindspore/train/callback/_checkpoint.py +1 -17
- mindspore/train/callback/_flops_collector.py +10 -6
- mindspore/train/callback/_train_fault_tolerance.py +72 -25
- mindspore/train/data_sink.py +5 -9
- mindspore/train/dataset_helper.py +5 -5
- mindspore/train/model.py +41 -230
- mindspore/train/serialization.py +160 -401
- mindspore/train/train_thor/model_thor.py +2 -2
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +6 -3
- mindspore/utils/dlpack.py +92 -0
- mindspore/utils/dryrun.py +1 -1
- mindspore/utils/runtime_execution_order_check.py +10 -0
- mindspore/utils/sdc_detect.py +14 -12
- mindspore/utils/stress_detect.py +43 -0
- mindspore/utils/utils.py +152 -16
- mindspore/version.py +1 -1
- {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/METADATA +3 -2
- {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/RECORD +330 -344
- mindspore/_extends/remote/kernel_build_server_ascend.py +0 -75
- mindspore/communication/_hccl_management.py +0 -297
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -207
- mindspore/experimental/llm_boost/ascend_native/llm_boost.py +0 -52
- mindspore/experimental/llm_boost/atb/__init__.py +0 -23
- mindspore/experimental/llm_boost/atb/boost_base.py +0 -385
- mindspore/experimental/llm_boost/atb/llama_boost.py +0 -137
- mindspore/experimental/llm_boost/atb/qwen_boost.py +0 -124
- mindspore/experimental/llm_boost/register.py +0 -130
- mindspore/experimental/llm_boost/utils.py +0 -31
- mindspore/include/OWNERS +0 -7
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/nn/optim/_dist_optimizer_registry.py +0 -111
- mindspore/nn/reinforcement/_batch_read_write.py +0 -142
- mindspore/nn/reinforcement/_tensors_queue.py +0 -152
- mindspore/nn/reinforcement/tensor_array.py +0 -145
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/ops/_op_impl/aicpu/priority_replay_buffer.py +0 -113
- mindspore/ops/_op_impl/aicpu/reservoir_replay_buffer.py +0 -96
- mindspore/ops/_op_impl/aicpu/sparse_cross.py +0 -42
- mindspore/ops/_op_impl/cpu/buffer_get.py +0 -28
- mindspore/ops/_op_impl/cpu/buffer_sample.py +0 -28
- mindspore/ops/_op_impl/cpu/priority_replay_buffer.py +0 -42
- mindspore/ops/operations/_tensor_array.py +0 -359
- mindspore/ops/operations/rl_ops.py +0 -288
- mindspore/parallel/_offload_context.py +0 -275
- mindspore/parallel/_recovery_context.py +0 -115
- mindspore/parallel/_transformer/__init__.py +0 -35
- mindspore/parallel/_transformer/layers.py +0 -765
- mindspore/parallel/_transformer/loss.py +0 -251
- mindspore/parallel/_transformer/moe.py +0 -693
- mindspore/parallel/_transformer/op_parallel_config.py +0 -222
- mindspore/parallel/_transformer/transformer.py +0 -3124
- mindspore/parallel/mpi/_mpi_config.py +0 -116
- mindspore/profiler/common/validator/validate_path.py +0 -84
- mindspore/train/memory_profiling_pb2.py +0 -298
- mindspore/utils/hooks.py +0 -81
- /mindspore/common/{_auto_dynamic.py → dynamic_shape/_auto_dynamic.py} +0 -0
- {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/WHEEL +0 -0
- {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.7.0rc1.dist-info → mindspore-2.7.1.dist-info}/top_level.txt +0 -0
mindspore/nn/probability/bijector/power_transform.py CHANGED

@@ -13,8 +13,7 @@
 # limitations under the License.
 # ============================================================================
 """PowerTransform Bijector"""
-
-from mindspore.ops import functional as F
+import mindspore.ops as ops
 from ..distribution._utils.utils import check_greater_equal_zero
 from ..distribution._utils.custom_ops import exp_generic, log_generic
 from .bijector import Bijector

@@ -76,16 +75,16 @@ class PowerTransform(Bijector):
         self._power = self._add_parameter(power, 'power')
         check_greater_equal_zero(self._power, 'Power')

-        self.pow =
-        self.dtypeop =
-        self.cast =
-        self.equal_base =
+        self.pow = ops.Pow()
+        self.dtypeop = ops.DType()
+        self.cast = ops.Cast()
+        self.equal_base = ops.Equal()
         self.exp = exp_generic
-        self.expm1 =
+        self.expm1 = ops.Expm1()
         self.log = log_generic
-        self.log1p =
-        self.select_base =
-        self.shape =
+        self.log1p = ops.Log1p()
+        self.select_base = ops.Select()
+        self.shape = ops.Shape()

     @property
     def power(self):

@@ -113,17 +112,17 @@ class PowerTransform(Bijector):
         power_local = self.cast_param_by_value(x, self.power)

         # broad cast the value of x and power
-        ones =
-
+        ones = ops.fill(self.dtypeop(power_local), self.shape(x + power_local),
+                        1.)
         power_local = power_local * ones
         x = x * ones
         safe_power = self.select_base(
             self.equal_base(power_local,
-
+                            ops.ZerosLike()(power_local)), ones, power_local)

         forward_v = self.select_base(
             self.equal_base(power_local,
-
+                            ops.ZerosLike()(power_local)), self.exp(x),
             self.exp(self.log1p(x * safe_power) / safe_power))
         return forward_v

@@ -135,17 +134,17 @@ class PowerTransform(Bijector):
         power_local = self.cast_param_by_value(y, self.power)

         # broad cast the value of x and power
-        ones =
-
+        ones = ops.fill(self.dtypeop(power_local), self.shape(y + power_local),
+                        1.)
         power_local = power_local * ones
         y = y * ones
         safe_power = self.select_base(
             self.equal_base(power_local,
-
+                            ops.ZerosLike()(power_local)), ones, power_local)

         inverse_v = self.select_base(
             self.equal_base(power_local,
-
+                            ops.ZerosLike()(power_local)), self.log(y),
             self.expm1(self.log(y) * safe_power) / safe_power)

         return inverse_v

@@ -166,14 +165,14 @@ class PowerTransform(Bijector):
         power_local = self.cast_param_by_value(x, self.power)

         # broad cast the value of x and power
-        ones =
-
+        ones = ops.fill(self.dtypeop(power_local), self.shape(x + power_local),
+                        1.)
         power_local = power_local * ones
         x = x * ones

         forward_log_j = self.select_base(
             self.equal_base(power_local,
-
+                            ops.ZerosLike()(power_local)), x,
             (1. / power_local - 1) * self.log1p(x * power_local))

         return forward_log_j
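Note: the change in all three bijector files below follows one pattern. The removed `from mindspore.ops import functional as F` import is replaced by `import mindspore.ops as ops`, operator attributes are instantiated from primitive classes (`ops.Pow()`, `ops.Select()`, ...), and constant tensors are built with `ops.fill`. A minimal sketch of that style on a made-up input (not code from the package):

    import mindspore as ms
    import mindspore.ops as ops

    x = ms.Tensor([0.5, 1.0, 2.0], ms.float32)   # hypothetical input tensor

    pow_op = ops.Pow()        # primitive instances, as assigned in __init__ in the hunks above
    dtypeop = ops.DType()
    shape_op = ops.Shape()

    # Mirrors `ones = ops.fill(self.dtypeop(x), self.shape(x), 1.0)` from the diff:
    ones = ops.fill(dtypeop(x), shape_op(x), 1.0)
    y = pow_op(x, ones)       # elementwise x ** 1, i.e. x unchanged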
mindspore/nn/probability/bijector/scalar_affine.py CHANGED

@@ -13,7 +13,7 @@
 # limitations under the License.
 # ============================================================================
 """Scalar Affine Bijector"""
-
+import mindspore.ops as ops
 from ..distribution._utils.custom_ops import log_generic
 from .bijector import Bijector

@@ -86,10 +86,10 @@ class ScalarAffine(Bijector):
         self._scale = self._add_parameter(scale, 'scale')
         self._shift = self._add_parameter(shift, 'shift')

-        self.abs =
-        self.oneslike =
-        self.dtypeop =
-        self.cast =
+        self.abs = ops.Abs()
+        self.oneslike = ops.OnesLike()
+        self.dtypeop = ops.DType()
+        self.cast = ops.Cast()
         self.log = log_generic

     @property
mindspore/nn/probability/bijector/softplus.py CHANGED

@@ -14,8 +14,7 @@
 # ============================================================================
 """Softplus Bijector"""
 import numpy as np
-
-from mindspore.ops import functional as F
+import mindspore.ops as ops
 from mindspore.nn.layer.activation import LogSigmoid
 from ..distribution._utils.custom_ops import exp_generic, log_generic
 from .bijector import Bijector

@@ -82,17 +81,17 @@ class Softplus(Bijector):

         self.exp = exp_generic
         self.log = log_generic
-        self.expm1 =
-        self.abs =
-        self.dtypeop =
-        self.cast =
-        self.greater =
-        self.less =
+        self.expm1 = ops.Expm1()
+        self.abs = ops.Abs()
+        self.dtypeop = ops.DType()
+        self.cast = ops.Cast()
+        self.greater = ops.Greater()
+        self.less = ops.Less()
         self.log_sigmoid = LogSigmoid()
-        self.logicalor =
-        self.select =
-        self.shape =
-        self.sigmoid =
+        self.logicalor = ops.LogicalOr()
+        self.select = ops.Select()
+        self.shape = ops.Shape()
+        self.sigmoid = ops.Sigmoid()
         self.softplus = self._softplus
         self.inverse_softplus = self._inverse_softplus

@@ -104,7 +103,7 @@ class Softplus(Bijector):
         too_large = self.greater(x, -self.threshold)
         too_small_value = self.exp(x)
         too_large_value = x
-        ones =
+        ones = ops.fill(self.dtypeop(x), self.shape(x), 1.0)
         too_small_or_too_large = self.logicalor(too_small, too_large)
         x = self.select(too_small_or_too_large, ones, x)
         y = self.log(self.exp(x) + 1.0)

@@ -120,7 +119,7 @@ class Softplus(Bijector):
         too_large = self.greater(x, (-1) * self.threshold)
         too_small_value = self.log(x)
         too_large_value = x
-        ones =
+        ones = ops.fill(self.dtypeop(x), self.shape(x), 1.0)
         too_small_or_too_large = self.logicalor(too_small, too_large)
         x = self.select(too_small_or_too_large, ones, x)
         y = x + self.log(self.abs(self.expm1((-1)*x)))
mindspore/nn/probability/distribution/_utils/utils.py CHANGED

@@ -298,7 +298,7 @@ class CheckTuple(PrimitiveWithInfer):
         # The op is not used in a cell
         if isinstance(x, tuple):
             return x
-        if context.get_context("mode") ==
+        if context.get_context("mode") == context.GRAPH_MODE:
             return x["value"]
         raise TypeError(f"For {name}, input type must be a tuple.")

@@ -349,7 +349,7 @@ def set_param_type(args, hint_type):
     for name, arg in args.items():
         if hasattr(arg, 'dtype'):
             if isinstance(arg, np.ndarray):
-                cur_dtype = mstype.
+                cur_dtype = mstype._pytype_to_dtype(arg.dtype)  # pylint:disable=protected-access
             else:
                 cur_dtype = arg.dtype
             if common_dtype is None:
mindspore/nn/wrap/cell_wrapper.py CHANGED

@@ -23,7 +23,7 @@ from types import FunctionType, MethodType

 from mindspore import log as logger
 from mindspore.parallel._utils import _get_device_num, _get_gradients_mean,\
-    _get_parallel_mode, _get_enable_parallel_optimizer
+    _get_parallel_mode, _get_enable_parallel_optimizer
 from mindspore.context import ParallelMode
 from mindspore import _checkparam as validator
 from mindspore import ops, nn

@@ -397,8 +397,7 @@ class TrainOneStepCell(Cell):
         self.reducer_flag = False
         self.grad_reducer = nn.Identity()
         self.parallel_mode = _get_parallel_mode()
-        self.reducer_flag = self.parallel_mode in (ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL)
-            _is_pynative_parallel()
+        self.reducer_flag = self.parallel_mode in (ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL)
         if self.reducer_flag:
             self.mean = _get_gradients_mean()
             self.degree = _get_device_num()

@@ -860,7 +859,7 @@ class _BroadCastCell(Cell):
         from mindspore import context
         self.map_ = ops.Map()
         self.params = tuple(params)
-        if context.get_context("device_target") == "Ascend"
+        if context.get_context("device_target") == "Ascend":
             rank_list = [id for id in range(0, get_group_size())]
             create_group("BroadcastWorldGroup", rank_list)
             self.broadcast = ops.Broadcast(0, group="BroadcastWorldGroup")

@@ -889,6 +888,8 @@ class PipelineCell(Cell):
         micro_size (int): MicroBatch size.
         stage_config (dict, optional): The stage configuration for each cell's execution in pipeline parallel.
             Default ``None``.
+        segment_config (dict, optional): The segment configuration for each cell's execution in pipeline parallel.
+            Default ``None``.

     Supported Platforms:
         ``Ascend`` ``GPU``

@@ -900,7 +901,7 @@ class PipelineCell(Cell):
         >>> net = LeNet5()
         >>> net = nn.PipelineCell(net, 4)
     """
-    def __init__(self, network, micro_size, stage_config=None):
+    def __init__(self, network, micro_size, stage_config=None, segment_config=None):
         super(PipelineCell, self).__init__(auto_prefix=False)
         self.network = network
         self.micro_inputs = nn.CellList()

@@ -956,6 +957,39 @@ class PipelineCell(Cell):
                 print(cell_name)
             raise KeyError("For 'PipelineCell', the argument 'stage_config' : {} is not "
                            "found in 'network' : {}".format(config_dict, network))
+        if segment_config is None:
+            return
+        self._config_segment(segment_config)
+
+
+    def _config_segment(self, segment_config=None):
+        """
+        Config segment num for cell.
+        """
+        config_dict = segment_config.copy()
+        for cell_name, cell in self.network.cells_and_names():
+            if cell_name in segment_config:
+                setattr(cell, "pipeline_segment", segment_config[cell_name])
+                del config_dict[cell_name]
+        if str(self.network) in segment_config:
+            setattr(self.network, "pipeline_segment", segment_config[str(self.network)])
+            del config_dict[str(self.network)]
+        # if there are any config elements left, print them
+        if config_dict:
+            for config_cell_name, config_segment_num in config_dict.items():
+                logger.error("pipeline_cell segment_config set pipeline_segment fail!")
+                logger.warning("config cell name:" + str(config_cell_name) +
+                               " config segment num:" + str(config_segment_num))
+            logger.warning("network:" + str(self.network))
+            logger.warning("cell name available:")
+            for cell_name, _ in self.network.cells_and_names():
+                logger.warning(cell_name)
+            raise KeyError("For 'PipelineCell', the argument 'segment_config' : {} is not "
+                           "found in 'network' : {}".format(config_dict, self.network))
+
+
+    def shard(self, in_strategy, out_strategy=None, parameter_plan=None, device="Ascend", level=0):
+        raise ValueError("For 'PipelineCell', no 'shard' on 'PipelineCell' is allowed.")

     def construct(self, *inputs):
         ret = None
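Note: the `segment_config` hunks above add a second per-cell mapping alongside `stage_config`. `_config_segment` looks up each key in the names yielded by `self.network.cells_and_names()` (or `str(self.network)` for the whole network), sets a `pipeline_segment` attribute on the matching cell, and raises `KeyError` for leftover keys. A hedged sketch of how the new argument might be passed; the network and cell name below are stand-ins, not taken from the package:

    import mindspore.nn as nn

    class TinyNet(nn.Cell):
        """Stand-in network, only so the sketch is self-contained."""
        def __init__(self):
            super().__init__()
            self.dense = nn.Dense(4, 2)

        def construct(self, x):
            return self.dense(x)

    net = TinyNet()
    # Keys must match names from net.cells_and_names() ("dense" here);
    # unknown keys make _config_segment raise KeyError.
    pipeline_net = nn.PipelineCell(net, micro_size=4, segment_config={"dense": 1})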
mindspore/nn/wrap/grad_reducer.py CHANGED

@@ -140,34 +140,6 @@ def _tensors_allreduce_post(degree, mean, allreduce_filter, grad):
     return grad


-@reduce_opt.register("Tensor", "Bool", "Function", "Function", "Bool", "Tensor", "Bool")
-def _tensors_allreduce_ps(degree, mean, allgather, allreduce, allreduce_filter, grad, ps_parameter):
-    """
-    Apply allreduce on gradient.
-
-    Args:
-        degree (int): The mean coefficient.
-        mean (bool): When mean is true, the mean coefficient (degree) would apply on gradients.
-        allgather (Primitive): The communication operator for sparse gradients.
-        allreduce (Primitive): The communication operator for gradients.
-        allreduce_filter (bool): When it is true, allreduce would apply.
-        grad (Tensor): The gradient tensor before operation.
-        ps_parameter (bool): Use parameter server or not.
-
-    Returns:
-        Tensor, the gradient tensor after operation.
-    """
-    if ps_parameter:
-        return grad
-
-    if allreduce_filter:
-        grad = allreduce(grad)
-        if mean:
-            grad = ops.tensor_mul(grad, ops.cast(degree, ops.dtype(grad)))
-        return grad
-    return grad
-
-
 @reduce_opt.register("Tensor", "Bool", "Function", "Function", "Bool", "RowTensor")
 def _tensors_allreduce_with_sparse(degree, mean, allgather, allreduce, allreduce_filter, grad):
     """

@@ -193,37 +165,6 @@ def _tensors_allreduce_with_sparse(degree, mean, allgather, allreduce, allreduce_filter, grad):
         grad = RowTensorInner(indices, dout, grad.dense_shape)
     return grad

-
-@reduce_opt.register("Tensor", "Bool", "Function", "Function", "Bool", "RowTensor", "Bool")
-def _tensors_allreduce_with_sparse_ps(degree, mean, allgather, allreduce, allreduce_filter, grad, ps_parameter):
-    """
-    Apply allgather on gradient instead of allreduce for sparse feature.
-    Allgather is a communication operation used for distributed deep learning.
-
-    Args:
-        degree (int): The mean coefficient.
-        mean (bool): When mean is true, the mean coefficient (degree) would apply on gradients.
-        allgather (Primitive): The communication operator for sparse gradients.
-        allreduce (Primitive): The communication operator for gradients.
-        allreduce_filter (bool): When it is true, allgather would apply.
-        grad (tuple): The indices, gradient tensor and tensor_shape before operation.
-        ps_parameter (bool): Use parameter server or not.
-
-    Returns:
-        RowTensor, the gradient after operation.
-    """
-    if ps_parameter:
-        return grad
-
-    if allreduce_filter:
-        indices = allgather(grad.indices)
-        dout = allgather(grad.values)
-        if mean:
-            dout = ops.tensor_mul(dout, ops.cast(degree, ops.dtype(dout)))
-        grad = RowTensorInner(indices, dout, grad.dense_shape)
-    return grad
-
-
 _get_datatype = ops.MultitypeFuncGraph("_get_datatype")


@@ -394,7 +335,6 @@ class DistributedGradReducer(Cell):

     def __init__(self, parameters, mean=None, degree=None, fusion_type=1, group=GlobalComm.WORLD_COMM_GROUP):
         super(DistributedGradReducer, self).__init__(auto_prefix=False)
-        self._check_parallel_mode()
         self.map_ = ops.Map()
         self.mean = mean
         if mean is None:

@@ -424,9 +364,6 @@ class DistributedGradReducer(Cell):
             self.split_fusion = False
             self.allreduce = AllReduce('sum', group).add_prim_attr('fusion', fusion_type)
             self.allgather = AllGather(group)
-        ps_filter = lambda x: x.is_param_ps
-        self.ps_parameters = tuple(ps_filter(x) for x in parameters)
-        self.enable_parameter_server = any(self.ps_parameters)
         self.mode = context.get_context("mode")
         self.enable_tuple_broaden = True

@@ -447,29 +384,14 @@
         grads = self.map_(ops.partial(_cast_datatype, mstype.float32), grads)

         if self.split_fusion:
-
-
-                                     self.op_list, self.allreduce_filter, grads, self.ps_parameters)
-            else:
-                new_grad = self.map_(ops.partial(reduce_opt, self.degree, self.mean, self.allgather),
-                                     self.op_list, self.allreduce_filter, grads)
+            new_grad = self.map_(ops.partial(reduce_opt, self.degree, self.mean, self.allgather),
+                                 self.op_list, self.allreduce_filter, grads)
         else:
-
-
-                                     self.allreduce), self.allreduce_filter, grads, self.ps_parameters)
-            else:
-                new_grad = self.map_(ops.partial(reduce_opt, self.degree, self.mean, self.allgather,
-                                     self.allreduce), self.allreduce_filter, grads)
+            new_grad = self.map_(ops.partial(reduce_opt, self.degree, self.mean, self.allgather,
+                                 self.allreduce), self.allreduce_filter, grads)
         new_grad = self.map_(ops.partial(_cast_datatype), datatypes, new_grad)
         return new_grad

-    def _check_parallel_mode(self):
-        """check parallel mode"""
-        parallel_mode = context.get_auto_parallel_context('parallel_mode')
-        if context.get_context('mode') == context.GRAPH_MODE and parallel_mode in (
-                context.ParallelMode.SEMI_AUTO_PARALLEL, context.ParallelMode.AUTO_PARALLEL):
-            raise RuntimeError("{} can not use DistributedGradReducer in graph mode".format(parallel_mode))
-

 grad_scale = ops.MultitypeFuncGraph("grad_scale")
 shard_grad_scale = ops.MultitypeFuncGraph("shard_grad_scale")

@@ -587,7 +509,6 @@ class PipelineGradReducer(Cell):
     """
     def __init__(self, parameters, scale_sense=1.0, opt_shard=None):
         super(PipelineGradReducer, self).__init__(auto_prefix=False)
-        self._check_mode()
         self.accu_grads = parameters.clone(prefix="accu_grads", init="zeros")
         self.grad_reducer = Identity()
         self.degree = Tensor(1, mstype.float32)

@@ -609,9 +530,3 @@
         accu_grads = self.grad_reducer(self.accu_grads)
         new_grads = self.hyper_map(ops.partial(grad_scale, self.scale_sense * self.degree), grads, accu_grads)
         return new_grads
-
-    def _check_mode(self):
-        """check parallel mode"""
-        mode = context.get_context('mode')
-        if mode != context.GRAPH_MODE:
-            raise RuntimeError(f"PipelineGradReducer only support graph mode, but get {mode}")
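Note: the net effect of the grad_reducer hunks above is that the parameter-server (`*_ps`) overloads of `reduce_opt` and the `ps_parameters` bookkeeping are removed, and `DistributedGradReducer`/`PipelineGradReducer` no longer raise from the deleted `_check_parallel_mode`/`_check_mode` guards at construction. A sketch of typical usage against the constructor signature shown above, assuming a distributed job has already been launched so that `init()` succeeds:

    import mindspore.nn as nn
    from mindspore.communication import init

    # Sketch only: assumes the process was started by a distributed launcher
    # (e.g. msrun), so init() and the default world group are available.
    init()
    net = nn.Dense(4, 2)       # any Cell with trainable parameters
    opt = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)

    # Constructor signature shown in the hunk above:
    # (parameters, mean=None, degree=None, fusion_type=1, group=GlobalComm.WORLD_COMM_GROUP)
    grad_reducer = nn.DistributedGradReducer(opt.parameters)
    # In a training step: grads = grad_reducer(grads)   # AllReduce each gradient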
mindspore/numpy/array_creations.py CHANGED

@@ -127,7 +127,7 @@ def asarray_const(a, dtype=None):
     # If dtype is not specified, we keep consistent with numpy decision
     # only exceptions are: we use int/float32
     if dtype is None:
-        dtype = mstype.
+        dtype = mstype._pytype_to_dtype(a.dtype)  # pylint:disable=protected-access
         if dtype == mstype.float64:
             dtype = mstype.float32
         elif dtype == mstype.int64:

@@ -138,7 +138,7 @@ def asarray_const(a, dtype=None):
     if isinstance(a, onp.ndarray) and dtype is None:
         if a.dtype is onp.dtype('object'):
             raise TypeError(f"For Tensor conversion, the input_data is {a} that contains unsupported element.")
-        dtype = mstype.
+        dtype = mstype._pytype_to_dtype(a.dtype)  # pylint:disable=protected-access
         a = Tensor.from_numpy(a)

     return Tensor(a, dtype=dtype)

@@ -2622,7 +2622,7 @@ def pad(arr, pad_width, mode="constant", stat_length=None, constant_values=0,
            unique pad widths for each axis. ``((before, after),)`` yields same
            before and after pad for each axis. ``(pad,)`` or int is a shortcut
            for ``before = after = pad width`` for all axes.
-        mode (
+        mode (str, optional):
            One of the following string values:

            - constant (default): Pads with a constant value.

@@ -2660,7 +2660,7 @@ def pad(arr, pad_width, mode="constant", stat_length=None, constant_values=0,
            unique end values for each axis. ``((before, after),)`` yields same before
            and after end values for each axis. ``(constant,)`` or ``constant``
            is a shortcut for ``before = after = constant`` for all axes. Default: ``0`` .
-        reflect_type(
+        reflect_type(str, optional) can choose between \'even\' and \'odd\'. Used in
            \'reflect\', and \'symmetric\'. The \'even\' style is the default with an
            unaltered reflection around the edge value. For the \'odd\' style, the extended
            part of the `arr` is created by subtracting the reflected values from two times
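Note: the two `asarray_const` hunks above restore dtype inference through `mstype._pytype_to_dtype`, after which the surrounding code narrows 64-bit results (the visible branch maps `float64` to `float32`). A small sketch of the user-visible behaviour this implies, via the public `mindspore.numpy.asarray`:

    import numpy as onp
    import mindspore.numpy as mnp

    a = onp.ones((2, 2))        # NumPy defaults to float64
    t = mnp.asarray(a)          # dtype inferred from a.dtype, then narrowed
    print(t.dtype)              # expected: Float32, per the float64 -> float32 branch above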
mindspore/numpy/fft.py CHANGED

@@ -185,7 +185,7 @@ def rfft(a, n=None, axis=-1, norm=None):
             Default: ``None``.
         axis (int, optional): Axis over which to compute the `rfft`.
             Default: ``-1``, which means the last axis of `a` is used.
-        norm (
+        norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"``.
             Three modes are defined as,

             - ``"backward"`` (no normalization).

@@ -224,7 +224,7 @@ def irfft(a, n=None, axis=-1, norm=None):
             Default: ``None``.
         axis (int, optional): Axis over which to compute the `irfft`.
             Default: ``-1``, which means the last axis of `a` is used.
-        norm (
+        norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"``.
             Three modes are defined as,

             - ``"backward"`` (normalize by :math:`1/n`).

@@ -266,7 +266,7 @@ def fft2(a, s=None, axes=(-2, -1), norm=None):
             Default: ``None`` , which does not need to process `a`.
         axes (tuple[int], optional): The dimension along which to take the one dimensional `fft2`.
             Default: ``(-2, -1)`` , which means transform the last two dimension of `a`.
-        norm (
+        norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
             Three modes are defined as, where :math: `n = prod(s)`

             - ``"backward"`` (no normalization).

@@ -361,7 +361,7 @@ def fftn(a, s=None, axes=None, norm=None):
         axes (tuple[int], optional): The dimension along which to take the one dimensional `fftn`.
             Default: ``None`` , which means transform the all dimension of `a`,
             or the last `len(s)` dimensions if s is given.
-        norm (
+        norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
             Three modes are defined as, where :math: `n = prod(s)`

             - ``"backward"`` (no normalization).

@@ -409,7 +409,7 @@ def ifftn(a, s=None, axes=None, norm=None):
         axes (tuple[int], optional): The dimension along which to take the one dimensional `ifftn`.
             Default: ``None`` , which means transform the all dimension of `a`,
             or the last `len(s)` dimensions if s is given.
-        norm (
+        norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
             Three modes are defined as, where :math: `n = prod(s)`

             - ``"backward"`` (normalize by :math:`1/n`).

@@ -457,7 +457,7 @@ def rfft2(a, s=None, axes=(-2, -1), norm=None):
             Default: ``None`` , which does not need to process `a`.
         axes (tuple[int], optional): The dimension along which to take the one dimensional `rfft2`.
             Default: ``(-2, -1)`` , which means transform the last two dimension of `a`.
-        norm (
+        norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
             Three modes are defined as, where :math: `n = prod(s)`

             - ``"backward"`` (no normalization).

@@ -502,7 +502,7 @@ def irfft2(a, s=None, axes=(-2, -1), norm=None):
             Default: ``None`` , the axes[-1] of the `a` will be zero-padded to :math:`2*(a.shape[axes[-1]]-1)`.
         axes (tuple[int], optional): The dimension along which to take the one dimensional `irfft2`.
             Default: ``(-2, -1)`` , which means transform the last two dimension of `a`.
-        norm (
+        norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
             Three modes are defined as, where :math: `n = prod(s)`

             - ``"backward"`` (normalize by :math:`1/n`).

@@ -551,7 +551,7 @@ def rfftn(a, s=None, axes=None, norm=None):
         axes (tuple[int], optional): The dimension along which to take the one dimensional `rfftn`.
             Default: ``None`` , which means transform the all dimension of `a`,
             or the last `len(s)` dimensions if s is given.
-        norm (
+        norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
             Three modes are defined as, where :math: `n = prod(s)`

             - ``"backward"`` (no normalization).

@@ -599,7 +599,7 @@ def irfftn(a, s=None, axes=None, norm=None):
         axes (tuple[int], optional): The dimension along which to take the one dimensional `irfftn`.
             Default: ``None`` , which means transform the all dimension of `a`,
             or the last `len(s)` dimensions if s is given.
-        norm (
+        norm (str, optional): Normalization mode. Default: ``None`` that means ``"backward"`` .
             Three modes are defined as, where :math: `n = prod(s)`

             - ``"backward"`` (normalize by :math:`1/n`).
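Note: all nine `fft.py` hunks make the same docstring fix, completing the truncated `norm (` entries to `norm (str, optional)` with ``None`` meaning ``"backward"``. Only the ``"backward"`` mode is visible in these hunks; the other two of the "three modes" are presumably the usual ``"ortho"`` and ``"forward"`` (an assumption, following NumPy's convention). A short usage sketch against the documented signatures:

    import mindspore as ms
    import mindspore.numpy as mnp
    from mindspore.numpy import fft

    a = mnp.arange(8).astype(ms.float32)
    spec_default = fft.rfft(a)               # norm=None, i.e. "backward" (no normalization)
    spec_ortho = fft.rfft(a, norm="ortho")   # assumed mode name; see note above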
mindspore/numpy/utils_const.py CHANGED

@@ -70,7 +70,7 @@ def _check_dtype(dtype):
     elif dtype is float:
         dtype = mstype.float32
     else:
-        dtype = mstype.
+        dtype = mstype._pytype_to_dtype(dtype)  # pylint:disable=protected-access
     if dtype not in dtype_tuple:
         raise TypeError(f"only {all_types} are allowed for dtype, but got {type(dtype)}")
     return dtype
mindspore/{nn/reinforcement → onnx}/__init__.py CHANGED

@@ -1,4 +1,4 @@
-# Copyright
+# Copyright 2025 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -12,13 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
-"""
-
-"""
+"""onnx module."""
+
 from __future__ import absolute_import

-from
+from .onnx_export import export

-__all__ = [
-    "TensorArray",
-]
+__all__ = ["export"]
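Note: the renamed `__init__.py` makes `mindspore.onnx` a public module whose single export is `export`, backed by the new `onnx_export.py` listed above. The diff does not show `export`'s signature, so the commented call below is only an assumed, `mindspore.export`-style shape; the import path itself is what the hunk confirms:

    from mindspore import onnx

    # Confirmed by the hunk: `export` is re-exported via __all__ = ["export"].
    print(onnx.export)

    # Hypothetical call shape (network, example input, output file) - NOT taken from this diff:
    # onnx.export(net, example_input, file_name="model.onnx")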