mindspore-2.3.0rc1-cp39-cp39-manylinux1_x86_64.whl → mindspore-2.3.0rc2-cp39-cp39-manylinux1_x86_64.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +1 -1
- mindspore/_akg/akg/utils/tbe_codegen_utils.py +13 -3
- mindspore/_c_dataengine.cpython-39-x86_64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-39-x86_64-linux-gnu.so +0 -0
- mindspore/_checkparam.py +20 -0
- mindspore/_extends/parse/parser.py +1 -1
- mindspore/_extends/parse/standard_method.py +6 -5
- mindspore/_mindspore_offline_debug.cpython-39-x86_64-linux-gnu.so +0 -0
- mindspore/amp.py +5 -5
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/boost/boost_cell_wrapper.py +1 -1
- mindspore/boost/group_loss_scale_manager.py +1 -1
- mindspore/common/__init__.py +4 -2
- mindspore/common/_register_for_recompute.py +48 -0
- mindspore/common/_stub_tensor.py +1 -0
- mindspore/common/api.py +56 -4
- mindspore/common/dtype.py +5 -3
- mindspore/common/dump.py +2 -2
- mindspore/common/hook_handle.py +51 -4
- mindspore/common/initializer.py +1 -1
- mindspore/common/jit_config.py +17 -6
- mindspore/common/parameter.py +7 -2
- mindspore/common/recompute.py +247 -0
- mindspore/common/sparse_tensor.py +2 -2
- mindspore/common/symbol.py +1 -1
- mindspore/common/tensor.py +74 -36
- mindspore/communication/__init__.py +3 -3
- mindspore/communication/management.py +30 -30
- mindspore/context.py +28 -15
- mindspore/dataset/__init__.py +5 -5
- mindspore/dataset/audio/__init__.py +2 -2
- mindspore/dataset/audio/transforms.py +51 -51
- mindspore/dataset/callback/ds_callback.py +2 -2
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +3 -3
- mindspore/dataset/engine/datasets_audio.py +14 -14
- mindspore/dataset/engine/datasets_standard_format.py +3 -3
- mindspore/dataset/engine/datasets_text.py +38 -38
- mindspore/dataset/engine/datasets_user_defined.py +3 -3
- mindspore/dataset/engine/datasets_vision.py +68 -68
- mindspore/dataset/text/__init__.py +3 -3
- mindspore/dataset/text/transforms.py +26 -26
- mindspore/dataset/transforms/__init__.py +1 -1
- mindspore/dataset/vision/__init__.py +3 -3
- mindspore/dataset/vision/transforms.py +92 -92
- mindspore/dataset/vision/utils.py +1 -1
- mindspore/experimental/optim/adadelta.py +2 -2
- mindspore/experimental/optim/adagrad.py +2 -2
- mindspore/experimental/optim/adam.py +2 -2
- mindspore/experimental/optim/adamax.py +2 -2
- mindspore/experimental/optim/adamw.py +2 -2
- mindspore/experimental/optim/asgd.py +2 -2
- mindspore/experimental/optim/lr_scheduler.py +24 -20
- mindspore/experimental/optim/nadam.py +2 -2
- mindspore/experimental/optim/optimizer.py +1 -1
- mindspore/experimental/optim/radam.py +2 -2
- mindspore/experimental/optim/rmsprop.py +2 -2
- mindspore/experimental/optim/rprop.py +2 -2
- mindspore/experimental/optim/sgd.py +2 -2
- mindspore/hal/stream.py +2 -0
- mindspore/include/mindapi/base/types.h +5 -0
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6 -6
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
- mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
- mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
- mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
- mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
- mindspore/log.py +2 -2
- mindspore/mint/__init__.py +457 -0
- mindspore/mint/nn/__init__.py +430 -0
- mindspore/mint/nn/functional.py +424 -0
- mindspore/mint/optim/__init__.py +24 -0
- mindspore/mint/optim/adamw.py +186 -0
- mindspore/multiprocessing/__init__.py +4 -0
- mindspore/nn/__init__.py +3 -0
- mindspore/nn/cell.py +51 -47
- mindspore/nn/extend/__init__.py +29 -0
- mindspore/nn/extend/basic.py +140 -0
- mindspore/nn/extend/embedding.py +143 -0
- mindspore/nn/extend/layer/__init__.py +27 -0
- mindspore/nn/extend/layer/normalization.py +107 -0
- mindspore/nn/extend/pooling.py +117 -0
- mindspore/nn/generator.py +297 -0
- mindspore/nn/layer/basic.py +109 -1
- mindspore/nn/layer/container.py +2 -2
- mindspore/nn/layer/conv.py +6 -6
- mindspore/nn/layer/embedding.py +1 -1
- mindspore/nn/layer/normalization.py +21 -43
- mindspore/nn/layer/padding.py +4 -0
- mindspore/nn/optim/ada_grad.py +2 -2
- mindspore/nn/optim/adadelta.py +1 -1
- mindspore/nn/optim/adafactor.py +1 -1
- mindspore/nn/optim/adam.py +7 -7
- mindspore/nn/optim/adamax.py +2 -2
- mindspore/nn/optim/adasum.py +2 -2
- mindspore/nn/optim/asgd.py +2 -2
- mindspore/nn/optim/ftrl.py +1 -1
- mindspore/nn/optim/lamb.py +3 -3
- mindspore/nn/optim/lars.py +1 -1
- mindspore/nn/optim/lazyadam.py +2 -2
- mindspore/nn/optim/momentum.py +2 -2
- mindspore/nn/optim/optimizer.py +2 -2
- mindspore/nn/optim/proximal_ada_grad.py +2 -2
- mindspore/nn/optim/rmsprop.py +2 -2
- mindspore/nn/optim/rprop.py +2 -2
- mindspore/nn/optim/sgd.py +2 -2
- mindspore/nn/optim/thor.py +2 -2
- mindspore/nn/wrap/cell_wrapper.py +9 -9
- mindspore/nn/wrap/grad_reducer.py +5 -5
- mindspore/ops/_grad_experimental/grad_comm_ops.py +4 -2
- mindspore/ops/_vmap/vmap_grad_nn_ops.py +41 -2
- mindspore/ops/_vmap/vmap_math_ops.py +27 -8
- mindspore/ops/_vmap/vmap_nn_ops.py +66 -8
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +73 -1
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +12 -3
- mindspore/ops/auto_generate/gen_arg_handler.py +24 -0
- mindspore/ops/auto_generate/gen_extend_func.py +274 -0
- mindspore/ops/auto_generate/gen_ops_def.py +889 -22
- mindspore/ops/auto_generate/gen_ops_prim.py +3541 -253
- mindspore/ops/auto_generate/pyboost_inner_prim.py +282 -0
- mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -1
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +9 -0
- mindspore/ops/extend/__init__.py +9 -1
- mindspore/ops/extend/array_func.py +134 -27
- mindspore/ops/extend/math_func.py +3 -3
- mindspore/ops/extend/nn_func.py +363 -2
- mindspore/ops/function/__init__.py +19 -2
- mindspore/ops/function/array_func.py +463 -439
- mindspore/ops/function/clip_func.py +7 -18
- mindspore/ops/function/grad/grad_func.py +5 -5
- mindspore/ops/function/linalg_func.py +4 -4
- mindspore/ops/function/math_func.py +260 -243
- mindspore/ops/function/nn_func.py +825 -62
- mindspore/ops/function/random_func.py +73 -4
- mindspore/ops/function/sparse_unary_func.py +1 -1
- mindspore/ops/function/vmap_func.py +1 -1
- mindspore/ops/functional.py +2 -2
- mindspore/ops/op_info_register.py +1 -31
- mindspore/ops/operations/__init__.py +2 -3
- mindspore/ops/operations/_grad_ops.py +2 -107
- mindspore/ops/operations/_inner_ops.py +5 -5
- mindspore/ops/operations/_sequence_ops.py +2 -2
- mindspore/ops/operations/array_ops.py +11 -233
- mindspore/ops/operations/comm_ops.py +32 -32
- mindspore/ops/operations/custom_ops.py +7 -89
- mindspore/ops/operations/manually_defined/ops_def.py +329 -4
- mindspore/ops/operations/math_ops.py +13 -163
- mindspore/ops/operations/nn_ops.py +9 -316
- mindspore/ops/operations/random_ops.py +1 -1
- mindspore/ops/operations/sparse_ops.py +3 -3
- mindspore/ops/primitive.py +2 -2
- mindspore/ops_generate/arg_dtype_cast.py +12 -3
- mindspore/ops_generate/arg_handler.py +24 -0
- mindspore/ops_generate/gen_ops_inner_prim.py +2 -0
- mindspore/ops_generate/gen_pyboost_func.py +13 -6
- mindspore/ops_generate/pyboost_utils.py +2 -17
- mindspore/parallel/__init__.py +3 -2
- mindspore/parallel/_auto_parallel_context.py +106 -1
- mindspore/parallel/_parallel_serialization.py +34 -2
- mindspore/parallel/_utils.py +16 -0
- mindspore/parallel/algo_parameter_config.py +4 -4
- mindspore/parallel/checkpoint_transform.py +249 -77
- mindspore/parallel/cluster/process_entity/_api.py +1 -1
- mindspore/parallel/parameter_broadcast.py +1 -1
- mindspore/parallel/shard.py +1 -1
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +1 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +17 -5
- mindspore/profiler/parser/ascend_msprof_exporter.py +3 -3
- mindspore/profiler/parser/ascend_msprof_generator.py +10 -3
- mindspore/profiler/parser/ascend_op_generator.py +26 -9
- mindspore/profiler/parser/ascend_timeline_generator.py +7 -4
- mindspore/profiler/parser/profiler_info.py +11 -1
- mindspore/profiler/profiling.py +13 -5
- mindspore/rewrite/api/node.py +12 -12
- mindspore/rewrite/api/symbol_tree.py +11 -11
- mindspore/run_check/_check_version.py +1 -1
- mindspore/safeguard/rewrite_obfuscation.py +2 -2
- mindspore/train/amp.py +4 -4
- mindspore/train/anf_ir_pb2.py +8 -2
- mindspore/train/callback/_backup_and_restore.py +2 -2
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +2 -2
- mindspore/train/callback/_early_stop.py +2 -2
- mindspore/train/callback/_landscape.py +4 -4
- mindspore/train/callback/_loss_monitor.py +2 -2
- mindspore/train/callback/_on_request_exit.py +2 -2
- mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
- mindspore/train/callback/_summary_collector.py +2 -2
- mindspore/train/callback/_time_monitor.py +2 -2
- mindspore/train/dataset_helper.py +8 -3
- mindspore/train/loss_scale_manager.py +2 -2
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/mind_ir_pb2.py +22 -17
- mindspore/train/model.py +15 -15
- mindspore/train/serialization.py +18 -18
- mindspore/train/summary/summary_record.py +7 -7
- mindspore/train/train_thor/convert_utils.py +3 -3
- mindspore/version.py +1 -1
- {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +1 -1
- {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +226 -212
- {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
- {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
- {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
mindspore/parallel/_auto_parallel_context.py
CHANGED
@@ -65,6 +65,19 @@ class _ParallelOptimizerConfig:
     OPTIMIZER_WEIGHT_SHARD_SIZE = "optimizer_weight_shard_size"


+class _PipelineConfig:
+    """
+    The key of the Pipeline parallelism.
+    """
+    PIPELINE_INTERLEAVE = "pipeline_interleave"
+    PIPELINE_SCHEDULER = "pipeline_scheduler"
+
+
+class _PipelineScheduler:
+    PIPELINE_1F1B = "1f1b"
+    PIPELINE_GPIPE = "gpipe"
+
+
 class _AutoParallelContext:
     """
     _AutoParallelContext is the environment in which operations are executed
@@ -248,6 +261,16 @@ class _AutoParallelContext:
         self.check_context_handle()
         return self._context_handle.get_pipeline_result_broadcast()

+    def get_pipeline_interleave(self):
+        """Get pipeline interleave flag"""
+        self.check_context_handle()
+        return self._context_handle.get_pipeline_interleave()
+
+    def get_pipeline_scheduler(self):
+        """Get pipeline scheduler"""
+        self.check_context_handle()
+        return self._context_handle.get_pipeline_scheduler()
+
     def set_pipeline_segments(self, segments):
         """Set the segments of the pipeline"""
         if isinstance(segments, bool) or not isinstance(segments, int):
@@ -796,16 +819,87 @@ class _AutoParallelContext:
                             .format(type(enable_parallel_optimizer)))
         self._context_handle.set_enable_parallel_optimizer(enable_parallel_optimizer)

+    def set_force_fp32_communication(self, force_fp32_communication):
+        """
+        Set enable/disable force fp32 communication.
+
+        Args:
+            set_force_fp32_communication (bool): Enable/disable force fp32 communication.
+        """
+        self.check_context_handle()
+        if not isinstance(force_fp32_communication, bool):
+            raise TypeError("For 'set_auto_parallel_context', "
+                            "the argument 'force_fp32_communication' must be bool, but got the type : {}."
+                            .format(type(force_fp32_communication)))
+        self._context_handle.set_force_fp32_communication(force_fp32_communication)
+
     def get_enable_fold_pipeline(self):
         """Get parallel optimizer flag."""
         self.check_context_handle()
         return self._context_handle.get_enable_fold_pipeline()

+    def set_pipeline_config(self, pipeline_config):
+        r"""
+        Set the configuration for pipeline parallelism. The configuration provides more detailed behavior control about
+        parallel training when pipeline parallelism is enabled.
+
+        Args:
+            pipeline_config (dict): The configuration for pipeline parallelism. It supports following keys:
+
+                - pipeline_interleave(bool): Setting true enable interleave scheduler for pipeline parallelism. This
+                  scheduler requires more memory but less bubble.
+                - pipeline_scheduler(string): There are two choices, "1f1b" and "gpipe". default is "1f1b"
+
+                  - 1f1b: It requires less memory and bubble ratio, for it run backward pass when corresponding forward pass
+                    finished.
+                  - gpipe: It requires more memory and bubble ratio, for it run backward pass after all forward pass
+                    finished.
+
+        Raises:
+            TypeError: If the type of `pipeline_config` is not `dict`.
+            ValueError: If the key in `pipeline_config` not in ["pipeline_interleave", "pipeline_scheduler"].
+            ValueError: If pipeline interleave is False, pipeline scheduler is not `1f1b`.
+        """
+        self.check_context_handle()
+
+        if not isinstance(pipeline_config, dict):
+            raise TypeError("For 'set_pipeline_config', the argument 'pipeine_config' "
+                            "must be dict, but got the type : {}.".format(type(pipeline_config)))
+
+        pp_interleave = _PipelineConfig.PIPELINE_INTERLEAVE
+        pp_scheduler = _PipelineConfig.PIPELINE_SCHEDULER
+
+        for config_name in pipeline_config:
+            unknown_config = []
+            if config_name not in [pp_interleave, pp_scheduler]:
+                unknown_config.append(config_name)
+
+            if unknown_config:
+                raise ValueError("Unknown config: {}".format(unknown_config))
+
+        Validator.check_bool(
+            pipeline_config[pp_interleave], pp_interleave, pp_interleave)
+        self._context_handle.set_pipeline_interleave(
+            pipeline_config[pp_interleave])
+
+        Validator.check_string(pipeline_config[pp_scheduler], [_PipelineScheduler.PIPELINE_1F1B,
+                                                               _PipelineScheduler.PIPELINE_GPIPE])
+        if not pipeline_config[pp_interleave] and pipeline_config[pp_scheduler] != _PipelineScheduler.PIPELINE_1F1B:
+            raise ValueError(f"When pipeline_interleave is False, {pp_scheduler} is not supported")
+
+        self._context_handle.set_pipeline_scheduler(pipeline_config[pp_scheduler])
+
     def get_enable_parallel_optimizer(self):
         """Get parallel optimizer flag."""
         self.check_context_handle()
         return self._context_handle.get_enable_parallel_optimizer()

+    def get_force_fp32_communication(self):
+        """Get force fp32 communication flag."""
+        self.check_context_handle()
+        return self._context_handle.get_force_fp32_communication()
+
+
     def set_parallel_optimizer_config(self, parallel_optimizer_config):
         r"""
         Set the configure for parallel optimizer. The configure provides more detailed behavior control about parallel
@@ -1087,6 +1181,7 @@ class _AutoParallelContext:
         self.set_enable_all_gather_fusion(openstate)
         self.set_enable_reduce_scatter_fusion(openstate)

+
 def _set_ops_strategy_json_config(type="SAVE", path="", mode="all"):
     """
     Set strategy json configuration.
@@ -1110,6 +1205,7 @@ def _set_ops_strategy_json_config(type="SAVE", path="", mode="all"):
     else:
         raise KeyError("Type must be 'SAVE' or 'LOAD' and mode must be 'all' or 'principal'")

+
 _AUTO_PARALLEL_CONTEXT = None


@@ -1145,7 +1241,9 @@ _set_auto_parallel_context_func_map = {
     "full_batch": auto_parallel_context().set_full_batch,
     "dataset_strategy": auto_parallel_context().set_dataset_strategy,
     "enable_parallel_optimizer": auto_parallel_context().set_enable_parallel_optimizer,
+    "force_fp32_communication": auto_parallel_context().set_force_fp32_communication,
     "parallel_optimizer_config": auto_parallel_context().set_parallel_optimizer_config,
+    "pipeline_config": auto_parallel_context().set_pipeline_config,
     "grad_accumulation_step": auto_parallel_context().set_grad_accumulation_step,
     "all_reduce_fusion_config": auto_parallel_context().set_all_reduce_fusion_split_indices,
     "communi_parallel_mode": auto_parallel_context().set_communi_parallel_mode,
@@ -1164,6 +1262,8 @@ _get_auto_parallel_context_func_map = {
     "loss_repeated_mean": auto_parallel_context().get_loss_repeated_mean,
     "pipeline_stages": auto_parallel_context().get_pipeline_stages,
     "pipeline_result_broadcast": auto_parallel_context().get_pipeline_result_broadcast,
+    "pipeline_interleave": auto_parallel_context().get_pipeline_interleave,
+    "pipeline_scheduler": auto_parallel_context().get_pipeline_scheduler,
     "parallel_mode": auto_parallel_context().get_parallel_mode,
     "search_mode": auto_parallel_context().get_strategy_search_mode,
     "auto_parallel_search_mode": auto_parallel_context().get_auto_parallel_search_mode,
@@ -1173,6 +1273,7 @@ _get_auto_parallel_context_func_map = {
     "full_batch": auto_parallel_context().get_full_batch,
     "dataset_strategy": auto_parallel_context().get_dataset_strategy,
     "enable_parallel_optimizer": auto_parallel_context().get_enable_parallel_optimizer,
+    "force_fp32_communication": auto_parallel_context().get_force_fp32_communication,
     "grad_accumulation_step": auto_parallel_context().get_grad_accumulation_step,
     "all_reduce_fusion_config": auto_parallel_context().get_all_reduce_fusion_split_indices,
     "communi_parallel_mode": auto_parallel_context().get_communi_parallel_mode,
@@ -1192,7 +1293,7 @@ _get_auto_parallel_context_func_map = {
                  grad_accumulation_step=int, all_reduce_fusion_config=list, group_ckpt_save_file=str,
                  communi_parallel_mode=str, optimizer_weight_shard_size=int, sharding_propagation=bool,
                  optimizer_weight_shard_aggregated_save=bool, enable_alltoall=bool, comm_fusion=dict,
-                 strategy_ckpt_config=dict)
+                 strategy_ckpt_config=dict, force_fp32_communication=bool)
 def _set_auto_parallel_context(**kwargs):
     """
     Set auto parallel context.
@@ -1240,6 +1341,9 @@ def _set_auto_parallel_context(**kwargs):
         full_batch (bool): Whether to load the whole batch on each device. Default: ``False``.
         dataset_strategy Union[str, tuple]: Dataset sharding strategy. Default: "data_parallel".
         enable_parallel_optimizer (bool): Enable using optimizer segmentation or not. Default: ``False``.
+        force_fp32_communication (bool): A switch that determines whether reduce operators (AllReduce, ReduceScatter)
+            are forced to use the fp32 data type for communication during communication. True is the enable
+            switch. Default: ``False`` .
         all_reduce_fusion_config (list): Set allreduce fusion strategy by parameters indices.
         pipeline_stages (int): Set the stage information for pipeline parallel. This indicates how
             the devices are distributed alone the pipeline. The total devices will be divided into
@@ -1330,6 +1434,7 @@ def _reset_auto_parallel_context():
     - strategy_ckpt_load_file: ""
     - strategy_ckpt_save_file: ""
    - enable_parallel_optimizer: False
+    - force_fp32_communication: False
     - search_mode: 'recursive_programming
     - auto_parallel_search_mode: 'recursive_programming
     - sharding_propagation: False
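The hunks above register two new auto-parallel context keys, "pipeline_config" and "force_fp32_communication", plus their getters. A minimal usage sketch follows, assuming the public mindspore.set_auto_parallel_context / mindspore.get_auto_parallel_context wrappers forward the new keys to the handlers shown in this hunk (values follow the validation in set_pipeline_config):

import mindspore as ms

# Hedged sketch: assumes the public context API forwards the new rc2 keys unchanged.
ms.set_auto_parallel_context(
    parallel_mode="semi_auto_parallel",
    pipeline_stages=2,
    # New in 2.3.0rc2: validated by _AutoParallelContext.set_pipeline_config.
    pipeline_config={"pipeline_interleave": True, "pipeline_scheduler": "1f1b"},
    # New in 2.3.0rc2: forces AllReduce/ReduceScatter to communicate in fp32.
    force_fp32_communication=True,
)

# The matching getters are registered in _get_auto_parallel_context_func_map.
print(ms.get_auto_parallel_context("pipeline_interleave"))       # True
print(ms.get_auto_parallel_context("pipeline_scheduler"))        # "1f1b"
print(ms.get_auto_parallel_context("force_fp32_communication"))  # True

Note that set_pipeline_config rejects any scheduler other than "1f1b" when pipeline_interleave is False.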
mindspore/parallel/_parallel_serialization.py
CHANGED
@@ -259,6 +259,33 @@ def _extract_pipeline_stage_num(strategy_file):
     return pipeline_stage_num


+def _extract_src_dst_layout_map_by_src(src_strategy_file=None, dst_strategy_file=None):
+    """Extract strategy list by src strategy"""
+    src_layout_map = _extract_layout_map(src_strategy_file)
+    dst_layout_map = _extract_layout_map(dst_strategy_file)
+    if dst_layout_map is None:
+        return src_layout_map, dst_layout_map
+    for param_name in list(dst_layout_map.keys()):
+        if param_name in src_layout_map.keys():
+            continue
+        dst_layout_map.pop(param_name)
+    stage_id = 0
+    if src_strategy_file[-5:] == ".json":
+        with open(src_strategy_file, 'r') as f:
+            json_content = json.load(f)
+            strategy_items = json_content.get("parallel_strategy_item")
+            if not strategy_items:
+                raise ValueError("The strategy file {} if empty.".format(src_strategy_file))
+            stage_id = strategy_items.get(list(strategy_items.keys())[0]).get('stage')
+    else:
+        src_parallel_strategy_map = _load_protobuf_strategy(src_strategy_file)
+        strategy_items = src_parallel_strategy_map.parallel_strategy_item
+        if not strategy_items:
+            raise ValueError("The strategy file {} if empty.".format(src_strategy_file))
+        stage_id = strategy_items[0].parallel_strategys.stage
+    return src_layout_map, dst_layout_map, stage_id
+
+
 def _extract_src_dst_layout_map(rank_id, src_strategy_file=None, dst_strategy_file=None):
     """Extract strategy list"""
     src_layout_map = _extract_layout_map(src_strategy_file, None)
@@ -357,6 +384,7 @@ def _transform_parallel_checkpoint(rank_id, param_total_dict, param_attr_dict, s
     Transform model parallel dimension for distributed checkpoint files.
     """
     transform_param_dict = {}
+    device_num = -1
     for param_name, _ in param_total_dict.items():
         tensor_shape = list(param_total_dict[param_name].values())[0].shape
         from_dev_matrix = [1]
@@ -410,14 +438,18 @@ def _transform_parallel_checkpoint(rank_id, param_total_dict, param_attr_dict, s
         to_info_tuple = (to_opt_shard_size, to_dev_matrix_origin, to_tensor_map_origin, origin_tensor_shape)
         _insert_opt_shard_reshape(param_rank_map, from_info_tuple, to_info_tuple)
         transform_operator_stack = _generate_transform_operator_stack(param_rank_map, rank_id)
-
-
+        param_total_dict_copy = param_total_dict[param_name].copy()
+        _apply_tensor_transform_operators(transform_operator_stack, param_total_dict_copy, device_num)
+        transform_tensor = ms.Tensor(param_total_dict_copy[rank_id % device_num])
         requires_grad = param_attr_dict[param_name][rank_id % device_num][0]
         layerwise_parallel = param_attr_dict[param_name][rank_id % device_num][1]
         transform_para = ms.Parameter(transform_tensor, param_name, requires_grad, layerwise_parallel)
         if param_type_dict[param_name][rank_id % device_num] == "BFloat16":
             transform_para.set_dtype(ms.bfloat16)
         transform_param_dict[param_name] = transform_para
+    if device_num < 1:
+        raise ValueError("None of the parameters in checkpoint file are in either src strategy or "
+                         "dst strategy. Please check correctness of strategy files.")

     # Handle those parameter like learning_rate, global_step which not in strategy_file.
     for param_name, _ in param_total_dict.items():
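The new _extract_src_dst_layout_map_by_src helper returns the source layout map, a destination layout map filtered down to parameters that also appear in the source strategy, and the pipeline stage recorded in the source strategy file. An illustration-only sketch of calling this private helper; the strategy file paths are hypothetical placeholders:

# Illustration only: this is a private helper, and the file paths below are made up.
from mindspore.parallel._parallel_serialization import _extract_src_dst_layout_map_by_src

src_layout, dst_layout, stage_id = _extract_src_dst_layout_map_by_src(
    src_strategy_file="./src_pipeline_strategy.ckpt",  # protobuf (.ckpt) or .json strategy file
    dst_strategy_file="./dst_strategy.ckpt",
)
# dst_layout keeps only the parameters that also exist in the src strategy;
# stage_id is the pipeline stage stored in the first item of the src strategy file.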
mindspore/parallel/_utils.py
CHANGED
@@ -191,6 +191,22 @@ def _origin_shapes(shapes):
     return new_shapes


+def _dynamic_shape_for_dataset(dataset_shapes, dynamic_shapes):
+    """convert static dataset shapes to dynamic shape"""
+    if len(dataset_shapes) != len(dynamic_shapes):
+        raise ValueError("The dataset shapes size of {} is not equal to "
+                         "dynamic shapes size of {}".format(dataset_shapes, dynamic_shapes))
+    ret = dataset_shapes
+    for i in range(len(dynamic_shapes)):
+        if len(dataset_shapes[i]) != len(dynamic_shapes[i]):
+            raise ValueError("The dataset shapes size of {} is not equal to "
+                             "dynamic shapes size of {}".format(dataset_shapes, dynamic_shapes))
+        for j in range(len(dynamic_shapes[i])):
+            if dynamic_shapes[i][j] == -1:
+                ret[i][j] = -1
+    return ret
+
+
 def _to_full_tensor(elem, global_device_num, global_rank, scaling_sens=None):
     """Convert numpy to tensor, expanding batch dimension according to device_num, adapt to feed the data
     from host solution.
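_dynamic_shape_for_dataset simply propagates each -1 (dynamic) axis from the user-declared dynamic shapes onto the static shapes reported by the dataset. A small illustration with made-up shapes (the helper is private; this only clarifies the in/out contract):

# Made-up shapes: every axis marked -1 in dynamic_shapes becomes -1 in the result.
from mindspore.parallel._utils import _dynamic_shape_for_dataset

dataset_shapes = [[32, 3, 224, 224], [32]]   # static shapes reported by the dataset
dynamic_shapes = [[-1, 3, 224, 224], [-1]]   # -1 marks axes declared dynamic by the user

print(_dynamic_shape_for_dataset(dataset_shapes, dynamic_shapes))
# -> [[-1, 3, 224, 224], [-1]]   (the input list is modified in place and returned)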
mindspore/parallel/algo_parameter_config.py
CHANGED
@@ -229,7 +229,7 @@ def set_algo_parameters(**kwargs):
     """
     Set parameters in the algorithm for parallel strategy searching. See a typical use in
     `test_auto_parallel_resnet.py
-    <https://gitee.com/mindspore/mindspore/blob/
+    <https://gitee.com/mindspore/mindspore/blob/master/tests/ut/python/parallel/test_auto_parallel_resnet.py>`_.

     Note:
         The attribute name is required. This interface works ONLY in AUTO_PARALLEL mode.
@@ -266,14 +266,14 @@ def set_algo_parameters(**kwargs):

         For the Ascend devices, users need to prepare the rank table, set rank_id and device_id.
         Please see the `rank table startup
-        <https://www.mindspore.cn/tutorials/experts/en/
+        <https://www.mindspore.cn/tutorials/experts/en/master/parallel/rank_table.html>`_
         for more details.

         For the GPU devices, users need to prepare the host file and mpi, please see the `mpirun startup
-        <https://www.mindspore.cn/tutorials/experts/en/
+        <https://www.mindspore.cn/tutorials/experts/en/master/parallel/mpirun.html>`_ .

         For the CPU device, users need to write a dynamic cluster startup script, please see the `Dynamic Cluster
-        Startup <https://www.mindspore.cn/tutorials/experts/en/
+        Startup <https://www.mindspore.cn/tutorials/experts/en/master/parallel/dynamic_cluster.html>`_ .

         >>> import numpy as np
         >>> import mindspore as ms