mindspore 2.7.0__cp310-cp310-win_amd64.whl → 2.7.0rc1__cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +1 -1
- mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +2 -2
- mindspore/_extends/builtin_operations.py +3 -3
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/__init__.py +3 -3
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -0
- mindspore/_extends/parse/parser.py +22 -28
- mindspore/_extends/parse/standard_method.py +1 -15
- mindspore/_extends/pijit/pijit_func_white_list.py +5 -2
- mindspore/_extends/remote/kernel_build_server_ascend.py +75 -0
- mindspore/amp.py +18 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/common/__init__.py +12 -18
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +38 -102
- mindspore/common/_utils.py +1 -9
- mindspore/common/api.py +106 -155
- mindspore/common/{dynamic_shape/auto_dynamic_shape.py → auto_dynamic_shape.py} +23 -17
- mindspore/common/dtype.py +57 -98
- mindspore/common/dump.py +1 -1
- mindspore/common/file_system.py +9 -59
- mindspore/common/hook_handle.py +3 -22
- mindspore/common/np_dtype.py +3 -3
- mindspore/common/parameter.py +20 -4
- mindspore/common/recompute.py +4 -2
- mindspore/common/tensor.py +52 -38
- mindspore/communication/_hccl_management.py +297 -0
- mindspore/context.py +21 -15
- mindspore/dataset/__init__.py +1 -1
- mindspore/dataset/audio/transforms.py +1 -1
- mindspore/dataset/core/config.py +1 -35
- mindspore/dataset/engine/datasets.py +315 -330
- mindspore/dataset/engine/datasets_user_defined.py +22 -38
- mindspore/dataset/transforms/c_transforms.py +2 -2
- mindspore/dataset/transforms/transforms.py +3 -3
- mindspore/dataset/vision/__init__.py +1 -1
- mindspore/dataset/vision/py_transforms.py +8 -8
- mindspore/dataset/vision/transforms.py +5 -17
- mindspore/dataset/vision/utils.py +21 -632
- mindspore/device_context/ascend/op_tuning.py +1 -35
- mindspore/dnnl.dll +0 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -3
- mindspore/include/api/cell.h +4 -28
- mindspore/include/api/cfg.h +7 -24
- mindspore/include/api/context.h +0 -1
- mindspore/include/api/delegate.h +2 -0
- mindspore/include/api/dual_abi_helper.h +19 -100
- mindspore/include/api/graph.h +1 -14
- mindspore/include/api/kernel.h +3 -16
- mindspore/include/api/kernel_api.h +1 -9
- mindspore/include/api/metrics/accuracy.h +0 -9
- mindspore/include/api/model.h +1 -5
- mindspore/include/api/model_group.h +0 -4
- mindspore/include/api/model_parallel_runner.h +0 -2
- mindspore/include/api/status.h +10 -48
- mindspore/include/api/types.h +1 -6
- mindspore/include/dataset/constants.h +0 -9
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/tools/cifar10.py +2 -3
- mindspore/mindrecord/tools/cifar10_to_mr.py +5 -5
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mindspore_ops_host.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/distributed/__init__.py +0 -4
- mindspore/mint/distributed/distributed.py +14 -217
- mindspore/mint/nn/layer/_functions.py +2 -1
- mindspore/mint/nn/layer/conv.py +6 -6
- mindspore/mint/nn/layer/normalization.py +3 -3
- mindspore/nn/cell.py +174 -216
- mindspore/nn/layer/activation.py +2 -4
- mindspore/nn/layer/basic.py +13 -7
- mindspore/nn/layer/image.py +1 -1
- mindspore/nn/optim/adam.py +3 -1
- mindspore/nn/optim/lamb.py +3 -1
- mindspore/nn/optim/tft_wrapper.py +3 -2
- mindspore/nn/probability/distribution/_utils/utils.py +2 -2
- mindspore/nn/wrap/cell_wrapper.py +5 -39
- mindspore/nn/wrap/grad_reducer.py +15 -0
- mindspore/numpy/array_creations.py +2 -2
- mindspore/numpy/utils_const.py +1 -1
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
- mindspore/ops/_op_impl/cpu/__init__.py +0 -1
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +2 -12
- mindspore/ops/auto_generate/gen_extend_func.py +4 -4
- mindspore/ops/auto_generate/gen_ops_def.py +16 -290
- mindspore/ops/auto_generate/gen_ops_prim.py +76 -563
- mindspore/ops/composite/base.py +1 -1
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/function/__init__.py +0 -1
- mindspore/ops/function/array_func.py +6 -10
- mindspore/ops/function/debug_func.py +2 -4
- mindspore/ops/function/grad/grad_func.py +12 -4
- mindspore/ops/function/math_func.py +32 -44
- mindspore/ops/function/nn_func.py +20 -18
- mindspore/ops/functional.py +1 -2
- mindspore/ops/functional_overload.py +12 -23
- mindspore/ops/operations/_inner_ops.py +12 -11
- mindspore/ops/operations/array_ops.py +50 -4
- mindspore/ops/operations/comm_ops.py +15 -1
- mindspore/ops/operations/custom_ops.py +4 -10
- mindspore/ops/operations/debug_ops.py +6 -6
- mindspore/ops/operations/manually_defined/ops_def.py +12 -12
- mindspore/ops/operations/math_ops.py +5 -5
- mindspore/ops/operations/nn_ops.py +1 -1
- mindspore/ops/primitive.py +10 -3
- mindspore/ops/tensor_method.py +7 -16
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +16 -0
- mindspore/parallel/_auto_parallel_context.py +15 -5
- mindspore/parallel/_parallel_serialization.py +2 -3
- mindspore/parallel/_ps_context.py +2 -2
- mindspore/parallel/_transformer/transformer.py +4 -4
- mindspore/parallel/_utils.py +11 -5
- mindspore/parallel/auto_parallel.py +9 -23
- mindspore/parallel/checkpoint_transform.py +0 -2
- mindspore/parallel/cluster/process_entity/_api.py +1 -4
- mindspore/parallel/cluster/run.py +3 -5
- mindspore/parallel/function/reshard_func.py +5 -6
- mindspore/parallel/nn/parallel_cell_wrapper.py +3 -40
- mindspore/parallel/nn/parallel_grad_reducer.py +8 -0
- mindspore/parallel/shard.py +21 -7
- mindspore/parallel/transform_safetensors.py +4 -10
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +9 -10
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +1 -1
- mindspore/profiler/common/msprof_cmd_tool.py +2 -2
- mindspore/profiler/common/path_manager.py +0 -9
- mindspore/profiler/common/profiler_context.py +2 -25
- mindspore/profiler/common/profiler_meta_data.py +0 -1
- mindspore/profiler/common/profiler_op_analyse.py +6 -10
- mindspore/{ops/_op_impl/cpu/joinedstr_op.py → profiler/common/validator/__init__.py} +1 -15
- mindspore/profiler/common/validator/validate_path.py +84 -0
- mindspore/profiler/dynamic_profiler.py +46 -91
- mindspore/profiler/envprofiler.py +5 -30
- mindspore/profiler/experimental_config.py +1 -16
- mindspore/profiler/platform/cpu_profiler.py +4 -10
- mindspore/profiler/platform/npu_profiler.py +1 -1
- mindspore/profiler/profiler.py +145 -193
- mindspore/profiler/profiler_action_controller.py +1 -1
- mindspore/profiler/profiler_interface.py +2 -2
- mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
- mindspore/runtime/__init__.py +4 -6
- mindspore/runtime/executor.py +0 -27
- mindspore/runtime/memory.py +0 -1
- mindspore/runtime/thread_bind_core.py +1 -1
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/_utils.py +3 -3
- mindspore/train/amp.py +3 -0
- mindspore/train/callback/_callback.py +1 -2
- mindspore/train/callback/_checkpoint.py +8 -1
- mindspore/train/callback/_flops_collector.py +6 -10
- mindspore/train/callback/_train_fault_tolerance.py +7 -3
- mindspore/train/data_sink.py +4 -4
- mindspore/train/dataset_helper.py +5 -5
- mindspore/train/model.py +20 -4
- mindspore/train/serialization.py +15 -35
- mindspore/train/train_thor/model_thor.py +2 -2
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/utils.py +8 -8
- mindspore/version.py +1 -1
- {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +1 -1
- {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +193 -192
- mindspore/_extends/parallel_compile/akg_compiler/custom.py +0 -1109
- mindspore/common/dynamic_shape/__init__.py +0 -0
- mindspore/common/dynamic_shape/enable_dynamic.py +0 -197
- /mindspore/common/{dynamic_shape/_auto_dynamic.py → _auto_dynamic.py} +0 -0
- {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0
mindspore/ops/operations/_inner_ops.py
CHANGED
@@ -25,10 +25,10 @@ from mindspore.ops.operations._scalar_ops import bit_or, bit_and
 from mindspore.ops import signature as sig
 from mindspore.ops.operations.math_ops import _infer_shape_reduce
 from mindspore.ops.primitive import PrimitiveWithCheck, PrimitiveWithInfer, prim_attr_register, Primitive, \
-    _check_contains_variable
+    _run_op, _check_contains_variable
 from mindspore._c_expression import TensorPy as Tensor_
 from mindspore._c_expression import typing, HookType
-from mindspore._c_expression import pyboost_generator
+from mindspore._c_expression import pyboost_generator
 from mindspore import _checkparam as validator
 from mindspore.common import dtype as mstype
 from mindspore.common.parameter import Parameter
@@ -1565,11 +1565,10 @@ class CellBackwardHook(PrimitiveWithInfer):
         self.add_prim_attr("cell_id", cell_id)
         self.grad_output = None

-    def __call__(self, args):
-
-        if not
-
-        is_tuple = False
+    def __call__(self, *args):
+        # If args is empty, just return.
+        if not args:
+            return args

         # Collect the indices and values of arguments that are instances of Tensor
         tensors_idx = []
@@ -1579,18 +1578,20 @@ class CellBackwardHook(PrimitiveWithInfer):
                 tensors_idx.append(i)
                 tensors.append(arg)

-        # If there are no Tensor arguments, return original
+        # If there are no Tensor arguments, return the single argument or the original tuple
         if not tensors:
-            return args if
+            return args[0] if len(args) == 1 else args

-        new_tensors =
+        new_tensors = _run_op(self, self.name, tensors)
+        if not isinstance(new_tensors, tuple):
+            new_tensors = (new_tensors,)

         # Replace the original Tensor arguments with the processed ones
         arg_list = list(args)
         for idx, val in zip(tensors_idx, new_tensors):
             arg_list[idx] = val

-        return
+        return arg_list[0] if len(arg_list) == 1 else tuple(arg_list)

     def infer_shape(self, *inputs_shape):
         if len(inputs_shape) == 1:
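The `CellBackwardHook.__call__` rework above switches from a single pre-packed argument to a variadic `*args` signature, runs the collected tensors through `_run_op`, and unwraps single-element results. A minimal plain-Python sketch of the same collect/replace/unwrap pattern (stand-in op only, no MindSpore internals) shows why the final `arg_list[0] if len(arg_list) == 1` step matters to callers:

```python
# Illustrative sketch only: mimics the *args collection/unwrap pattern from the
# hunk above with a stand-in "op"; it does not use MindSpore internals.
def run_op(values):
    # Stand-in for the real backward-hook op; returns a tuple of processed values.
    return tuple(v * 2 for v in values)

def call_hook(*args):
    if not args:
        return args
    # Collect positions of the "tensor-like" arguments (here: ints).
    idx = [i for i, a in enumerate(args) if isinstance(a, int)]
    vals = [args[i] for i in idx]
    if not vals:
        return args[0] if len(args) == 1 else args
    new_vals = run_op(vals)
    if not isinstance(new_vals, tuple):
        new_vals = (new_vals,)
    out = list(args)
    for i, v in zip(idx, new_vals):
        out[i] = v
    # Single argument in -> single value out, matching the new behavior.
    return out[0] if len(out) == 1 else tuple(out)

print(call_hook(3))        # 6, not (6,)
print(call_hook(3, "x"))   # (6, 'x')
```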
mindspore/ops/operations/array_ops.py
CHANGED
@@ -42,7 +42,7 @@ from ..auto_generate import (
     NonZero, ResizeNearestNeighbor, Identity, Split, CumSum, CumProd,
     MaskedSelect, Cummax, Cummin, Argmin, Concat, UnsortedSegmentSum, UniqueConsecutive,
     ScalarToTensor, Triu, BroadcastTo, StridedSlice, Select, TopkExt,
-    SearchSorted, Meshgrid, Squeeze, Slice, TransposeExtView
+    SearchSorted, Meshgrid, Squeeze, Slice, TransposeExtView)
 from .manually_defined import Rank, Shape, Tile, Cast, Ones, Zeros, TypeAs
 from ..auto_generate import ArgMaxWithValue, ArgMinWithValue
 from ..auto_generate import TensorScatterElements as TensorScatterElementsExt
@@ -1048,11 +1048,11 @@ class Fill(PrimitiveWithCheck):
         self.init_prim_io_names(inputs=['type', 'shape', 'value'], outputs=['y'])

     def __call__(self, dtype, dims, x):
-        if dtype not in mstype.all_types:
+        if dtype not in mstype.all_types and dtype not in [mstype.uint16, mstype.uint32, mstype.uint64]:
             raise TypeError(
                 f"For \'{self.name}\', the supported data type is ['bool', 'int8', 'int16', 'int32', 'int64', 'uint8', "
                 "'uint16', 'uint32', 'uint64','float16', 'float32', 'float64'], but got an invalid dtype!.")
-        x_nptype = mstype.
+        x_nptype = mstype.dtype_to_nptype(dtype)
         if not isinstance(dims, Tensor) and not isinstance(dims, tuple):
             raise TypeError(f"For \'{self.name}\', input[1] must be tensor.")
         if not isinstance(x, Tensor) and not isinstance(x, float) and not isinstance(x, int):
@@ -1065,7 +1065,7 @@ class Fill(PrimitiveWithCheck):
         return Tensor(ret, dtype=dtype)

     def infer_value(self, dtype, dims, x):
-        x_nptype = mstype.
+        x_nptype = mstype.dtype_to_nptype(dtype)
         if dims is not None and None not in dims and x is not None:
             if isinstance(dims, Tensor):
                 dims = dims.asnumpy()
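Both `Fill` hunks land on `mstype.dtype_to_nptype`, which maps a MindSpore dtype to its NumPy counterpart so the primitive can materialize the constant array. A hedged sketch of that mapping, assuming a standard MindSpore install and not tied to this specific diff:

```python
# Illustrative sketch: dtype_to_nptype converts a MindSpore dtype to the
# equivalent NumPy type, which Fill.__call__/infer_value use to build the array.
import numpy as np
import mindspore as ms
from mindspore.common import dtype as mstype

np_type = mstype.dtype_to_nptype(mstype.float32)   # numpy.float32
arr = np.full((2, 3), 1.5, dtype=np_type)
print(ms.Tensor(arr, dtype=mstype.float32))
```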
@@ -3974,6 +3974,52 @@ class RangeV2(Primitive):
         validator.check_positive_int(maxlen, "maxlen", self.name)


+class MaskedScatter(Primitive):
+    """
+    Updates the value in the input with value in `updates` according to the `mask`.
+
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
+    Inputs:
+        - **x** (Tensor): The input Tensor to be updated.
+        - **mask** (Tensor[bool]): The mask Tensor indicating which elements should be modified or replaced.
+          The shapes of `mask` and `x` must be the same or broadcastable.
+        - **updates** (Tensor): The values to scatter into the target tensor `x`. It has the same data type as `x`. The
+          number of elements must be greater than or equal to the number of True's in `mask`.
+
+    Outputs:
+        Tensor, with the same type and shape as `x`.
+
+    Raises:
+        TypeError: If `x`, `mask` or `updates` is not a Tensor.
+        TypeError: If data type of `x` is not be supported.
+        TypeError: If dtype of `mask` is not bool.
+        TypeError: If the dim of `x` less than the dim of `mask`.
+        ValueError: If `mask` can not be broadcastable to `x`.
+        ValueError: If the number of elements in `updates` is less than number of True's in `mask`.
+
+    Supported Platforms:
+        ``Ascend`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> input_x = Tensor(np.array([1., 2., 3., 4.]), mindspore.float32)
+        >>> mask = Tensor(np.array([True, True, False, True]), mindspore.bool_)
+        >>> updates = Tensor(np.array([5., 6., 7.]), mindspore.float32)
+        >>> output = ops.MaskedScatter()(input_x, mask, updates)
+        >>> print(output)
+        [5. 6. 3. 7.]
+    """
+
+    @prim_attr_register
+    def __init__(self):
+        """Initialize MaskedScatter"""
+        self.init_prim_io_names(inputs=['x', 'mask', 'updates'], outputs=['y'])
+
+
 class _TensorScatterOp(PrimitiveWithInfer):
     """
     Defines TensorScatter Base Operators
mindspore/ops/operations/comm_ops.py
CHANGED
@@ -18,9 +18,10 @@
 from __future__ import absolute_import
 from __future__ import division

+import os
 from mindspore.common import Tensor
 from mindspore import _checkparam as validator
-from mindspore.communication.management import get_rank, get_group_size, GlobalComm, _get_group
+from mindspore.communication.management import get_rank, get_group_size, GlobalComm, _get_group, _host_distribute
 from mindspore.common import dtype as mstype
 from mindspore.ops.primitive import PrimitiveWithInfer, PrimitiveWithCheck, Primitive, prim_attr_register
 from mindspore.common.api import context
@@ -97,6 +98,17 @@ def check_collective_target_dtype(data_name, data_dtype, prim_name):
     validator.check_tensor_dtype_valid(data_name, data_dtype, valid_dtype, prim_name)


+def check_hcom_group_valid(group, prim_name=None):
+    """Check if hcom group is valid."""
+    msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
+    sim_level = os.getenv("MS_SIMULATION_LEVEL")
+    no_sim = (sim_level is None or sim_level.strip() == '')
+    if no_sim and (not _host_distribute()) and context.get_context("mode") == context.PYNATIVE_MODE and \
+            group != GlobalComm.WORLD_COMM_GROUP:
+        raise RuntimeError(f"{msg_prefix} 'group' only support 'hccl_world_group' in pynative mode, but got "
+                           f"'group': {group}. Please start by using mpi-run.")
+
+
 class AllReduce(Primitive):
     """
     Reduces tensors across all devices in such a way that all devices will get the same final result,
@@ -175,6 +187,7 @@ class AllReduce(Primitive):
         if not isinstance(self.group, str):
             raise TypeError(f"For '{self.name}', the 'group' must be str, "
                             f"but got {type(self.group).__name__}.")
+        check_hcom_group_valid(self.group, prim_name=self.name)
         self.op = op
         self.add_prim_attr('group', self.group)
         self.add_prim_attr('fusion', 0)
@@ -707,6 +720,7 @@ class Broadcast(PrimitiveWithInfer):
         """Initialize Broadcast."""
         validator.check_value_type('root_rank', root_rank, (int,), self.name)
         validator.check_value_type('group', _get_group(group), (str,), self.name)
+        check_hcom_group_valid(group, prim_name=self.name)
         self.add_prim_attr('group', _get_group(group))
         self.add_prim_attr('no_eliminate', True)

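The new `check_hcom_group_valid` helper rejects non-default communication groups in PyNative mode unless simulation (`MS_SIMULATION_LEVEL`) or host-side distribution is active, and `AllReduce`/`Broadcast` now call it at construction. A standalone sketch of the guard logic, with stand-in flags for the internal `_host_distribute()` and mode checks:

```python
# Standalone sketch of the guard added above. MS_SIMULATION_LEVEL and the
# world-group name come from the hunk; pynative_mode/host_distribute are
# stand-ins for the internal context checks.
import os

WORLD_COMM_GROUP = "hccl_world_group"

def check_hcom_group_valid(group, pynative_mode, host_distribute, prim_name=None):
    msg_prefix = f"For '{prim_name}', the" if prim_name else "The"
    sim_level = os.getenv("MS_SIMULATION_LEVEL")
    no_sim = sim_level is None or sim_level.strip() == ""
    if no_sim and not host_distribute and pynative_mode and group != WORLD_COMM_GROUP:
        raise RuntimeError(f"{msg_prefix} 'group' only support 'hccl_world_group' "
                           f"in pynative mode, but got 'group': {group}.")

check_hcom_group_valid("hccl_world_group", pynative_mode=True, host_distribute=False)   # passes
# check_hcom_group_valid("my_group", True, False, "AllReduce")  # would raise RuntimeError
```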
mindspore/ops/operations/custom_ops.py
CHANGED
@@ -1075,18 +1075,17 @@ class Custom(ops.PrimitiveWithInfer):
                 if isinstance(arg_dtype, mstype.TensorType):
                     arg_dtype = arg_dtype.element_type()
                 fake_arg = np.zeros(arg["shape"]).astype(
-                    mstype.
+                    mstype.dtype_to_nptype(arg_dtype))
                 fake_input.append(fake_arg)

         fake_output = self.func(*fake_input)

         if hasattr(fake_output, 'shape'):
             infer_shape = fake_output.shape
-
-            infer_dtype = mstype.TensorType(mstype._pytype_to_dtype(fake_output.dtype))
+            infer_dtype = mstype.TensorType(mstype.pytype_to_dtype(fake_output.dtype))
         else:
             infer_shape = (1,)
-            infer_dtype = mstype.
+            infer_dtype = mstype.pytype_to_dtype(fake_output.dtype)

         infer_value = Tensor(fake_output) if enable_infer_value else None

@@ -1247,7 +1246,6 @@ class CustomOpBuilder:
         self.ldflags = ldflags
         self.build_dir = kwargs.get("build_dir")
         self.enable_atb = kwargs.get("enable_atb", False)
-        self.debug_mode = kwargs.get("debug_mode", False)
         self._ms_path = os.path.dirname(os.path.abspath(ms.__file__))
         if self.enable_atb:
             if backend is not None and backend != "Ascend":
@@ -1318,8 +1316,6 @@ class CustomOpBuilder:
         """
         flags = [f'-DMS_EXTENSION_NAME={self.name}', '-D_GLIBCXX_USE_CXX11_ABI=0', '-DENABLE_FAST_HASH_TABLE=1']
         flags += ['-std=c++17', '-fstack-protector-all', '-fPIC', '-pie']
-        if self.debug_mode:
-            flags.append('-g')
         if self.backend == "Ascend":
             flags.append('-DCUSTOM_ASCEND_OP')
             if self.enable_atb:
@@ -1336,9 +1332,7 @@ class CustomOpBuilder:
             list[str], A list of linker flags.
         """
         flags = ['-shared']
-        flags += ['-Wl,-z,relro,-z,now,-z,noexecstack', '-Wl,--disable-new-dtags,--rpath']
-        if not self.debug_mode:
-            flags.append('-s') # strip
+        flags += ['-Wl,-z,relro,-z,now,-z,noexecstack', '-Wl,--disable-new-dtags,--rpath', '-s']
         flags += [
             f"-L{os.path.abspath(os.path.join(self._ms_path, 'lib'))}",
             '-lmindspore_core',
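The `Custom` infer path in this diff ends up on the public `mstype.pytype_to_dtype` together with `mstype.dtype_to_nptype` to round-trip between NumPy and MindSpore dtypes. A hedged sketch of that conversion as the infer code applies it to a fake NumPy output, assuming these helpers behave as in a standard MindSpore install:

```python
# Hedged sketch: convert a NumPy output's dtype to a MindSpore dtype and wrap
# it as a tensor type, mirroring the infer logic edited above.
import numpy as np
from mindspore.common import dtype as mstype

fake_output = np.zeros((2, 2), dtype=np.float32)
ms_dtype = mstype.pytype_to_dtype(fake_output.dtype)   # mindspore float32
infer_dtype = mstype.TensorType(ms_dtype)              # tensor type with float32 element
print(ms_dtype, infer_dtype)
```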
mindspore/ops/operations/debug_ops.py
CHANGED
@@ -309,8 +309,6 @@ class TensorDump(Primitive):
         """Initialize TensorDump."""
         if security.enable_security():
             raise ValueError('The TensorDump is not supported, please without `-s on` and recompile source.')
-        if input_output not in ['in', 'out']:
-            raise ValueError(f"The 'input_output' argument should be one of ['in', 'out'], but got: {input_output}")
         self.add_prim_attr("side_effect_io", True)
         self.add_prim_attr("channel_name", "ms_tensor_dump")

@@ -503,11 +501,13 @@ class DumpGradient(Primitive):
     def __init__(self):
         pass

+    def _dump_hook(self, dout):
+        P.TensorDump()(self.bwd_dump_path, dout)
+        return dout
+
     def __call__(self, path, x, input_output):
-
-
-            return dout
-        x = P.InsertGradientOf(_dump_hook)(x)
+        self.bwd_dump_path = path
+        x = P.InsertGradientOf(self._dump_hook)(x)
         return x


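In `DumpGradient`, the dump hook becomes a bound method that reads its target path from `self.bwd_dump_path` instead of living in a per-call closure, and `__call__` now stores the path before registering the hook via `InsertGradientOf`. A plain-Python sketch of that pattern with stand-ins only (no MindSpore ops):

```python
# Plain-Python sketch of the bound-method hook pattern used above; the print
# stands in for P.TensorDump and the direct call stands in for InsertGradientOf.
class DumpGradientSketch:
    def _dump_hook(self, dout):
        print(f"dump {dout} to {self.bwd_dump_path}")
        return dout

    def __call__(self, path, x):
        self.bwd_dump_path = path   # remembered by the hook when backward runs
        return self._dump_hook(x)   # stand-in for InsertGradientOf(self._dump_hook)(x)

dg = DumpGradientSketch()
print(dg("grads/out.npy", 42))   # dumps, then returns 42
```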
mindspore/ops/operations/manually_defined/ops_def.py
CHANGED
@@ -977,7 +977,7 @@ class ScalarToTensor(PrimitiveWithInfer):
     def __call__(self, x, dtype=mstype.float32):
         validator.check_value_type("x", x, [bool, int, float], self.name)
         validator.check_subclass("dtype", dtype, mstype.number, self.name)
-        data_type = mstype.
+        data_type = mstype.dtype_to_nptype(dtype)
         return Tensor(np.array(x, data_type), dtype=dtype)


@@ -1149,7 +1149,7 @@ def scalar_cast(input_x, input_y):
     Args:
         input_x (scalar): The input scalar.
         input_y (mindspore.dtype): The type to be cast. Only constant value is allowed.
-            The value should only be mindspore.int64, mindspore.float64, or mindspore.bool.
+            The value should only be mindspore.int64, mindspore.float64, or mindspore.bool\_.

     Returns:
         Scalar, the type is the same as the python type corresponding to `input_y`.
@@ -1714,7 +1714,7 @@ def infer_value_for_Arange(start, end, step, dtype=None):
     if has_float:
         np_dtype = np.float32
     else:
-        np_dtype = mstype.
+        np_dtype = mstype.dtype_to_nptype(typing.type_id_to_type(dtype))
     return Tensor(np.arange(start, end, step, dtype=np_dtype))


@@ -1738,7 +1738,7 @@ def _infer_value_for_ReduceExtand(input_x, axis, keep_dims, dtype, prim_name):
     else:
         axis = tuple(range(len(value.shape)))
     if dtype is not None:
-        np_dtype = mstype.
+        np_dtype = mstype.dtype_to_nptype(typing.type_id_to_type(dtype))
         value = np_reduce_extand_func(value, axis, dtype=np_dtype, keepdims=keep_dims)
     else:
         value = np_reduce_extand_func(value, axis, keepdims=keep_dims)
@@ -1771,7 +1771,7 @@ def infer_value_for_Cast(x, dst_type_enum=None):
    if x is None or dst_type_enum is None:
        return None
    dst_type = typing.type_id_to_type(dst_type_enum)
-   src_type = mstype.
+   src_type = mstype.get_py_obj_dtype(x)
    validator.check_subclass("input_x", src_type, [mstype.tensor_type, mstype.number], "Cast")
    validator.check_subclass("type", dst_type, mstype.number, "Cast")

@@ -1781,7 +1781,7 @@ def infer_value_for_Cast(x, dst_type_enum=None):
        dst_type = dst_type.element_type()

    value = None
-   np_dst_type = mstype.
+   np_dst_type = mstype.dtype_to_nptype(dst_type)
    if isinstance(x, (int, float)):
        value = Tensor(np.array(x).astype(np_dst_type), dtype=dst_type)
    else:
@@ -2839,8 +2839,8 @@ class WhileLoop(Primitive):
             while cond_func(val):
                 val = loop_func(val)
         except Exception as e:
-            raise ValueError(
-
+            raise ValueError("Invalid loop_func, please check input arguments and \
+return value, error info: {}".format(e))
         return val


@@ -2935,8 +2935,8 @@ class Scan(Primitive):
                 ys.append(y)
                 i = i + 1
         except Exception as e:
-            raise ValueError(
-
+            raise ValueError("Invalid loop_func, please check input arguments and \
+return value, error info: {}".format(e))
         return carry, ys


@@ -3011,6 +3011,6 @@ class ForiLoop(Primitive):
             for i in range(lower, upper):
                 val = loop_func(i, val)
         except Exception as e:
-            raise ValueError(
-
+            raise ValueError("Invalid loop_func, please check input arguments and \
+return value, error info: {}".format(e))
         return val
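The `WhileLoop`, `Scan`, and `ForiLoop` hunks only rewrite the `ValueError` raised when the user-supplied `loop_func` fails inside the Python fallback. A hedged usage sketch of the fallback path being edited, assuming the public `ops.WhileLoop` callable takes `(cond_func, loop_func, init_val)` as the fallback body above suggests:

```python
# Hedged sketch: the try/except edited above wraps exactly this kind of call;
# a loop_func that raises is re-raised as ValueError("Invalid loop_func, ...").
from mindspore import ops

def cond_func(val):
    return val < 10

def loop_func(val):
    return val + 3

while_loop = ops.WhileLoop()
print(while_loop(cond_func, loop_func, 1))   # 1 -> 4 -> 7 -> 10, prints 10
```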
mindspore/ops/operations/math_ops.py
CHANGED
@@ -882,7 +882,7 @@ class Sub(_MathBinaryOp):
     Note:
         - When the two inputs have different shapes, they must be able to broadcast to a common shape.
         - The two inputs can not be bool type at the same time,
-          [True, Tensor(True), Tensor(np.array([True]))] are all considered bool type.
+          [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
         - The two inputs comply with the implicit type conversion rules to make the data types
           consistent.

@@ -890,7 +890,7 @@ class Sub(_MathBinaryOp):
        - **x** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
          a bool or a tensor whose data type is
          `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-         `
+         `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
        - **y** (Union[Tensor, number.Number, bool]) - The second input, when the first input is a Tensor,
          the second input should be a number.Number or bool value, or a Tensor whose data type is number or bool.

@@ -1289,10 +1289,10 @@ class DivNoNan(Primitive):
        - **x1** (Union[Tensor, number.Number, bool]) - The first input is a number.Number or
          a bool or a tensor whose data type is
          `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-         `
+         `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
        - **x2** (Union[Tensor, number.Number, bool]) - The second input is a number.Number or
-         a bool when the first input is a bool or a tensor whose data type is number or bool.
-         When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
+         a bool when the first input is a bool or a tensor whose data type is number or bool\_.
+         When the first input is Scalar, the second input must be a Tensor whose data type is number or bool\_.

    Outputs:
        Tensor, the shape is the same as the one after broadcasting,
mindspore/ops/operations/nn_ops.py
CHANGED
@@ -41,7 +41,7 @@ from ..auto_generate import (CeLU, Flatten, LogSoftmax, LogSoftmaxExt, GLU, ReLU,
     UpsampleNearest3D, UpsampleTrilinear3D,
     SoftMarginLoss, UpsampleBilinear2D, UpsampleLinear1D,
     BinaryCrossEntropy, BCEWithLogitsLoss, SoftShrink, AdaptiveMaxPool2D,
-    SmoothL1Loss
+    SmoothL1Loss)
 from .manually_defined import BatchNorm


mindspore/ops/primitive.py
CHANGED
@@ -458,7 +458,7 @@ class Primitive(Primitive_):

        - If the computation involves something like randomization or global variable, the equivalence
          is not guaranteed currently.
-        -
+        - Not supported in pynative mode

        Args:
            mode (bool): Specifies whether the primitive is recomputed. Default: ``True`` .
@@ -466,7 +466,7 @@ class Primitive(Primitive_):
        Examples:
            >>> import numpy as np
            >>> import mindspore as ms
-            >>> from mindspore import Tensor, ops, nn
+            >>> from mindspore import Tensor, ops, nn
            >>> class NetRecompute(nn.Cell):
            ...     def __init__(self):
            ...         super(NetRecompute,self).__init__()
@@ -481,7 +481,6 @@ class Primitive(Primitive_):
            ...         super(GradNet,self).__init__()
            ...         self.network = network
            ...         self.grad = ops.GradOperation()
-            ...     @jit
            ...     def construct(self, x):
            ...         g_out = self.grad(self.network)(x)
            ...         return g_out
@@ -493,6 +492,8 @@ class Primitive(Primitive_):
            >>> print(a)
            [0. 0.5]
        """
+        if context.get_context("mode") == context.PYNATIVE_MODE:
+            raise TypeError("Recompute is not supported in pynative mode currently.")
        Validator.check_bool(mode)
        self.add_prim_attr("recompute", mode)
        return self
@@ -509,6 +510,8 @@ class Primitive(Primitive_):
        Args:
            backward_prefetch(Union[str, int]): Specifies whether the activation is prefetched in backward pass.
        """
+        if context.get_context("mode") == context.PYNATIVE_MODE:
+            raise ValueError("Offload is not supported in pynative mode currently.")
        self.add_prim_attr("offload", True)
        if isinstance(backward_prefetch, str):
            Validator.check_string(backward_prefetch, ['Auto'], 'backward_prefetch', 'Primitive._offload')
@@ -548,6 +551,10 @@ class Primitive(Primitive_):
        Validator.check_non_negative_int(rank_id, "rank_id", "Primitive.place")
        Validator.check_string(role, "MS_WORKER", "role", "Primitive.place")

+        if context.get_context("mode") == context.PYNATIVE_MODE:
+            raise RuntimeError("You are calling Primitive.place in pynative mode."
+                               "It's only supported in graph mode. Please switch to graph mode.")
+
        # Get the execution context and check whether calling of this 'place' method is valid.
        # This is because placing operators to arbitrary processes while other distributed training mode
        # is enabled is very unpredictable and may cause fatal error.
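The `primitive.py` hunks make `recompute`, `_offload`, and `place` fail fast under PyNative mode instead of silently tagging the primitive. A hedged sketch of the user-visible effect, assuming the standard `mindspore.set_context` entry point:

```python
# Hedged sketch: in PyNative mode, Primitive.recompute() now raises TypeError;
# in graph mode it still tags the primitive with the "recompute" attribute.
import mindspore as ms
from mindspore import ops

ms.set_context(mode=ms.PYNATIVE_MODE)
try:
    ops.ReLU().recompute()
except TypeError as e:
    print(e)   # Recompute is not supported in pynative mode currently.

ms.set_context(mode=ms.GRAPH_MODE)
relu = ops.ReLU().recompute()   # allowed; returns the primitive itself
```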
mindspore/ops/tensor_method.py
CHANGED
@@ -24,7 +24,8 @@ from mindspore.ops.composite.multitype_ops._compile_utils import (
     sequence_to_tensor, _tensor_sub, _tensor_pow, _tensor_div, _tensor_floordiv, _tensor_mod
 )
 from mindspore.ops.auto_generate.gen_ops_prim import (
-    inplace_scatter_src_op, inplace_scatter_src_reduce_op, inplace_scatter_value_op, inplace_scatter_value_reduce_op
+    inplace_scatter_src_op, inplace_scatter_src_reduce_op, inplace_scatter_value_op, inplace_scatter_value_reduce_op,
+    inplace_copy_op
 )
 from mindspore.ops.auto_generate.gen_ops_prim import (
     floor_div_op, floor_div_scalar_op
@@ -441,9 +442,6 @@ from mindspore.ops.auto_generate.gen_ops_prim import inplace_exp_op
 # 1030 log_
 from mindspore.ops.auto_generate.gen_ops_prim import inplace_log_op

-# 1031 masked_scatter
-from mindspore.ops.auto_generate import masked_scatter
-
 from .._checkparam import check_axis_in_range
 from ..ops.composite.multitype_ops import _compile_utils as compile_utils

@@ -1455,10 +1453,14 @@ def tensor_atanh(input):
     return F.atanh(input)


-def tensor_copy_(input, src
+def tensor_copy_(input, src):
     raise ValueError("should not come here for copy_ method")


+def deprecated_tensor_copy_(input, src, non_blocking=False):
+    return inplace_copy_op(input, src)
+
+
 def tensor_tan(input):
     return F.tan(input)

@@ -1862,10 +1864,6 @@ def conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
     raise NotImplementedError('conv3d only supports Ascend.')


-def tensor_remainder_(input, other):
-    return _tensor_mod(input, other)
-
-
 def tensor_floor_divide_(input, other):
     return _tensor_floordiv(input, other)

@@ -1910,10 +1908,6 @@ def tensor_gelu(input, *, approximate):
     return gelu(input, approximate)


-def tensor_bernoulli_(input, p, seed, offset):
-    raise RuntimeError("'bernoulli_' is not supported on this device.")
-
-
 def deprecated_pixel_shuffle(input, upscale_factor):
     return F.pixel_shuffle(input, upscale_factor)

@@ -1931,6 +1925,3 @@ def tensor_gmm(x, weight, *, bias=None, group_list=None, group_type=0, group_lis

 def raise_func(*args, **kwargs):
     raise NotImplementedError("this func has not been implemented.")
-
-def tensor_masked_scatter(input, mask, source):
-    return masked_scatter(input, mask, source)
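Besides dropping the `masked_scatter`, `remainder_`, and `bernoulli_` bindings, these hunks fix the truncated `tensor_copy_` stub and add `deprecated_tensor_copy_`, which routes to `inplace_copy_op`. A hedged usage sketch of the in-place copy that wrapper presumably backs at the `Tensor` level:

```python
# Hedged sketch: Tensor.copy_ copies src into self in place (assumed to be the
# public entry served by deprecated_tensor_copy_/inplace_copy_op above).
import mindspore as ms
from mindspore import Tensor

x = Tensor([[1.0, 2.0], [3.0, 4.0]], ms.float32)
src = Tensor([[9.0, 8.0], [7.0, 6.0]], ms.float32)
x.copy_(src)
print(x)   # values of src, shape and dtype of x
```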
mindspore/ops_generate/pyboost/gen_pyboost_func.py
CHANGED
@@ -16,6 +16,9 @@
 Generate pyboost function from pyboost_op.yaml
 """

+import os
+import shutil
+import logging
 from resources.resource_list import ResourceType
 from common import gen_constants as K
 from api.functions_cc_generator import FunctionsGenerator, FunctionsHeaderGenerator
@@ -45,6 +48,18 @@ from .auto_grad_impl_cc_generator import AutoGradImplGenerator
 from .auto_grad_reg_cc_generator import AutoGradRegHeaderGenerator


+def clear_old_generated_code(work_path):
+    """ delete old generated files to prevent compilation failure """
+    files_to_clear = ['mindspore/ops/kernel/common/pyboost',
+                      'mindspore/ops/kernel/functions/auto_generate',
+                      'mindspore/ccsrc/runtime/pynative/op_function']
+    for f in files_to_clear:
+        real_path = os.path.join(work_path, f)
+        if os.path.exists(real_path):
+            shutil.rmtree(real_path)
+            logging.warning("rm file %s", real_path)
+
+
 def gen_pyboost_code(resource_mgr):
     """ gen_pyboost_code """
     work_path = K.WORK_DIR
@@ -52,6 +67,7 @@ def gen_pyboost_code(resource_mgr):
     doc_yaml_data = resource_mgr.get_resource(ResourceType.OP_DOC_YAML)
     mint_func_protos = resource_mgr.get_resource(ResourceType.MINT_FUNC_PROTOS)
     alias_func_mapping = resource_mgr.get_resource(ResourceType.ALIAS_API_MAPPING)
+    clear_old_generated_code(work_path)
     call_pyboost_inner_prim_generator(work_path, op_protos)
     call_pyboost_functions_py_generator(work_path, op_protos, doc_yaml_data)
     call_pyboost_functions_h_generator(work_path, op_protos)
mindspore/parallel/_auto_parallel_context.py
CHANGED
@@ -81,7 +81,6 @@ class _PipelineScheduler:
     PIPELINE_SEQPIPE = "seqpipe"
     PIPELINE_SEQVPP = "seqvpp"
     PIPELINE_SEQSMARTVPP = "seqsmartvpp"
-    PIPELINE_ZBV = "zero_bubble_v"


 class _AutoParallelContext:
@@ -435,6 +434,13 @@ class _AutoParallelContext:
         """
         self.check_context_handle()
         run_mode = context.get_context("mode")
+        if run_mode == context.PYNATIVE_MODE and parallel_mode not in (
+                context.ParallelMode.DATA_PARALLEL, context.ParallelMode.STAND_ALONE,
+                context.ParallelMode.AUTO_PARALLEL):
+            raise ValueError(f"Pynative only supports STAND_ALONE, DATA_PARALLEL and AUTO_PARALLEL using"
+                             f" sharding_propagation under shard function"
+                             f" for ParallelMode, "
+                             f"but got {parallel_mode.upper()}.")
         ret = self._context_handle.set_parallel_mode(parallel_mode)
         if ret is False:
             raise ValueError("The context configuration parameter 'parallel_mode' only support 'stand_alone', "
@@ -598,6 +604,9 @@ class _AutoParallelContext:
             if not isinstance(dim, int):
                 raise TypeError("For 'set_auto_parallel_context', the element of argument "
                                 "'dataset_strategy' must be int type, but got the type : {} .".format(type(dim)))
+        if context.get_context('mode') == context.PYNATIVE_MODE:
+            raise ValueError("In PyNative mode, the setting value of 'dataset_strategy' must be either 'full_batch' "
+                             f"or 'data_parallel', but got {dataset_strategy}.")
         self._dataset_strategy_using_str = False
         self._context_handle.set_dataset_strategy(dataset_strategy)

@@ -637,6 +646,9 @@ class _AutoParallelContext:
                 return "full_batch"
             return "data_parallel"
         dataset_strategy = self._context_handle.get_dataset_strategy()
+        if context.get_context('mode') == context.PYNATIVE_MODE:
+            raise ValueError("In PyNative mode, the value of 'dataset_strategy' must be either 'full_batch' "
+                             f"or 'data_parallel', but got the setting value is {dataset_strategy}.")
         return dataset_strategy

     def set_grad_accumulation_step(self, grad_accumulation_step):
@@ -650,7 +662,7 @@ class _AutoParallelContext:
             raise ValueError("The interface is deprecated. To use gradient accumulation, "
                              "please use GradAccumulationCell in mindspore.nn.wrap.cell_wrapper.")
         self.check_context_handle()
-        Validator.check_positive_int(grad_accumulation_step
+        Validator.check_positive_int(grad_accumulation_step)
         self._context_handle.set_grad_accumulation_step(grad_accumulation_step)

     def get_grad_accumulation_step(self):
@@ -986,8 +998,6 @@ class _AutoParallelContext:
             _PipelineScheduler.PIPELINE_GPIPE,
             _PipelineScheduler.PIPELINE_SEQPIPE,
             _PipelineScheduler.PIPELINE_SEQVPP,
-            _PipelineScheduler.PIPELINE_SEQSMARTVPP,
-            _PipelineScheduler.PIPELINE_ZBV,
             _PipelineScheduler.PIPELINE_SEQSMARTVPP])
         scheduler_val = pipeline_config[pp_scheduler]
         if not pipeline_config[pp_interleave] and scheduler_val != _PipelineScheduler.PIPELINE_1F1B:
@@ -1062,7 +1072,7 @@ class _AutoParallelContext:

         if threshold_name in parallel_optimizer_config:
             Validator.check_non_negative_int(
-                parallel_optimizer_config[threshold_name]
+                parallel_optimizer_config[threshold_name])
             self._context_handle.set_parallel_optimizer_threshold(
                 parallel_optimizer_config[threshold_name])

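The `_AutoParallelContext` hunks add PyNative-mode guards to `set_parallel_mode` and to the tuple form of `dataset_strategy`. A hedged sketch of the user-visible effect through `set_auto_parallel_context`, assuming that entry point still routes to this context object:

```python
# Hedged sketch: requesting a parallel mode that PyNative does not support now
# fails fast instead of being accepted and misbehaving later.
import mindspore as ms

ms.set_context(mode=ms.PYNATIVE_MODE)
try:
    ms.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
except ValueError as e:
    print(e)   # Pynative only supports STAND_ALONE, DATA_PARALLEL and AUTO_PARALLEL ...
```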
mindspore/parallel/_parallel_serialization.py
CHANGED
@@ -144,7 +144,8 @@ def _build_protobuf_strategy(strategy_filename):
     parallel_strategy_map = _load_protobuf_strategy(strategy_filename)
     layout_items = parallel_strategy_map.parallel_layout_item
     if not layout_items:
-
+        raise ValueError(f"For 'build_searched_strategy', the strategy file {strategy_filename} has no sliced "
+                         f"parameter, please check whether the 'strategy_filename' is correct.")

     strategy = {}
     for layout_item in layout_items:
@@ -158,8 +159,6 @@ def _build_json_strategy(strategy_filename):
     """build strategy from json file"""
     with open(strategy_filename, 'r') as f:
         json_content = json.load(f)
-    if "parallel_layout_item" not in json_content:
-        return {}
     layout_items = json_content.get("parallel_layout_item")
     strategy = {}
     for parameter_name, layout_item in layout_items.items():
mindspore/parallel/_ps_context.py
CHANGED
@@ -114,8 +114,8 @@ def _set_ps_context(**kwargs):
             Default: ``False``.
         config_file_path (str): Configuration file path used by recovery. Default: ''.
         enable_ssl (bool): Set PS SSL mode enabled or disabled. Default: ``False``.
-
-
+            There might be risk when this is set to False.
+            It is user's responsibility to ensure the network environment is safe.
         client_password (str): Password to decrypt the secret key stored in the client certificate. Default: ''.
         server_password (str): Password to decrypt the secret key stored in the server certificate. Default: ''.
