mindspore 2.2.10-cp39-cp39-win_amd64.whl → 2.2.14-cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +2 -1
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_extends/parse/__init__.py +3 -2
- mindspore/_extends/parse/parser.py +6 -1
- mindspore/_extends/parse/standard_method.py +12 -2
- mindspore/common/_utils.py +16 -0
- mindspore/common/tensor.py +0 -2
- mindspore/communication/management.py +3 -0
- mindspore/context.py +34 -4
- mindspore/dataset/engine/cache_client.py +8 -5
- mindspore/dataset/engine/datasets.py +23 -0
- mindspore/dataset/engine/validators.py +1 -1
- mindspore/dataset/vision/py_transforms_util.py +2 -2
- mindspore/dnnl.dll +0 -0
- mindspore/experimental/optim/lr_scheduler.py +5 -6
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/tools/cifar100_to_mr.py +49 -57
- mindspore/mindrecord/tools/cifar10_to_mr.py +46 -55
- mindspore/mindrecord/tools/csv_to_mr.py +3 -8
- mindspore/mindrecord/tools/mnist_to_mr.py +4 -9
- mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -4
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/nn/layer/activation.py +1 -1
- mindspore/nn/layer/embedding.py +2 -2
- mindspore/nn/layer/flash_attention.py +48 -135
- mindspore/nn/loss/loss.py +1 -1
- mindspore/nn/optim/ada_grad.py +2 -2
- mindspore/nn/optim/sgd.py +3 -2
- mindspore/nn/wrap/__init__.py +4 -2
- mindspore/nn/wrap/cell_wrapper.py +6 -3
- mindspore/numpy/math_ops.py +1 -1
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +3 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +0 -31
- mindspore/ops/_grad_experimental/grad_comm_ops.py +4 -2
- mindspore/ops/_grad_experimental/grad_inner_ops.py +8 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +37 -17
- mindspore/ops/_op_impl/aicpu/__init__.py +1 -0
- mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +38 -0
- mindspore/ops/_op_impl/aicpu/linear_sum_assignment.py +21 -2
- mindspore/ops/function/array_func.py +6 -5
- mindspore/ops/function/debug_func.py +1 -1
- mindspore/ops/function/linalg_func.py +21 -11
- mindspore/ops/function/math_func.py +3 -0
- mindspore/ops/function/nn_func.py +13 -11
- mindspore/ops/function/parameter_func.py +2 -0
- mindspore/ops/function/sparse_unary_func.py +2 -2
- mindspore/ops/function/vmap_func.py +1 -0
- mindspore/ops/operations/__init__.py +5 -2
- mindspore/ops/operations/_embedding_cache_ops.py +1 -1
- mindspore/ops/operations/_grad_ops.py +3 -4
- mindspore/ops/operations/_inner_ops.py +56 -1
- mindspore/ops/operations/_quant_ops.py +4 -4
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +15 -4
- mindspore/ops/operations/custom_ops.py +1 -1
- mindspore/ops/operations/debug_ops.py +1 -1
- mindspore/ops/operations/image_ops.py +3 -3
- mindspore/ops/operations/inner_ops.py +49 -0
- mindspore/ops/operations/math_ops.py +65 -3
- mindspore/ops/operations/nn_ops.py +95 -28
- mindspore/ops/operations/random_ops.py +2 -0
- mindspore/ops/operations/sparse_ops.py +4 -4
- mindspore/ops/silent_check.py +162 -0
- mindspore/parallel/__init__.py +3 -2
- mindspore/parallel/_auto_parallel_context.py +82 -3
- mindspore/parallel/_parallel_serialization.py +34 -2
- mindspore/parallel/_tensor.py +3 -1
- mindspore/parallel/_transformer/transformer.py +8 -8
- mindspore/parallel/checkpoint_transform.py +191 -45
- mindspore/profiler/parser/ascend_cluster_generator.py +111 -0
- mindspore/profiler/parser/ascend_communicate_generator.py +315 -0
- mindspore/profiler/parser/ascend_flops_generator.py +8 -2
- mindspore/profiler/parser/ascend_fpbp_generator.py +8 -2
- mindspore/profiler/parser/ascend_hccl_generator.py +2 -2
- mindspore/profiler/parser/ascend_msprof_exporter.py +30 -6
- mindspore/profiler/parser/ascend_msprof_generator.py +16 -5
- mindspore/profiler/parser/ascend_op_generator.py +15 -7
- mindspore/profiler/parser/ascend_timeline_generator.py +5 -2
- mindspore/profiler/parser/base_timeline_generator.py +11 -3
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +2 -1
- mindspore/profiler/parser/framework_parser.py +8 -2
- mindspore/profiler/parser/memory_usage_parser.py +8 -2
- mindspore/profiler/parser/minddata_analyzer.py +8 -2
- mindspore/profiler/parser/minddata_parser.py +1 -1
- mindspore/profiler/parser/msadvisor_analyzer.py +4 -2
- mindspore/profiler/parser/msadvisor_parser.py +9 -3
- mindspore/profiler/profiling.py +97 -25
- mindspore/rewrite/api/node.py +1 -1
- mindspore/rewrite/api/symbol_tree.py +2 -2
- mindspore/rewrite/parsers/for_parser.py +6 -6
- mindspore/rewrite/parsers/module_parser.py +4 -4
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/callback/_checkpoint.py +8 -8
- mindspore/train/callback/_landscape.py +2 -3
- mindspore/train/callback/_summary_collector.py +6 -7
- mindspore/train/dataset_helper.py +6 -0
- mindspore/train/model.py +17 -5
- mindspore/train/serialization.py +6 -1
- mindspore/train/summary/_writer_pool.py +1 -1
- mindspore/train/summary/summary_record.py +5 -6
- mindspore/turbojpeg.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.2.10.dist-info → mindspore-2.2.14.dist-info}/METADATA +3 -2
- {mindspore-2.2.10.dist-info → mindspore-2.2.14.dist-info}/RECORD +117 -124
- mindspore/ops/_op_impl/_custom_op/flash_attention/__init__.py +0 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/attention.py +0 -406
- mindspore/ops/_op_impl/_custom_op/flash_attention/constants.py +0 -41
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_bwd.py +0 -467
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_fwd.py +0 -563
- mindspore/ops/_op_impl/_custom_op/flash_attention/flash_attention_impl.py +0 -193
- mindspore/ops/_op_impl/_custom_op/flash_attention/tik_ops_utils.py +0 -435
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/__init__.py +0 -0
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/sparse_tiling.py +0 -45
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/strategy.py +0 -67
- mindspore/ops/_op_impl/_custom_op/flash_attention/tiling_strategy/wukong_tiling.py +0 -62
- {mindspore-2.2.10.dist-info → mindspore-2.2.14.dist-info}/WHEEL +0 -0
- {mindspore-2.2.10.dist-info → mindspore-2.2.14.dist-info}/entry_points.txt +0 -0
- {mindspore-2.2.10.dist-info → mindspore-2.2.14.dist-info}/top_level.txt +0 -0
mindspore/mindrecord/tools/csv_to_mr.py
CHANGED

@@ -22,10 +22,6 @@ from mindspore import log as logger
 from ..filewriter import FileWriter
 from ..shardutils import check_filename, ExceptionThread
 
-try:
-    pd = import_module("pandas")
-except ModuleNotFoundError:
-    pd = None
 
 __all__ = ['CsvToMR']
 
@@ -55,8 +51,7 @@ class CsvToMR:
     """
 
    def __init__(self, source, destination, columns_list=None, partition_number=1):
-        if not pd:
-            raise Exception("Module pandas is not found, please use pip install it.")
+        self.pd = import_module("pandas")
         if isinstance(source, str):
             check_filename(source)
             self.source = source
@@ -135,8 +130,8 @@ class CsvToMR:
         if not os.path.exists(self.source):
             raise IOError("Csv file {} do not exist.".format(self.source))
 
-        pd.set_option('display.max_columns', None)
-        df = pd.read_csv(self.source)
+        self.pd.set_option('display.max_columns', None)
+        df = self.pd.read_csv(self.source)
 
         csv_schema = self._get_schema(df)
 
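In 2.2.14 the pandas dependency is no longer probed with a module-level try/except; it is imported lazily inside __init__, so a missing install surfaces at instantiation rather than at package import. A minimal sketch of that deferred-import pattern (the class and method names here are illustrative, not MindSpore API):

from importlib import import_module


class LazyCsvReader:
    # Illustrative only: resolve an optional dependency at first use.
    def __init__(self, source):
        # Raises ModuleNotFoundError here, at instantiation, if pandas is absent,
        # instead of silently setting a module-level flag at import time.
        self.pd = import_module("pandas")
        self.source = source

    def load(self):
        # Later calls go through the instance attribute, as CsvToMR now does.
        return self.pd.read_csv(self.source)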
mindspore/mindrecord/tools/mnist_to_mr.py
CHANGED

@@ -25,10 +25,6 @@ from mindspore import log as logger
 from ..filewriter import FileWriter
 from ..shardutils import check_filename, ExceptionThread, SUCCESS, FAILED
 
-try:
-    cv_import = import_module("cv2")
-except ModuleNotFoundError:
-    cv_import = None
 
 __all__ = ['MnistToMR']
 
@@ -58,6 +54,8 @@ class MnistToMR:
     """
 
     def __init__(self, source, destination, partition_number=1):
+        self.cv_import = import_module("cv2")
+
         self.image_size = 28
         self.num_channels = 1
 
@@ -89,9 +87,6 @@ class MnistToMR:
 
     # pylint: disable=missing-docstring
     def run(self):
-        if not cv_import:
-            raise ModuleNotFoundError("opencv-python module not found, please use pip install it.")
-
         if self._transform_train() == FAILED:
             return FAILED
         if self._transform_test() == FAILED:
@@ -155,7 +150,7 @@ class MnistToMR:
         train_data = self._extract_images(self.train_data_filename_)
         train_labels = self._extract_labels(self.train_labels_filename_)
         for data, label in zip(train_data, train_labels):
-            _, img = cv_import.imencode(".jpeg", data)
+            _, img = self.cv_import.imencode(".jpeg", data)
             yield {"label": int(label), "data": img.tobytes()}
 
     def _mnist_test_iterator(self):
@@ -168,7 +163,7 @@ class MnistToMR:
         test_data = self._extract_images(self.test_data_filename_)
         test_labels = self._extract_labels(self.test_labels_filename_)
         for data, label in zip(test_data, test_labels):
-            _, img = cv_import.imencode(".jpeg", data)
+            _, img = self.cv_import.imencode(".jpeg", data)
             yield {"label": int(label), "data": img.tobytes()}
 
     def _transform_train(self):
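The MNIST iterators JPEG-encode each image with cv2.imencode and yield the raw bytes with the label; with cv2 now resolved lazily in __init__, the per-sample step works roughly like the sketch below (the helper function and the synthetic image are illustrative, not MindSpore code):

from importlib import import_module

import numpy as np


def encode_sample(image, label, cv_import=None):
    # Illustrative helper: JPEG-encode one HxW uint8 image and package it like the iterators do.
    cv_import = cv_import or import_module("cv2")  # imported on first use, as in 2.2.14
    ok, buf = cv_import.imencode(".jpeg", image)
    if not ok:
        raise ValueError("JPEG encoding failed.")
    return {"label": int(label), "data": buf.tobytes()}


sample = encode_sample(np.zeros((28, 28), dtype=np.uint8), 7)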
mindspore/mindrecord/tools/tfrecord_to_mr.py
CHANGED

@@ -97,10 +97,7 @@ class TFRecordToMR:
     """
 
     def __init__(self, source, destination, feature_dict, bytes_fields=None):
-        try:
-            self.tf = import_module("tensorflow")  # just used to convert tfrecord to mindrecord
-        except ModuleNotFoundError:
-            raise Exception("Module tensorflow is not found, please use pip install it.")
+        self.tf = import_module("tensorflow")  # just used to convert tfrecord to mindrecord
 
         if self.tf.__version__ < SupportedTensorFlowVersion:
             raise Exception("Module tensorflow version must be greater or equal {}.".format(SupportedTensorFlowVersion))
mindspore/mindspore_backend.dll
CHANGED
Binary file

mindspore/mindspore_common.dll
CHANGED
Binary file

mindspore/mindspore_core.dll
CHANGED
Binary file

mindspore/mindspore_glog.dll
CHANGED
Binary file
mindspore/nn/layer/activation.py
CHANGED

@@ -782,7 +782,7 @@ class Tanhshrink(Cell):
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>> import mindspore
+        >>> import mindspore as ms
         >>> from mindspore import Tensor, nn
         >>> import numpy as np
         >>> x = Tensor(np.array([1, 2, 3, 2, 1]), ms.float16)
mindspore/nn/layer/embedding.py
CHANGED

@@ -522,12 +522,12 @@ class MultiFieldEmbeddingLookup(EmbeddingLookup):
           this interface. Type is Int32, Int64.
         - **input_values** (Tensor) - The shape of tensor is :math:`(batch\_size, seq\_length)`.
           Specifies the weights of elements of the input_indices. The lookout vector will multiply with
-          the input_values. Type is
+          the input_values. Type is float32.
         - **field_ids** (Tensor) - The shape of tensor is :math:`(batch\_size, seq\_length)`.
           Specifies the field id of elements of the input_indices. Type is Int32.
 
     Outputs:
-        Tensor, the shape of tensor is :math:`(batch\_size, field\_size, embedding\_size)`. Type is
+        Tensor, the shape of tensor is :math:`(batch\_size, field\_size, embedding\_size)`. Type is float32.
 
     Raises:
         TypeError: If `vocab_size` or `embedding_size` or `field_size` is not an int.
mindspore/nn/layer/flash_attention.py
CHANGED

@@ -21,9 +21,7 @@ import mindspore.common.dtype as mstype
 from mindspore.common.tensor import Tensor
 from mindspore import ops
 from mindspore.nn.cell import Cell
-from mindspore.ops._op_impl._custom_op.flash_attention.flash_attention_impl import get_flash_attention
 from mindspore.ops.operations.nn_ops import FlashAttentionScore
-from mindspore._c_expression import MSContext
 
 __all__ = ['FlashAttention']
 
@@ -46,18 +44,17 @@ class FlashAttention(Cell):
             Default 65536.
         next_block_num(int): A integer to define the number of blocks to look behind for local block sparse attention.
             Default 65536.
-        tiling_stgy_name(str): A str to define tiling strategy of flash attention.
         dp(int): data parallel.
             Default 1.
         mp(int): model parallel.
             Default 1.
-        high_precision(bool): This mode has higher precision but some performance loss.
+        high_precision(bool): This mode has higher precision but some performance loss. Only take effect on Ascend910A.
             Default False.
         have_attention_mask_batch(bool): indicates whether attention_mask contains the batch dimension.
             Default True
         alibi(bool): This parameter indicates whether the flashattention supports the Alibi.
             Default: False
-        use_mqa(bool): Using
+        use_mqa(bool): Using MQA if True, only take effect under 910B. Default: False.
 
 
     Inputs:
@@ -98,7 +95,6 @@ class FlashAttention(Cell):
                  dropout_rate=0.0,
                  prev_block_num=65536,
                  next_block_num=65536,
-                 tiling_stgy_name="sparse",
                  dp=1,
                  mp=1,
                  high_precision=False,
@@ -112,52 +108,36 @@ class FlashAttention(Cell):
         if scaling_constant == 0:
             raise ValueError("the scaling constant must not be 0.")
         self.dropout_rate = dropout_rate
-        self.
-
-
-
-
-
-
-
-
-
-
-
-
-
-        self.flash_attention.add_prim_attr("primitive_target", "Ascend")
+        self.alibi = alibi
+        self.have_attention_mask_batch = have_attention_mask_batch
+
+        self.transpose_4d_pre = ops.Transpose().shard(((dp, mp, 1, 1),))
+        self.transpose_4d_post = ops.Transpose().shard(((dp, 1, mp, 1),))
+        self.reshape = ops.Reshape()
+        self.zeros_like = ops.ZerosLike().shard(((dp, mp, 1, 1),))
+        self.zeros = ops.Zeros()
+        self.attn_cast = ops.Cast()
+        if use_mqa:
+            fa_strategies = ((dp, mp, 1, 1),
+                             (dp, 1, 1, 1),
+                             (dp, 1, 1, 1))
+        else:
             fa_strategies = ((dp, mp, 1, 1),
                              (dp, mp, 1, 1),
                              (dp, mp, 1, 1))
-
-
-
-
-
-
-
-
-
-
-
-
-
-                             (dp, 1, 1, 1),
-                             (dp, 1, 1, 1))
-        else:
-            fa_strategies = ((dp, mp, 1, 1),
-                             (dp, mp, 1, 1),
-                             (dp, mp, 1, 1),
-                             (dp, 1, 1, 1))
-        if dropout_rate > 1e-5:
-            fa_strategies += ((dp, mp, 1, 1),)
-        self.flash_attention = FlashAttentionScore(head_num=head_num, pre_tokens=prev_block_num,
-                                                   next_tokens=next_block_num,
-                                                   keep_prob=1 - dropout_rate,
-                                                   scale_value=1. / scaling_constant,
-                                                   inner_precise=0 if high_precision else 1,
-                                                   input_layout="BNSD").shard(fa_strategies)
+        if self.alibi:
+            self.alibi_rescale_mul = ops.Mul().shard(((dp, mp, 1, 1), (1,)))
+            self.alibi_rescale_factor = Tensor([scaling_constant], dtype=mstype.float16)
+            fa_strategies += ((dp, mp, 1, 1),)
+        if dropout_rate > 1e-5:
+            fa_strategies += ((dp, mp, 1, 1),)
+        fa_strategies += ((dp, 1, 1, 1),)
+        self.flash_attention = FlashAttentionScore(head_num=head_num, pre_tokens=prev_block_num,
+                                                   next_tokens=next_block_num,
+                                                   keep_prob=1 - dropout_rate,
+                                                   scale_value=1. / scaling_constant,
+                                                   inner_precise=0,
+                                                   input_layout="BNSD").shard(fa_strategies)
 
         self.dropout_rate = dropout_rate
         if self.dropout_rate > 1e-5:
@@ -175,49 +155,7 @@ class FlashAttention(Cell):
             such as MatMul. Default: None.
         :return:
         """
-
-        if in_strategy is None:
-            # default: dp=1, mp=1, construct inputs only contain query, key, value
-            in_strategy = (
-                (1, 1, 1, 1),
-                (1, 1, 1, 1),
-                (1, 1, 1, 1),
-            )
-            self.flash_attention.shard(in_strategy)
-            dp = in_strategy[0][0]
-            mp = in_strategy[0][1]
-            self.flash_attention.add_prim_attr("dev_matrix_shape", [dp, mp, 1, 1])
-            inputs_tensor_map = [
-                [3, 2, 1, 0],
-                [3, 2, 1, 0],
-                [3, 2, 1, 0],
-            ]
-            if self.have_attention_mask_batch:
-                inputs_tensor_map.append([3, 1, 0])
-            else:
-                inputs_tensor_map.append([-1, 1, 0])
-
-            input_empty_args_num = 2
-            # dropout_mask
-            if self.dropout_rate > 1e-5:
-                input_empty_args_num -= 1
-                inputs_tensor_map.append([3, 2, 1, 0])
-
-            if self.alibi:
-                input_empty_args_num -= 1
-                inputs_tensor_map.append([3, 2, 1, 0])
-
-            self.flash_attention.add_prim_attr("inputs_tensor_map", inputs_tensor_map)
-
-            self.flash_attention.add_prim_attr("outputs_tensor_map", [
-                [3, 2, 1, 0],  # O
-                [3, 2, 1],  # L
-                [3, 2, 1]  # M
-            ])
-            self.flash_attention.add_prim_attr("as_loss_divisor", 0)
-            self.flash_attention.add_prim_attr("empty_mirror_ops", input_empty_args_num)
-        else:
-            self.flash_attention.shard(in_strategy)
+        self.flash_attention.shard(in_strategy)
 
     def construct(self, query, key, value, attn_mask=None, alibi_mask=None):
         """FlashAttention forward
@@ -228,49 +166,24 @@ class FlashAttention(Cell):
         :param alibi_mask: [bsz, head_num, 1, seq_len], if not None
         :return: output [bsz, head_num, seq_len, head_dim]
         """
-        bsz, head_num, seq_len,
-
-
-
-
-            raise ValueError(
-                "the head_num of query, key and value must be the same, "
-                "If different head_num are used, users need to change themselves to be same by tile.")
-        if seq_len % 16 != 0 or k_seq_len % 16 != 0 or k_seq_len != v_seq_len:
-            raise ValueError(
-                "query, key, value seq_len must be a multiple of 16, "
-                "and the seq_len between key and value must be equal.")
-        # 910A -- FlashAttentionPrimtive
-        if head_dim > 304:
-            raise ValueError(
-                "the head_dim must be less than 304, otherwise the ub would be OOM.")
-        if self.dropout_rate > 1e-5:
-            drop_mask_bits = self.drop_gen_mask((bsz, head_num, seq_len, seq_len), self.keep_prob)
-            tensor_shape = Tensor((bsz, head_num, seq_len, seq_len), mstype.int32)
-            ones = self.fill_v2(tensor_shape, self.tensor_one)
-            ones = self.depend(ones, query)
-            drop_mask = self.do_dropout(ones, drop_mask_bits, self.keep_prob)
-        else:
-            drop_mask = None
-        query = self.scale_mul(query, self.scale_factor)
-        key = self.scale_mul(key, self.scale_factor)
-        attn_mask = self.cast(attn_mask, mstype.float16)
-        output, _, _ = self.flash_attention(query, key, value, attn_mask, drop_mask, alibi_mask)
+        bsz, head_num, seq_len, _ = query.shape
+        # 910B -- FlashAttentionScore
+        if self.dropout_rate > 1e-5:
+            drop_mask_bits = self.reshape(self.drop_gen_mask((bsz, head_num, seq_len, seq_len), self.keep_prob),
+                                          (bsz, head_num, seq_len, seq_len // 8))
         else:
-
-
-
-
-
-            drop_mask_bits = None
-        # (B, S, S) -> (B, 1, S, S)
+            drop_mask_bits = None
+        if self.alibi:
+            alibi_mask = self.alibi_rescale_mul(alibi_mask, self.cast(self.alibi_rescale_factor, alibi_mask.dtype))
+        # (B, S, S) -> (B, 1, S, S)
+        if self.have_attention_mask_batch:
             attn_mask = self.cast(self.reshape(attn_mask, (bsz, 1, seq_len, seq_len)), mstype.uint8)
-
-
-
-
-
-
-
-
+        _, _, _, output = self.flash_attention(query,
+                                               key,
+                                               value,
+                                               alibi_mask,
+                                               drop_mask_bits,
+                                               None,
+                                               attn_mask,
+                                               None)
         return output
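Functionally, the contract described in the construct docstring (query/key/value of shape [bsz, head_num, seq_len, head_dim], an optional attention mask, output with the shape of query) is that of scaled-dot-product attention. A plain NumPy reference of that computation, ignoring dropout, alibi, sharding and the fused Ascend kernel (this is not the FlashAttentionScore implementation, and the mask convention of the fused op should be checked against its own documentation):

import numpy as np


def reference_attention(query, key, value, attn_mask=None, scale=None):
    # query/key/value: [bsz, head_num, seq_len, head_dim]
    # attn_mask: [bsz, 1, seq_len, seq_len]; here non-zero entries suppress a position.
    scale = scale if scale is not None else 1.0 / np.sqrt(query.shape[-1])
    scores = np.einsum("bhqd,bhkd->bhqk", query, key) * scale
    if attn_mask is not None:
        scores = np.where(attn_mask != 0, -1e9, scores)
    weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
    weights = weights / weights.sum(axis=-1, keepdims=True)
    return np.einsum("bhqk,bhkd->bhqd", weights, value)


q = k = v = np.random.default_rng(0).standard_normal((1, 2, 16, 8)).astype(np.float32)
out = reference_attention(q, k, v)  # shape (1, 2, 16, 8), same as query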
mindspore/nn/loss/loss.py
CHANGED

@@ -1996,7 +1996,7 @@ class FocalLoss(LossBase):
         >>> import mindspore.nn as nn
         >>> logits = ms.Tensor([[0.8, 1.4], [0.5, 0.9], [1.2, 0.9]], ms.float32)
         >>> labels = ms.Tensor([[1], [1], [0]], ms.int32)
-        >>> focalloss = nn.FocalLoss(weight=Tensor([1, 2]), gamma=2.0, reduction='mean')
+        >>> focalloss = nn.FocalLoss(weight=ms.Tensor([1, 2]), gamma=2.0, reduction='mean')
         >>> output = focalloss(logits, labels)
         >>> print(output)
         0.12516622
mindspore/nn/optim/ada_grad.py
CHANGED

@@ -162,7 +162,7 @@ class Adagrad(Optimizer):
         ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>> import
+        >>> from mindspore import train
         >>> import mindspore.nn as nn
         >>>
         >>> # Define the network structure of LeNet5. Refer to

@@ -185,7 +185,7 @@ class Adagrad(Optimizer):
         >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
         >>>
         >>> loss = nn.SoftmaxCrossEntropyWithLogits()
-        >>> model =
+        >>> model = train.Model(net, loss_fn=loss, optimizer=optim)
     """
 
     @opt_init_args_register
mindspore/nn/optim/sgd.py
CHANGED

@@ -193,9 +193,9 @@ class SGD(Optimizer):
                                 "or 'weight_decay' set in grouped 'params' must be float or int type.")
 
         if hasattr(self, "group_weight_decay") and self.group_weight_decay:
-            self.opt = tuple(P.SGD(dampening,
+            self.opt = tuple(P.SGD(dampening, 0.0, nesterov) for _ in self.group_weight_decay)
         else:
-            self.opt = tuple([P.SGD(dampening,
+            self.opt = tuple([P.SGD(dampening, 0.0, nesterov)] * len(self._parameters))
 
         self.momentum = Parameter(Tensor(momentum, mstype.float32), name="momentum")

@@ -220,6 +220,7 @@ class SGD(Optimizer):
         params = self._parameters
         accum = self.accum
         stat = self.stat
+        gradients = self.decay_weight(gradients)
         gradients = self.flatten_gradients(gradients)
         gradients = self.gradients_centralization(gradients)
         gradients = self.scale_grad(gradients)
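The added decay_weight call means weight decay is now folded into the gradients before the other gradient transformations run. A rough plain-NumPy sketch of that ordering (the helper names mirror the optimizer methods, but this is not the MindSpore implementation, and it assumes the usual L2-style decay g <- g + wd * w):

import numpy as np


def decay_weight(gradients, params, weight_decay):
    # Assumed L2-style decay applied to the raw gradients, before flatten/centralize/scale.
    return [g + weight_decay * w for g, w in zip(gradients, params)]


def scale_grad(gradients, loss_scale):
    return [g / loss_scale for g in gradients]


params = [np.ones(3), np.full(2, 2.0)]
grads = [np.zeros(3), np.ones(2)]
grads = decay_weight(grads, params, weight_decay=0.01)  # new first step in 2.2.14
grads = scale_grad(grads, loss_scale=1.0)               # later steps unchanged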
mindspore/nn/wrap/__init__.py
CHANGED

@@ -20,7 +20,8 @@ Use the Wrapper to combine the loss or build the training steps.
 from __future__ import absolute_import
 
 from mindspore.nn.wrap.cell_wrapper import ForwardValueAndGrad, TrainOneStepCell, WithLossCell, WithGradCell, \
-    WithEvalCell, ParameterUpdate, GetNextSingleOp, VirtualDatasetCellTriple, MicroBatchInterleaved, PipelineCell
+    WithEvalCell, ParameterUpdate, GetNextSingleOp, VirtualDatasetCellTriple, MicroBatchInterleaved, PipelineCell, \
+    GradAccumulationCell
 from mindspore.nn.wrap.loss_scale import TrainOneStepWithLossScaleCell,\
     DynamicLossScaleUpdateCell, FixedLossScaleUpdateCell
 from mindspore.nn.wrap.grad_reducer import DistributedGradReducer

@@ -42,5 +43,6 @@ __all__ = [
     "ParameterUpdate",
     "DynamicLossScaleUpdateCell",
     "FixedLossScaleUpdateCell",
-    "VirtualDatasetCellTriple"
+    "VirtualDatasetCellTriple",
+    "GradAccumulationCell"
 ]
mindspore/nn/wrap/cell_wrapper.py
CHANGED

@@ -673,7 +673,7 @@ class PipelineCell(Cell):
 
 class GradAccumulationCell(Cell):
     """
-    Wrap the network with Micro Batch.
+    Wrap the network with Micro Batch to enable the grad accumulation in semi_auto_parallel/auto_parallel mode.
 
     Args:
         network (Cell): The target network to wrap.

@@ -683,8 +683,11 @@ class GradAccumulationCell(Cell):
         ``Ascend`` ``GPU``
 
     Examples:
-        >>>
-        >>>
+        >>> import mindspore.nn as nn
+        >>> # Define the network structure of LeNet5. Refer to
+        >>> # https://gitee.com/mindspore/docs/blob/r2.2/docs/mindspore/code/lenet.py
+        >>> net = LeNet5()
+        >>> net = nn.GradAccumulationCell(net, 4)
     """
     def __init__(self, network, micro_size):
         super(GradAccumulationCell, self).__init__(auto_prefix=False)
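As the clarified docstring notes, GradAccumulationCell targets semi_auto_parallel/auto_parallel graph-mode training, so the wrapper is normally applied after the parallel context is configured. A hedged sketch of that surrounding setup (LeNet5 is a placeholder exactly as in the docstring example; the context values are illustrative):

import mindspore as ms
import mindspore.nn as nn

ms.set_context(mode=ms.GRAPH_MODE)
ms.set_auto_parallel_context(parallel_mode="semi_auto_parallel")

net = LeNet5()                          # network definition referenced by the docstring
net = nn.GradAccumulationCell(net, 4)   # accumulate gradients over 4 micro-batches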
mindspore/numpy/math_ops.py
CHANGED

@@ -4285,7 +4285,7 @@ def argmin(a, axis=None):
 
     Examples:
         >>> import mindspore.numpy as np
-        >>> a = np.arange(10, 16).reshape(2, 3)
+        >>> a = np.arange(10, 16).reshape(2, 3).astype(np.float32)
         >>> print(np.argmin(a))
         0
         >>> print(np.argmin(a, axis=0))
mindspore/opencv_core452.dll
CHANGED
Binary file
mindspore/opencv_imgproc452.dll
CHANGED
Binary file
mindspore/ops/__init__.py
CHANGED

@@ -34,6 +34,7 @@ from mindspore.ops.composite import *
 from mindspore.ops.operations import *
 from mindspore.ops.function import *
 from mindspore.ops.functional import *
+from mindspore.ops.silent_check import _silent_check
 
 __primitive__ = [
     "prim_attr_register", "Primitive", "PrimitiveWithInfer", "PrimitiveWithCheck", "signature"

@@ -48,3 +49,5 @@ __all__.extend(composite.__all__)
 __all__.extend(operations.__all__)
 __all__.extend(functional.__all__)
 __all__.extend(function.__all__)
+
+_silent_check()
mindspore/ops/_grad_experimental/grad_array_ops.py
CHANGED

@@ -36,8 +36,6 @@ from mindspore.ops.operations.array_ops import ScatterAddWithAxis
 from mindspore.ops.operations.array_ops import Expand
 from mindspore.ops.operations.array_ops import SegmentMean
 from mindspore.ops.operations.array_ops import AffineGrid
-from mindspore.ops.operations.array_ops import Im2Col
-from mindspore.ops.operations.array_ops import Col2Im
 from mindspore.ops.operations.array_ops import MaskedScatter
 from mindspore.ops.operations.array_ops import MaskedSelect
 from mindspore.ops.operations.array_ops import CountNonZero

@@ -360,35 +358,6 @@ def get_bprop_resize_nearest_neighbor_v2(self):
     return bprop
 
 
-@bprop_getters.register(Im2Col)
-def get_bprop_im2col(self):
-    """
-    Generate bprop for Im2Col
-
-    Im2Col, corresponding to torch's UnFold operator.
-    The Unfold operator has no `padding_mode` attribute,
-    and it's implementation corresponds to the mindspore
-    implementation with `padding_mode=CALCULATED` .
-    So, currently the bprop function of Im2Col only supports
-    the CALCULATED mode.
-    """
-    kernel_size = self.ksizes
-    dilation = self.dilations
-    stride = self.strides
-    padding = (self.pads[0], self.pads[-1])
-    col2im = Col2Im(kernel_size=kernel_size,
-                    dilation=dilation,
-                    stride=stride,
-                    padding=padding)
-
-    def bprop(x, out, dout):
-        x_shape = P.TensorShape()(x)[2:]
-        dx = col2im(dout, x_shape)
-        return (dx,)
-
-    return bprop
-
-
 @bprop_getters.register(P.ExtractVolumePatches)
 def get_bprop_extract_volume_patches(self):
     """Generate bprop for ExtractVolumePatches"""
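The removed bprop expressed the gradient of Im2Col through Col2Im, i.e. the fold operation that is the adjoint of patch extraction. As background, a plain NumPy sketch of that adjoint relationship (single channel, stride 1, no padding; not the removed MindSpore code):

import numpy as np


def im2col(x, k):
    # Unfold an (H, W) array into k*k sliding patches, stride 1, no padding.
    h, w = x.shape
    out_h, out_w = h - k + 1, w - k + 1
    cols = np.empty((k * k, out_h * out_w), dtype=x.dtype)
    idx = 0
    for i in range(k):
        for j in range(k):
            cols[idx] = x[i:i + out_h, j:j + out_w].reshape(-1)
            idx += 1
    return cols


def col2im(cols, shape, k):
    # Adjoint of im2col: scatter-add the patch columns back onto the (H, W) grid.
    h, w = shape
    out_h, out_w = h - k + 1, w - k + 1
    x = np.zeros(shape, dtype=cols.dtype)
    idx = 0
    for i in range(k):
        for j in range(k):
            x[i:i + out_h, j:j + out_w] += cols[idx].reshape(out_h, out_w)
            idx += 1
    return x


# Adjoint check: <im2col(x), y> == <x, col2im(y)>.
rng = np.random.default_rng(0)
x = rng.standard_normal((6, 6))
y = rng.standard_normal((9, 16))  # k=3: 9 patch rows, (6-3+1)**2 = 16 positions
assert np.isclose(np.sum(im2col(x, 3) * y), np.sum(x * col2im(y, (6, 6), 3)))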
mindspore/ops/_grad_experimental/grad_comm_ops.py
CHANGED

@@ -92,7 +92,8 @@ def get_bprop_send(self):
     """Generate bprop for Send."""
     shape = self.get_attr_dict()["shape"]
     dtype = self.get_attr_dict()["dtype"]
-
+    tag = self.get_attr_dict()["sr_tag"]
+    send_grad = Receive(tag, self.rank, shape, dtype, self.group_back)
     virtual_input = Tensor(0.0, dtype)
 
     def bprop(x, out, dout):

@@ -105,7 +106,8 @@ def get_bprop_send(self):
 @bprop_getters.register(Receive)
 def get_bprop_receive(self):
     """Generate bprop for Receive."""
-
+    tag = self.get_attr_dict()["sr_tag"]
+    receive_grad = Send(tag, self.rank, self.group_back)
     depend = P.Depend()
     cast = P.Cast()
     out_tensor = Tensor(0.0, mstype.float16)
mindspore/ops/_grad_experimental/grad_inner_ops.py
CHANGED

@@ -36,6 +36,14 @@ def get_bprop_parallel_resize_bilinear(self):
     return bprop
 
 
+@bprop_getters.register(P.inner_ops.GenerateEodMask)
+def get_bprop_generate_eod_mask(self):
+
+    def bprop(x, out, dout):
+        return dout, dout
+    return bprop
+
+
 @bprop_getters.register(inner.PsROIPooling)
 def get_bprop_ps_roi_pooling(self):
     """Grad definition for `PsROIPooling` operation."""