mindspore-2.2.11-cp38-none-any.whl → mindspore-2.2.14-cp38-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +2 -1
- mindspore/_akg/akg/topi/cpp/impl.py +1 -1
- mindspore/_akg/akg/tvm/_ffi/base.py +1 -1
- mindspore/_c_dataengine.cpython-38-aarch64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-38-aarch64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-38-aarch64-linux-gnu.so +0 -0
- mindspore/_mindspore_offline_debug.cpython-38-aarch64-linux-gnu.so +0 -0
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/common/tensor.py +0 -2
- mindspore/communication/management.py +3 -0
- mindspore/context.py +34 -4
- mindspore/dataset/engine/datasets.py +23 -0
- mindspore/dataset/engine/validators.py +1 -1
- mindspore/dataset/vision/py_transforms_util.py +2 -2
- mindspore/experimental/optim/lr_scheduler.py +5 -6
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc++.so.1 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
- mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +48 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/libakg.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
- mindspore/mindrecord/tools/cifar100_to_mr.py +49 -57
- mindspore/mindrecord/tools/cifar10_to_mr.py +46 -55
- mindspore/mindrecord/tools/csv_to_mr.py +3 -8
- mindspore/mindrecord/tools/mnist_to_mr.py +4 -9
- mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -4
- mindspore/nn/layer/activation.py +1 -1
- mindspore/nn/layer/embedding.py +2 -2
- mindspore/nn/loss/loss.py +1 -1
- mindspore/nn/optim/ada_grad.py +2 -2
- mindspore/nn/optim/sgd.py +3 -2
- mindspore/numpy/math_ops.py +1 -1
- mindspore/ops/__init__.py +3 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +0 -31
- mindspore/ops/_grad_experimental/grad_comm_ops.py +4 -2
- mindspore/ops/_grad_experimental/grad_inner_ops.py +8 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +37 -17
- mindspore/ops/_op_impl/aicpu/__init__.py +1 -0
- mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +38 -0
- mindspore/ops/function/array_func.py +6 -5
- mindspore/ops/function/debug_func.py +1 -1
- mindspore/ops/function/linalg_func.py +21 -11
- mindspore/ops/function/math_func.py +3 -0
- mindspore/ops/function/nn_func.py +13 -11
- mindspore/ops/function/parameter_func.py +2 -0
- mindspore/ops/function/sparse_unary_func.py +2 -2
- mindspore/ops/function/vmap_func.py +1 -0
- mindspore/ops/operations/_embedding_cache_ops.py +1 -1
- mindspore/ops/operations/_inner_ops.py +56 -1
- mindspore/ops/operations/_quant_ops.py +4 -4
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +15 -4
- mindspore/ops/operations/custom_ops.py +1 -1
- mindspore/ops/operations/debug_ops.py +1 -1
- mindspore/ops/operations/image_ops.py +3 -3
- mindspore/ops/operations/inner_ops.py +49 -0
- mindspore/ops/operations/math_ops.py +62 -0
- mindspore/ops/operations/nn_ops.py +7 -3
- mindspore/ops/operations/random_ops.py +2 -0
- mindspore/ops/operations/sparse_ops.py +4 -4
- mindspore/ops/silent_check.py +162 -0
- mindspore/parallel/__init__.py +3 -2
- mindspore/parallel/_auto_parallel_context.py +82 -3
- mindspore/parallel/_parallel_serialization.py +34 -2
- mindspore/parallel/_tensor.py +3 -1
- mindspore/parallel/_transformer/transformer.py +8 -8
- mindspore/parallel/checkpoint_transform.py +191 -45
- mindspore/profiler/parser/ascend_cluster_generator.py +111 -0
- mindspore/profiler/parser/ascend_communicate_generator.py +315 -0
- mindspore/profiler/parser/ascend_flops_generator.py +8 -2
- mindspore/profiler/parser/ascend_fpbp_generator.py +8 -2
- mindspore/profiler/parser/ascend_hccl_generator.py +2 -2
- mindspore/profiler/parser/ascend_msprof_exporter.py +30 -6
- mindspore/profiler/parser/ascend_msprof_generator.py +16 -5
- mindspore/profiler/parser/ascend_op_generator.py +15 -7
- mindspore/profiler/parser/ascend_timeline_generator.py +5 -2
- mindspore/profiler/parser/base_timeline_generator.py +11 -3
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +2 -1
- mindspore/profiler/parser/framework_parser.py +8 -2
- mindspore/profiler/parser/memory_usage_parser.py +8 -2
- mindspore/profiler/parser/minddata_analyzer.py +8 -2
- mindspore/profiler/parser/minddata_parser.py +1 -1
- mindspore/profiler/parser/msadvisor_analyzer.py +4 -2
- mindspore/profiler/parser/msadvisor_parser.py +9 -3
- mindspore/profiler/profiling.py +97 -25
- mindspore/rewrite/api/node.py +1 -1
- mindspore/rewrite/api/symbol_tree.py +2 -2
- mindspore/train/callback/_checkpoint.py +8 -8
- mindspore/train/callback/_landscape.py +2 -3
- mindspore/train/callback/_summary_collector.py +6 -7
- mindspore/train/dataset_helper.py +6 -0
- mindspore/train/model.py +17 -5
- mindspore/train/serialization.py +6 -1
- mindspore/train/summary/_writer_pool.py +1 -1
- mindspore/train/summary/summary_record.py +5 -6
- mindspore/version.py +1 -1
- {mindspore-2.2.11.dist-info → mindspore-2.2.14.dist-info}/METADATA +1 -1
- {mindspore-2.2.11.dist-info → mindspore-2.2.14.dist-info}/RECORD +120 -117
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- {mindspore-2.2.11.dist-info → mindspore-2.2.14.dist-info}/WHEEL +0 -0
- {mindspore-2.2.11.dist-info → mindspore-2.2.14.dist-info}/entry_points.txt +0 -0
- {mindspore-2.2.11.dist-info → mindspore-2.2.14.dist-info}/top_level.txt +0 -0
mindspore/mindrecord/tools/cifar10_to_mr.py
CHANGED

@@ -18,7 +18,6 @@ Cifar10 convert tool for MindRecord.
 
 from importlib import import_module
 import os
-import numpy as np
 
 from mindspore import log as logger
 from .cifar10 import Cifar10
@@ -26,10 +25,6 @@ from ..common.exceptions import PathNotExistsError
 from ..filewriter import FileWriter
 from ..shardutils import check_filename, ExceptionThread, SUCCESS, FAILED
 
-try:
-    cv_import = import_module("cv2")
-except ModuleNotFoundError:
-    cv_import = None
 
 __all__ = ['Cifar10ToMR']
 
@@ -57,6 +52,8 @@ class Cifar10ToMR:
     """
 
     def __init__(self, source, destination):
+        self.cv_import = import_module("cv2")
+
         check_filename(source)
         self.source = source
 
@@ -96,12 +93,12 @@ class Cifar10ToMR:
         test_labels = cifar10_data.Test.labels
         logger.info("test images label: {}".format(test_labels.shape))
 
-        data_list = _construct_raw_data(images, labels)
-        test_data_list = _construct_raw_data(test_images, test_labels)
+        data_list = self._construct_raw_data(images, labels)
+        test_data_list = self._construct_raw_data(test_images, test_labels)
 
-        if _generate_mindrecord(self.destination, data_list, fields, "img_train") != SUCCESS:
+        if self._generate_mindrecord(self.destination, data_list, fields, "img_train") != SUCCESS:
             return FAILED
-        if _generate_mindrecord(self.destination + "_test", test_data_list, fields, "img_test") != SUCCESS:
+        if self._generate_mindrecord(self.destination + "_test", test_data_list, fields, "img_test") != SUCCESS:
             return FAILED
         return SUCCESS
 
@@ -137,56 +134,50 @@ class Cifar10ToMR:
             raise t.exception
         return t.res
 
-
-def _construct_raw_data(images, labels):
-    """
-    Construct raw data from cifar10 data.
+    def _construct_raw_data(self, images, labels):
+        """
+        Construct raw data from cifar10 data.
 
-    Args:
-        images (list): image list from cifar10.
-        labels (list): label list from cifar10.
+        Args:
+            images (list): image list from cifar10.
+            labels (list): label list from cifar10.
 
-    Returns:
-        list[dict], data dictionary constructed from cifar10.
-    """
-
-    if not cv_import:
-        raise ModuleNotFoundError("opencv-python module not found, please use pip install it.")
-
-    raw_data = []
-    for i, img in enumerate(images):
-        label = np.int(labels[i][0])
-        _, img = cv_import.imencode(".jpeg", img[..., [2, 1, 0]])
-        row_data = {"id": int(i),
-                    "data": img.tobytes(),
-                    "label": int(label)}
-        raw_data.append(row_data)
-    return raw_data
+        Returns:
+            list[dict], data dictionary constructed from cifar10.
+        """
+        raw_data = []
+        for i, img in enumerate(images):
+            label = labels[i][0]
+            _, img = self.cv_import.imencode(".jpeg", img[..., [2, 1, 0]])
+            row_data = {"id": int(i),
+                        "data": img.tobytes(),
+                        "label": int(label)}
+            raw_data.append(row_data)
+        return raw_data
 
-
-def _generate_mindrecord(file_name, raw_data, fields, schema_desc):
-    """
-    Generate MindRecord file from raw data.
+    def _generate_mindrecord(self, file_name, raw_data, fields, schema_desc):
+        """
+        Generate MindRecord file from raw data.
 
-    Args:
-        file_name (str): File name of MindRecord File.
-        fields (list[str]): Fields would be set as index which
-            could not belong to blob fields and type could not be 'array' or 'bytes'.
-        raw_data (dict): dict of raw data.
-        schema_desc (str): String of schema description.
+        Args:
+            file_name (str): File name of MindRecord File.
+            fields (list[str]): Fields would be set as index which
+                could not belong to blob fields and type could not be 'array' or 'bytes'.
+            raw_data (dict): dict of raw data.
+            schema_desc (str): String of schema description.
 
-    Returns:
-        MSRStatus, SUCCESS or FAILED.
-    """
-
-    schema = {"id": {"type": "int64"}, "label": {"type": "int64"},
-              "data": {"type": "bytes"}}
+        Returns:
+            MSRStatus, SUCCESS or FAILED.
+        """
+        schema = {"id": {"type": "int64"}, "label": {"type": "int64"},
+                  "data": {"type": "bytes"}}
 
-    logger.info("transformed MindRecord schema is: {}".format(schema))
+        logger.info("transformed MindRecord schema is: {}".format(schema))
 
-    writer = FileWriter(file_name, 1)
-    writer.add_schema(schema, schema_desc)
-    if fields and isinstance(fields, list):
-        writer.add_index(fields)
-    writer.write_raw_data(raw_data)
-    return writer.commit()
+        writer = FileWriter(file_name, 1)
+        writer.add_schema(schema, schema_desc)
+        if fields and isinstance(fields, list):
+            writer.add_index(fields)
+        writer.write_raw_data(raw_data)
+        return writer.commit()
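The change above converts a module-level try/except import of the optional cv2 dependency into an unguarded import_module call in __init__, so a missing opencv-python now raises ModuleNotFoundError when the converter is constructed instead of being deferred to an "if not cv_import" check deep inside the conversion; the same refactor is applied to the other converter tools below. It also drops the np.int alias, which NumPy 1.24 removed. A minimal sketch of the pattern (class and method names here are illustrative, not MindSpore's):

from importlib import import_module

class ImageEncoder:
    def __init__(self):
        # Fail fast: raises ModuleNotFoundError here if cv2 is absent,
        # instead of carrying a cv_import=None sentinel around.
        self.cv_import = import_module("cv2")

    def to_jpeg_bytes(self, image):
        # cv2.imencode returns (retval, buffer)
        _, buf = self.cv_import.imencode(".jpeg", image)
        return buf.tobytes()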
mindspore/mindrecord/tools/csv_to_mr.py
CHANGED

@@ -22,10 +22,6 @@ from mindspore import log as logger
 from ..filewriter import FileWriter
 from ..shardutils import check_filename, ExceptionThread
 
-try:
-    pd = import_module("pandas")
-except ModuleNotFoundError:
-    pd = None
 
 __all__ = ['CsvToMR']
 
@@ -55,8 +51,7 @@ class CsvToMR:
     """
 
     def __init__(self, source, destination, columns_list=None, partition_number=1):
-        if not pd:
-            raise Exception("Module pandas is not found, please use pip install it.")
+        self.pd = import_module("pandas")
         if isinstance(source, str):
             check_filename(source)
             self.source = source
@@ -135,8 +130,8 @@ class CsvToMR:
         if not os.path.exists(self.source):
             raise IOError("Csv file {} do not exist.".format(self.source))
 
-        pd.set_option('display.max_columns', None)
-        df = pd.read_csv(self.source)
+        self.pd.set_option('display.max_columns', None)
+        df = self.pd.read_csv(self.source)
 
         csv_schema = self._get_schema(df)
 
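From the caller's side nothing changes; a hedged usage sketch (file paths are illustrative):

from mindspore.mindrecord import CsvToMR

# pandas is now imported inside __init__, so a missing install surfaces
# as ModuleNotFoundError at construction rather than a generic Exception.
CsvToMR("input.csv", "output.mindrecord").transform()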
mindspore/mindrecord/tools/mnist_to_mr.py
CHANGED

@@ -25,10 +25,6 @@ from mindspore import log as logger
 from ..filewriter import FileWriter
 from ..shardutils import check_filename, ExceptionThread, SUCCESS, FAILED
 
-try:
-    cv_import = import_module("cv2")
-except ModuleNotFoundError:
-    cv_import = None
 
 __all__ = ['MnistToMR']
 
@@ -58,6 +54,8 @@ class MnistToMR:
     """
 
    def __init__(self, source, destination, partition_number=1):
+        self.cv_import = import_module("cv2")
+
         self.image_size = 28
         self.num_channels = 1
 
@@ -89,9 +87,6 @@ class MnistToMR:
 
     # pylint: disable=missing-docstring
     def run(self):
-        if not cv_import:
-            raise ModuleNotFoundError("opencv-python module not found, please use pip install it.")
-
         if self._transform_train() == FAILED:
             return FAILED
         if self._transform_test() == FAILED:
@@ -155,7 +150,7 @@ class MnistToMR:
         train_data = self._extract_images(self.train_data_filename_)
         train_labels = self._extract_labels(self.train_labels_filename_)
         for data, label in zip(train_data, train_labels):
-            _, img = cv_import.imencode(".jpeg", data)
+            _, img = self.cv_import.imencode(".jpeg", data)
             yield {"label": int(label), "data": img.tobytes()}
 
     def _mnist_test_iterator(self):
@@ -168,7 +163,7 @@ class MnistToMR:
         test_data = self._extract_images(self.test_data_filename_)
         test_labels = self._extract_labels(self.test_labels_filename_)
         for data, label in zip(test_data, test_labels):
-            _, img = cv_import.imencode(".jpeg", data)
+            _, img = self.cv_import.imencode(".jpeg", data)
             yield {"label": int(label), "data": img.tobytes()}
 
     def _transform_train(self):
mindspore/mindrecord/tools/tfrecord_to_mr.py
CHANGED

@@ -97,10 +97,7 @@ class TFRecordToMR:
     """
 
     def __init__(self, source, destination, feature_dict, bytes_fields=None):
-        try:
-            self.tf = import_module("tensorflow")  # just used to convert tfrecord to mindrecord
-        except ModuleNotFoundError:
-            raise Exception("Module tensorflow is not found, please use pip install it.")
+        self.tf = import_module("tensorflow")  # just used to convert tfrecord to mindrecord
 
         if self.tf.__version__ < SupportedTensorFlowVersion:
             raise Exception("Module tensorflow version must be greater or equal {}.".format(SupportedTensorFlowVersion))
mindspore/nn/layer/activation.py
CHANGED
@@ -782,7 +782,7 @@ class Tanhshrink(Cell):
     ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>> import mindspore
+        >>> import mindspore as ms
         >>> from mindspore import Tensor, nn
         >>> import numpy as np
         >>> x = Tensor(np.array([1, 2, 3, 2, 1]), ms.float16)
mindspore/nn/layer/embedding.py
CHANGED
@@ -522,12 +522,12 @@ class MultiFieldEmbeddingLookup(EmbeddingLookup):
           this interface. Type is Int32, Int64.
         - **input_values** (Tensor) - The shape of tensor is :math:`(batch\_size, seq\_length)`.
           Specifies the weights of elements of the input_indices. The lookout vector will multiply with
-          the input_values. Type is
+          the input_values. Type is float32.
         - **field_ids** (Tensor) - The shape of tensor is :math:`(batch\_size, seq\_length)`.
           Specifies the field id of elements of the input_indices. Type is Int32.
 
     Outputs:
-        Tensor, the shape of tensor is :math:`(batch\_size, field\_size, embedding\_size)`. Type is
+        Tensor, the shape of tensor is :math:`(batch\_size, field\_size, embedding\_size)`. Type is float32.
 
     Raises:
         TypeError: If `vocab_size` or `embedding_size` or `field_size` is not an int.
mindspore/nn/loss/loss.py
CHANGED
@@ -1996,7 +1996,7 @@ class FocalLoss(LossBase):
         >>> import mindspore.nn as nn
         >>> logits = ms.Tensor([[0.8, 1.4], [0.5, 0.9], [1.2, 0.9]], ms.float32)
         >>> labels = ms.Tensor([[1], [1], [0]], ms.int32)
-        >>> focalloss = nn.FocalLoss(weight=Tensor([1, 2]), gamma=2.0, reduction='mean')
+        >>> focalloss = nn.FocalLoss(weight=ms.Tensor([1, 2]), gamma=2.0, reduction='mean')
         >>> output = focalloss(logits, labels)
         >>> print(output)
         0.12516622
mindspore/nn/optim/ada_grad.py
CHANGED
@@ -162,7 +162,7 @@ class Adagrad(Optimizer):
     ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
-        >>> import
+        >>> from mindspore import train
         >>> import mindspore.nn as nn
         >>>
         >>> # Define the network structure of LeNet5. Refer to
@@ -185,7 +185,7 @@ class Adagrad(Optimizer):
         >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'.
         >>>
         >>> loss = nn.SoftmaxCrossEntropyWithLogits()
-        >>> model =
+        >>> model = train.Model(net, loss_fn=loss, optimizer=optim)
     """
 
     @opt_init_args_register
mindspore/nn/optim/sgd.py
CHANGED
@@ -193,9 +193,9 @@ class SGD(Optimizer):
                             "or 'weight_decay' set in grouped 'params' must be float or int type.")
 
         if hasattr(self, "group_weight_decay") and self.group_weight_decay:
-            self.opt = tuple(P.SGD(dampening,
+            self.opt = tuple(P.SGD(dampening, 0.0, nesterov) for _ in self.group_weight_decay)
         else:
-            self.opt = tuple([P.SGD(dampening,
+            self.opt = tuple([P.SGD(dampening, 0.0, nesterov)] * len(self._parameters))
 
         self.momentum = Parameter(Tensor(momentum, mstype.float32), name="momentum")
 
@@ -220,6 +220,7 @@ class SGD(Optimizer):
         params = self._parameters
         accum = self.accum
         stat = self.stat
+        gradients = self.decay_weight(gradients)
         gradients = self.flatten_gradients(gradients)
         gradients = self.gradients_centralization(gradients)
         gradients = self.scale_grad(gradients)
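The second hunk inserts decay_weight at the head of the gradient pipeline, so weight decay is folded into the gradients before flattening, centralization, and loss-scale correction. A toy NumPy sketch of that ordering for one dense SGD step (illustrative only, not MindSpore's fused kernel):

import numpy as np

def sgd_step(w, grad, velocity, lr=0.1, momentum=0.9, weight_decay=1e-4):
    grad = grad + weight_decay * w          # decay_weight: fold the L2 penalty in first
    velocity = momentum * velocity + grad   # then momentum accumulation
    return w - lr * velocity, velocity

w, v = np.ones(3), np.zeros(3)
w, v = sgd_step(w, np.array([0.5, -0.2, 0.1]), v)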
mindspore/numpy/math_ops.py
CHANGED
@@ -4285,7 +4285,7 @@ def argmin(a, axis=None):
 
     Examples:
         >>> import mindspore.numpy as np
-        >>> a = np.arange(10, 16).reshape(2, 3)
+        >>> a = np.arange(10, 16).reshape(2, 3).astype(np.float32)
         >>> print(np.argmin(a))
         0
         >>> print(np.argmin(a, axis=0))
mindspore/ops/__init__.py
CHANGED
@@ -34,6 +34,7 @@ from mindspore.ops.composite import *
 from mindspore.ops.operations import *
 from mindspore.ops.function import *
 from mindspore.ops.functional import *
+from mindspore.ops.silent_check import _silent_check
 
 __primitive__ = [
     "prim_attr_register", "Primitive", "PrimitiveWithInfer", "PrimitiveWithCheck", "signature"
@@ -48,3 +49,5 @@ __all__.extend(composite.__all__)
 __all__.extend(operations.__all__)
 __all__.extend(functional.__all__)
 __all__.extend(function.__all__)
+
+_silent_check()
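_silent_check() comes from the new mindspore/ops/silent_check.py (+162 lines, not expanded in this diff) and now runs once when mindspore.ops is imported. A sketch of the run-once, import-time hook pattern this relies on; the body is an assumption, since the module's internals are not shown here:

_already_ran = False

def _silent_check():
    """Run once at import time; subsequent imports are no-ops."""
    global _already_ran
    if _already_ran:
        return
    _already_ran = True
    # hypothetical: consult the environment/backend and, if enabled, swap
    # selected ops for variants that watch for silent data corruption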
mindspore/ops/_grad_experimental/grad_array_ops.py
CHANGED

@@ -36,8 +36,6 @@ from mindspore.ops.operations.array_ops import ScatterAddWithAxis
 from mindspore.ops.operations.array_ops import Expand
 from mindspore.ops.operations.array_ops import SegmentMean
 from mindspore.ops.operations.array_ops import AffineGrid
-from mindspore.ops.operations.array_ops import Im2Col
-from mindspore.ops.operations.array_ops import Col2Im
 from mindspore.ops.operations.array_ops import MaskedScatter
 from mindspore.ops.operations.array_ops import MaskedSelect
 from mindspore.ops.operations.array_ops import CountNonZero
@@ -360,35 +358,6 @@ def get_bprop_resize_nearest_neighbor_v2(self):
     return bprop
 
 
-@bprop_getters.register(Im2Col)
-def get_bprop_im2col(self):
-    """
-    Generate bprop for Im2Col
-
-    Im2Col, corresponding to torch's UnFold operator.
-    The Unfold operator has no `padding_mode` attribute,
-    and it's implementation corresponds to the mindspore
-    implementation with `padding_mode=CALCULATED` .
-    So, currently the bprop function of Im2Col only supports
-    the CALCULATED mode.
-    """
-    kernel_size = self.ksizes
-    dilation = self.dilations
-    stride = self.strides
-    padding = (self.pads[0], self.pads[-1])
-    col2im = Col2Im(kernel_size=kernel_size,
-                    dilation=dilation,
-                    stride=stride,
-                    padding=padding)
-
-    def bprop(x, out, dout):
-        x_shape = P.TensorShape()(x)[2:]
-        dx = col2im(dout, x_shape)
-        return (dx,)
-
-    return bprop
-
-
 @bprop_getters.register(P.ExtractVolumePatches)
 def get_bprop_extract_volume_patches(self):
     """Generate bprop for ExtractVolumePatches"""
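The removed bprop used Col2Im as the backward of Im2Col. That works because col2im is the adjoint of im2col: it scatter-adds patch gradients back into the input, summing over overlaps. A self-contained NumPy sketch (single channel, no padding or dilation) that verifies the adjoint identity <im2col(x), g> = <x, col2im(g)>:

import numpy as np

def im2col(x, k, stride=1):
    """Gather every k-by-k patch of a 2-D array into a column."""
    h, w = x.shape
    oh, ow = (h - k) // stride + 1, (w - k) // stride + 1
    cols = np.empty((k * k, oh * ow))
    for i in range(oh):
        for j in range(ow):
            cols[:, i * ow + j] = x[i * stride:i * stride + k,
                                    j * stride:j * stride + k].ravel()
    return cols

def col2im(cols, shape, k, stride=1):
    """Adjoint of im2col: scatter-add columns back, summing over overlaps."""
    x = np.zeros(shape)
    h, w = shape
    oh, ow = (h - k) // stride + 1, (w - k) // stride + 1
    for i in range(oh):
        for j in range(ow):
            x[i * stride:i * stride + k,
              j * stride:j * stride + k] += cols[:, i * ow + j].reshape(k, k)
    return x

x = np.random.randn(5, 5)
g = np.random.randn(4, 16)   # cotangent of im2col(x) with k=2: (k*k, 4*4 patches)
# the adjoint identity is exactly why Col2Im computes the Im2Col gradient
assert np.isclose((im2col(x, 2) * g).sum(), (x * col2im(g, (5, 5), 2)).sum())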
mindspore/ops/_grad_experimental/grad_comm_ops.py
CHANGED

@@ -92,7 +92,8 @@ def get_bprop_send(self):
     """Generate bprop for Send."""
     shape = self.get_attr_dict()["shape"]
     dtype = self.get_attr_dict()["dtype"]
-    send_grad = Receive(self.sr_tag, self.rank, shape, dtype, self.group_back)
+    tag = self.get_attr_dict()["sr_tag"]
+    send_grad = Receive(tag, self.rank, shape, dtype, self.group_back)
     virtual_input = Tensor(0.0, dtype)
 
     def bprop(x, out, dout):
@@ -105,7 +106,8 @@ def get_bprop_send(self):
 @bprop_getters.register(Receive)
 def get_bprop_receive(self):
     """Generate bprop for Receive."""
-    receive_grad = Send(self.sr_tag, self.rank, self.group_back)
+    tag = self.get_attr_dict()["sr_tag"]
+    receive_grad = Send(tag, self.rank, self.group_back)
     depend = P.Depend()
     cast = P.Cast()
     out_tensor = Tensor(0.0, mstype.float16)
mindspore/ops/_grad_experimental/grad_inner_ops.py
CHANGED

@@ -36,6 +36,14 @@ def get_bprop_parallel_resize_bilinear(self):
     return bprop
 
 
+@bprop_getters.register(P.inner_ops.GenerateEodMask)
+def get_bprop_generate_eod_mask(self):
+
+    def bprop(x, out, dout):
+        return dout, dout
+    return bprop
+
+
 @bprop_getters.register(inner.PsROIPooling)
 def get_bprop_ps_roi_pooling(self):
     """Grad definition for `PsROIPooling` operation."""
mindspore/ops/_grad_experimental/grad_math_ops.py
CHANGED

@@ -18,11 +18,13 @@
 import numpy as np
 import mindspore.numpy as mnp
 from mindspore.common import dtype as mstype
+import mindspore.ops as ops
 from mindspore.ops import functional as F
 from mindspore.ops import operations as P
 from mindspore import Tensor
 from mindspore.ops.operations.math_ops import Real, Imag, Complex, Angle
-from mindspore.ops.operations.math_ops import Polar
+from mindspore.ops.operations.math_ops import Polar, SilentCheck
+from mindspore.ops.operations._inner_ops import _MirrorSilentCheck
 from mindspore.ops.operations import _grad_ops as G
 from mindspore.ops.operations.math_ops import Lgamma
 from mindspore.ops.operations.math_ops import Digamma
@@ -763,6 +765,7 @@ def get_bprop_fft_with_size(self):
     to_tensor_op = P.ScalarToTensor()
     type_op = P.DType()
     concat_op = P.Concat()
+    concat_op_last = P.Concat(axis=-1)
     ones_op = P.Ones()
     zeros_op = P.Zeros()
     real_op = P.Real()
@@ -794,8 +797,7 @@ def get_bprop_fft_with_size(self):
                            signal_sizes=offset_shape[-1:])
     irfft2d_ = FFTWithSize(signal_ndim=2, inverse=True, real=True, norm="backward", onesided=onesided,
                            signal_sizes=offset_shape[-2:])
-    irfft3d_ = FFTWithSize(signal_ndim=3, inverse=True, real=
-                           signal_sizes=offset_shape[-3:])
+    irfft3d_ = FFTWithSize(signal_ndim=3, inverse=True, real=False, norm="backward", onesided=onesided)
     if inverse is False:
         if onesided is True:
             terms = 0
@@ -811,6 +813,7 @@ def get_bprop_fft_with_size(self):
             vec_mask = complex_op(1 - 2 * (mnp.arange(0, input_shape[-1], 1, input_type) % 2),
                                   zeros_op(input_shape[-1], input_type))
             terms = real_op(dout_first) + is_even * real_op(dout_last * vec_mask)
+            dx = to_tensor_op(0.5, input_type) * (dx * rfft_offset_size + terms) * rfft_norm_offset
         elif signal_ndim == 2:
             dx = irfft2d_(dout)
             arange_inner = mnp.arange(0, input_shape[-2], 1, input_type)
@@ -852,26 +855,27 @@ def get_bprop_fft_with_size(self):
                                                  dout_shape, [input_shape[-1]])))
             dout_last_term = dout_last_term * vec_mask
             terms = real_op(dout_first_term) + is_even * real_op(dout_last_term)
+            dx = to_tensor_op(0.5, input_type) * (dx * rfft_offset_size + terms) * rfft_norm_offset
         elif signal_ndim == 3:
-
-
+            zeros_shape = offset_shape[:-1] + (offset_shape[-1] - dout_shape[-1],)
+            zeros_values = zeros_op(zeros_shape, input_type)
+            zeros_padding = complex_op(zeros_values, zeros_values)
+            dout = concat_op_last((dout, zeros_padding))
+            dx = real_op(irfft3d_(dout)) * real_op(offset_size)
         else:
             dx = irfft_fn(dout) * real_op(offset_size)
     else:
         dx = rfft_fn(dout)
         if onesided is True:
-
-
-
-
-
-
-
-
-
-            dx = dx * complex_op(irfft_offset_size, zeros_op(1, output_type))
-        else:
-            dx = dx * complex_op(offset_size, zeros_op(1, output_type))
+            is_odd = dout_shape[-1] % 2
+            last_shape = offset_shape[-1]
+            mask = concat_op((ones_op(1, output_type), 2.0 * ones_op(
+                (last_shape - 2 + is_odd,), output_type), ones_op((1 - is_odd,), output_type)))
+            dx = dx * complex_op(mask, zeros_op(shape_op(mask), output_type))
+            irfft_offset_size = to_tensor_op(
+                _fft_with_size_back_norm(shape_op(dout), norm, inverse, signal_ndim),
+                output_type)
+            dx = dx * complex_op(irfft_offset_size, zeros_op(1, output_type))
         else:
             dx = dx * complex_op(offset_size, zeros_op(1, output_type))
     return (dx,)
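In the rewritten onesided branch, mask works out to [1, 2, 2, ..., 2] with a trailing 1 when the full signal length is even: interior bins of a onesided real FFT stand for two conjugate-symmetric bins of the full spectrum, while the DC bin (and the Nyquist bin, for even lengths) stands for one. The same weights appear in Parseval's theorem for rfft, which gives a quick NumPy sanity check:

import numpy as np

x = np.random.randn(8)         # even length, so rfft has a Nyquist bin
X = np.fft.rfft(x)

mask = np.full(len(X), 2.0)
mask[0] = 1.0                  # DC bin appears once in the full FFT
if len(x) % 2 == 0:
    mask[-1] = 1.0             # Nyquist bin too (even lengths only)

assert np.allclose(np.sum(x**2), np.sum(mask * np.abs(X)**2) / len(x))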
@@ -1017,3 +1021,19 @@ def get_bprop_tensor_add(self):
         return binop_grad_common(x, y, dout, dout)
 
     return bprop
+
+
+@bprop_getters.register(_MirrorSilentCheck)
+def get_bprop_mirror_silent_check(self):
+    """Grad definition for '_MirrorSilentCheck' op"""
+    silent_check = SilentCheck(self.min_steps, self.thresh_l1, self.coeff_l1, self.thresh_l2, self.coeff_l2)
+    out_tensor = Tensor([0.0], mstype.float32)
+
+    def bporp(x, pre_val, min_val, max_val, n_step, loss_scale, out, dout):
+        if loss_scale is not None:
+            dout = dout / loss_scale
+        grad = ops.norm(dout)
+        dx, _, _, _, _ = silent_check(grad, dout, pre_val, min_val, max_val, n_step)
+        return (dx, out_tensor, out_tensor, out_tensor, out_tensor, out_tensor)
+
+    return bporp
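In this bprop, the incoming gradient is un-scaled by loss_scale, reduced to its L2 norm, and handed to the SilentCheck op together with running pre_val/min_val/max_val state; the op's thresholds are meant to flag gradient norms implausible enough to indicate silent data corruption on the device. A toy NumPy analogue of the norm-spike idea (thresholds and logic here are made up for illustration):

import numpy as np

def norm_spike_check(grad_norm, history, thresh=100.0, min_steps=10):
    """Flag a gradient norm far above the running mean once warmed up."""
    suspicious = len(history) >= min_steps and grad_norm > thresh * np.mean(history)
    history.append(grad_norm)
    return suspicious

history = []
for step_norm in [1.0, 1.1, 0.9, 1.0] * 5 + [500.0]:
    if norm_spike_check(step_norm, history):
        print("possible silent corruption at norm", step_norm)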
mindspore/ops/_op_impl/aicpu/__init__.py
CHANGED

@@ -60,6 +60,7 @@ from .init_data_set_queue import _init_data_set_queue_aicpu
 from .embedding_lookup import _embedding_lookup_aicpu
 from .padding import _padding_aicpu
 from .gather import _gather_aicpu
+from .generate_eod_mask import _generate_eod_mask_aicpu
 from .gather_grad import _gather_grad_aicpu
 from .gather_d_grad_v2 import _gather_d_grad_v2_aicpu
 from .gather_d import _gather_d_aicpu
mindspore/ops/_op_impl/aicpu/generate_eod_mask.py
ADDED

@@ -0,0 +1,38 @@
+# Copyright 2023 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+
+"""GenerateEodMask op"""
+from mindspore.ops.op_info_register import op_info_register, AiCPURegOp, DataType
+
+generate_eod_mask_op_info = AiCPURegOp("GenerateEodMask") \
+    .fusion_type("OPAQUE") \
+    .attr("eod_token_id", "int") \
+    .attr("n_pos", "int") \
+    .attr("n_step", "listint") \
+    .attr("n_error_mode", "str") \
+    .input(0, "inputs_ids", "required") \
+    .output(0, "position_ids", "required") \
+    .dtype_format(DataType.U16_Default, DataType.U16_Default) \
+    .dtype_format(DataType.U32_Default, DataType.U32_Default) \
+    .dtype_format(DataType.U64_Default, DataType.U64_Default) \
+    .dtype_format(DataType.I32_Default, DataType.I32_Default) \
+    .dtype_format(DataType.I64_Default, DataType.I64_Default) \
+    .get_op_info()
+
+
+@op_info_register(generate_eod_mask_op_info)
+def _generate_eod_mask_aicpu():
+    """GenerateEodMask AiCPU register"""
+    return
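The registration above only fixes the kernel's interface: inputs_ids in, position_ids out, with eod_token_id, n_pos, n_step, and n_error_mode attributes. Assuming the usual end-of-document semantics (position ids restart after each EOD token so packed sequences don't leak across document boundaries), a toy NumPy version of the core behavior, offered as an assumption rather than the kernel's actual algorithm:

import numpy as np

def eod_reset_position_ids(inputs_ids, eod_token_id):
    """Restart position ids at 0 after every EOD token (toy sketch)."""
    position_ids = np.zeros_like(inputs_ids)
    for b in range(inputs_ids.shape[0]):
        pos = 0
        for t in range(inputs_ids.shape[1]):
            position_ids[b, t] = pos
            pos = 0 if inputs_ids[b, t] == eod_token_id else pos + 1
    return position_ids

ids = np.array([[5, 9, 2, 7, 7, 2, 4]])   # token 2 marks end-of-document
print(eod_reset_position_ids(ids, eod_token_id=2))  # [[0 1 2 0 1 2 0]]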
mindspore/ops/function/array_func.py
CHANGED

@@ -268,7 +268,7 @@ def cat(tensors, axis=0):
 
     Returns:
         Tensor, the shape is :math:`(x_1, x_2, ..., \sum_{i=1}^Nx_{mi}, ..., x_R)`.
-
+        The data type is the same with `tensors`.
 
     Raises:
         TypeError: If `axis` is not an int.
@@ -660,7 +660,7 @@ def one_hot(indices, depth, on_value=1, off_value=0, axis=-1):
 
     Note:
         If the input indices is rank `N`, the output will have rank `N+1`. The new axis is created at dimension `axis`.
-        On Ascend, if `on_value` is
+        On Ascend, if `on_value` is int64 dtype, `indices` must be int64 dtype.
 
     Args:
         indices(Tensor): A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
@@ -4228,6 +4228,7 @@ def space_to_batch_nd(input_x, block_size, paddings):
 
     Examples:
         >>> import numpy as np
+        >>> import mindspore
         >>> from mindspore import Tensor, ops
         >>> block_size = [2, 2]
         >>> paddings = [[0, 0], [0, 0]]
@@ -5395,8 +5396,8 @@ def masked_select(input, mask):
 
     Examples:
         >>> import numpy as np
-        >>> import mindspore
-        >>> from mindspore import Tensor
+        >>> import mindspore
+        >>> from mindspore import Tensor, ops
         >>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
         >>> mask = Tensor(np.array([1, 0, 1, 0]), mindspore.bool_)
         >>> output = ops.masked_select(x, mask)
@@ -6520,7 +6521,7 @@ def topk(input, k, dim=None, largest=True, sorted=True):
 
     Args:
         input (Tensor): Input to be computed, data type must be float16, float32 or int32.
-        k (int): The number of top or bottom elements to be computed along the last dimension
+        k (int): The number of top or bottom elements to be computed along the last dimension.
         dim (int, optional): The dimension to sort along. Default: ``None`` .
         largest (bool, optional): If largest is ``False`` then the k smallest elements are returned.
             Default: ``True`` .
mindspore/ops/function/debug_func.py
CHANGED

@@ -51,7 +51,7 @@ def print_(*input_x):
 
     Examples:
         >>> import numpy as np
-        >>> from mindspore import Tensor
+        >>> from mindspore import Tensor, ops
         >>> x = Tensor(np.ones([2, 1]).astype(np.int32))
         >>> y = Tensor(np.ones([2, 2]).astype(np.int32))
         >>> result = ops.print_('Print Tensor x and Tensor y:', x, y)