mindspore-2.2.11-cp39-cp39-macosx_11_0_arm64.whl → mindspore-2.2.14-cp39-cp39-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic; see the registry listing for details.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +2 -1
- mindspore/_c_dataengine.cpython-39-darwin.so +0 -0
- mindspore/_c_expression.cpython-39-darwin.so +0 -0
- mindspore/_c_mindrecord.cpython-39-darwin.so +0 -0
- mindspore/_mindspore_offline_debug.cpython-39-darwin.so +0 -0
- mindspore/common/tensor.py +0 -2
- mindspore/communication/management.py +3 -0
- mindspore/context.py +34 -4
- mindspore/dataset/engine/datasets.py +23 -0
- mindspore/dataset/engine/validators.py +1 -1
- mindspore/dataset/vision/py_transforms_util.py +2 -2
- mindspore/experimental/optim/lr_scheduler.py +5 -6
- mindspore/lib/libmindspore_backend.dylib +0 -0
- mindspore/lib/libmindspore_common.dylib +0 -0
- mindspore/lib/libmindspore_core.dylib +0 -0
- mindspore/lib/libmindspore_grpc.15.dylib +0 -0
- mindspore/lib/libmindspore_shared_lib.dylib +0 -0
- mindspore/mindrecord/tools/cifar100_to_mr.py +49 -57
- mindspore/mindrecord/tools/cifar10_to_mr.py +46 -55
- mindspore/mindrecord/tools/csv_to_mr.py +3 -8
- mindspore/mindrecord/tools/mnist_to_mr.py +4 -9
- mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -4
- mindspore/nn/layer/activation.py +1 -1
- mindspore/nn/layer/embedding.py +2 -2
- mindspore/nn/loss/loss.py +1 -1
- mindspore/nn/optim/ada_grad.py +2 -2
- mindspore/nn/optim/sgd.py +3 -2
- mindspore/numpy/math_ops.py +1 -1
- mindspore/ops/__init__.py +3 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +0 -31
- mindspore/ops/_grad_experimental/grad_comm_ops.py +4 -2
- mindspore/ops/_grad_experimental/grad_inner_ops.py +8 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +37 -17
- mindspore/ops/_op_impl/aicpu/__init__.py +1 -0
- mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +38 -0
- mindspore/ops/function/array_func.py +6 -5
- mindspore/ops/function/debug_func.py +1 -1
- mindspore/ops/function/linalg_func.py +21 -11
- mindspore/ops/function/math_func.py +3 -0
- mindspore/ops/function/nn_func.py +13 -11
- mindspore/ops/function/parameter_func.py +2 -0
- mindspore/ops/function/sparse_unary_func.py +2 -2
- mindspore/ops/function/vmap_func.py +1 -0
- mindspore/ops/operations/_embedding_cache_ops.py +1 -1
- mindspore/ops/operations/_inner_ops.py +56 -1
- mindspore/ops/operations/_quant_ops.py +4 -4
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +15 -4
- mindspore/ops/operations/custom_ops.py +1 -1
- mindspore/ops/operations/debug_ops.py +1 -1
- mindspore/ops/operations/image_ops.py +3 -3
- mindspore/ops/operations/inner_ops.py +49 -0
- mindspore/ops/operations/math_ops.py +62 -0
- mindspore/ops/operations/nn_ops.py +7 -3
- mindspore/ops/operations/random_ops.py +2 -0
- mindspore/ops/operations/sparse_ops.py +4 -4
- mindspore/ops/silent_check.py +162 -0
- mindspore/parallel/__init__.py +3 -2
- mindspore/parallel/_auto_parallel_context.py +82 -3
- mindspore/parallel/_parallel_serialization.py +34 -2
- mindspore/parallel/_tensor.py +3 -1
- mindspore/parallel/_transformer/transformer.py +8 -8
- mindspore/parallel/checkpoint_transform.py +191 -45
- mindspore/profiler/parser/ascend_cluster_generator.py +111 -0
- mindspore/profiler/parser/ascend_communicate_generator.py +315 -0
- mindspore/profiler/parser/ascend_flops_generator.py +8 -2
- mindspore/profiler/parser/ascend_fpbp_generator.py +8 -2
- mindspore/profiler/parser/ascend_hccl_generator.py +2 -2
- mindspore/profiler/parser/ascend_msprof_exporter.py +30 -6
- mindspore/profiler/parser/ascend_msprof_generator.py +16 -5
- mindspore/profiler/parser/ascend_op_generator.py +15 -7
- mindspore/profiler/parser/ascend_timeline_generator.py +5 -2
- mindspore/profiler/parser/base_timeline_generator.py +11 -3
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +2 -1
- mindspore/profiler/parser/framework_parser.py +8 -2
- mindspore/profiler/parser/memory_usage_parser.py +8 -2
- mindspore/profiler/parser/minddata_analyzer.py +8 -2
- mindspore/profiler/parser/minddata_parser.py +1 -1
- mindspore/profiler/parser/msadvisor_analyzer.py +4 -2
- mindspore/profiler/parser/msadvisor_parser.py +9 -3
- mindspore/profiler/profiling.py +97 -25
- mindspore/rewrite/api/node.py +1 -1
- mindspore/rewrite/api/symbol_tree.py +2 -2
- mindspore/train/callback/_checkpoint.py +8 -8
- mindspore/train/callback/_landscape.py +2 -3
- mindspore/train/callback/_summary_collector.py +6 -7
- mindspore/train/dataset_helper.py +6 -0
- mindspore/train/model.py +17 -5
- mindspore/train/serialization.py +6 -1
- mindspore/train/summary/_writer_pool.py +1 -1
- mindspore/train/summary/summary_record.py +5 -6
- mindspore/version.py +1 -1
- {mindspore-2.2.11.dist-info → mindspore-2.2.14.dist-info}/METADATA +1 -1
- {mindspore-2.2.11.dist-info → mindspore-2.2.14.dist-info}/RECORD +98 -94
- {mindspore-2.2.11.dist-info → mindspore-2.2.14.dist-info}/WHEEL +0 -0
- {mindspore-2.2.11.dist-info → mindspore-2.2.14.dist-info}/entry_points.txt +0 -0
- {mindspore-2.2.11.dist-info → mindspore-2.2.14.dist-info}/top_level.txt +0 -0
mindspore/.commit_id
CHANGED
@@ -1 +1 @@
-__commit_id__ = ''[sha1]:
+__commit_id__ = ''[sha1]:4f30cd5f,[branch]:(HEAD,origin/r2.2,r2.2)''
mindspore/__init__.py
CHANGED
@@ -29,7 +29,8 @@ from mindspore.context import GRAPH_MODE, PYNATIVE_MODE, set_context, get_context
 from mindspore.version import __version__
 from mindspore.profiler import Profiler, EnvProfiler
 from mindspore.parallel import set_algo_parameters, get_algo_parameters, reset_algo_parameters, \
-    rank_list_for_transform, transform_checkpoint_by_rank, transform_checkpoints, merge_pipeline_strategys, shard
+    rank_list_for_transform, transform_checkpoint_by_rank, transform_checkpoints, merge_pipeline_strategys, shard, \
+    load_segmented_checkpoints
 from mindspore.rewrite import SymbolTree, ScopedValue, Node, NodeType, TreeNodeHelper
 from mindspore.safeguard import obfuscate_ckpt, load_obf_params_into_net
 from mindspore._check_jit_forbidden_api import get_obj_module_and_name_info, is_jit_forbidden_module, \

mindspore/_c_dataengine.cpython-39-darwin.so
Binary file
mindspore/_c_expression.cpython-39-darwin.so
Binary file
mindspore/_c_mindrecord.cpython-39-darwin.so
Binary file
mindspore/_mindspore_offline_debug.cpython-39-darwin.so
Binary file
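The only API-visible change in __init__.py is the new top-level re-export. A minimal sketch of what it enables; the function's signature is not part of this diff, so only the import is shown:

    # New in 2.2.14: importable from the package root.
    from mindspore import load_segmented_checkpoints
    # The underlying definition, per the import line above:
    from mindspore.parallel import load_segmented_checkpoints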
mindspore/common/tensor.py
CHANGED
@@ -959,8 +959,6 @@ class Tensor(Tensor_, metaclass=_TensorMeta):
         [11. 2.]
         """
         self._init_check()
-        if self.dtype == mstype.bfloat16:
-            raise TypeError(f"For asnumpy, the type of tensor cannot be BFloat16, but got {self.dtype}.")
         return Tensor_.asnumpy(self)

     def numpy(self):
mindspore/communication/management.py
CHANGED
@@ -74,6 +74,9 @@ def _check_parallel_envs():
     """
     if not GlobalComm.CHECK_ENVS:
         return
+    compile_level = os.getenv("MS_SIMULATION_LEVEL")
+    if compile_level:
+        return
     rank_id_str = os.getenv("RANK_ID")
     if not rank_id_str:
         raise RuntimeError("Environment variables RANK_ID has not been exported, please export variables 'RANK_ID'.")
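The added guard means any non-empty MS_SIMULATION_LEVEL skips the RANK_ID validation. A standalone sketch of the new control flow (names mirror the diff; this is not the full real function body):

    import os

    os.environ["MS_SIMULATION_LEVEL"] = "0"  # any non-empty value short-circuits the check

    def _check_parallel_envs_sketch():
        if os.getenv("MS_SIMULATION_LEVEL"):
            return  # simulation mode: RANK_ID is not required
        if not os.getenv("RANK_ID"):
            raise RuntimeError("Environment variables RANK_ID has not been exported.")

    _check_parallel_envs_sketch()  # no longer raises when RANK_ID is unset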
mindspore/context.py
CHANGED
@@ -284,7 +284,8 @@ class _Context:
             'conv_allow_hf32': [True, False],
             'exception_dump': ["0", "1", "2"],
             'op_precision_mode': (str,),
-            'parallel_speed_up_json_path': (str, None)
+            'parallel_speed_up_json_path': (str, None),
+            'topo_order': (dict,)
         }
         ascend_cfg_setters = {
             'precision_mode': self._get_ascend_config_setter('precision_mode'),
@@ -294,7 +295,8 @@ class _Context:
             'conv_allow_hf32': self._get_ascend_config_setter('conv_allow_hf32', lambda v: "1" if v else "0"),
             'exception_dump': self._get_ascend_config_setter('exception_dump'),
             'op_precision_mode': self._set_op_precision_mode,
-            'parallel_speed_up_json_path': self._set_speedup_config_path
+            'parallel_speed_up_json_path': self._set_speedup_config_path,
+            'topo_order': self._set_topo_order
         }
         ascend_cfg_set = tuple(ascend_cfg_modes.keys())
         for ascend_key, ascend_value in ascend_config.items():
@@ -618,6 +620,28 @@ class _Context:
                              f"got '{op_precision_path}'.")
         self.set_param(ms_ctx_param.op_precision_mode, ascend_value)

+    def _set_topo_order(self, topo_order):
+        """
+        Set topo order.
+
+        Args:
+            topo_order (dict):
+                key: str, the name of the graph.
+                value: str, the topo order of the graph, should be one of 'dfs', 'bfs', 'rdfs'.
+        """
+        valid_order = {'dfs', 'bfs', 'rdfs'}
+        if not isinstance(topo_order, dict):
+            raise TypeError(f"For 'ascend_config', the 'topo_order' should be a dict, "
+                            f"got '{type(topo_order)}'.")
+        for k, v in topo_order.items():
+            if not isinstance(k, str):
+                raise TypeError("key {} is not a str".format(k))
+            if v not in valid_order:
+                raise ValueError("value {} should be one of {}.".format(v, valid_order))
+
+        options_str = json.dumps(topo_order)
+        self.set_param(ms_ctx_param.topo_order, options_str)
+
     def _set_speedup_config_path(self, speedup_config_path):
         """"Check and set speedup config for auto parallel."""
         if speedup_config_path is None or speedup_config_path == "":
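With the new setter in place, 'topo_order' can be passed through ascend_config. A hedged usage sketch (Ascend backend only; the graph-name key below is illustrative and not taken from this diff):

    import mindspore as ms

    # Maps graph name -> traversal order; values must be 'dfs', 'bfs' or 'rdfs',
    # exactly as _set_topo_order validates above.
    ms.set_context(ascend_config={"topo_order": {"graph_0": "bfs"}})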
@@ -681,7 +705,7 @@ def _context():
                  auto_parallel_search_mode=str, search_mode=str, parameter_broadcast=bool, strategy_ckpt_load_file=str,
                  strategy_ckpt_save_file=str, full_batch=bool, enable_parallel_optimizer=bool, enable_alltoall=bool,
                  all_reduce_fusion_config=list, pipeline_stages=int, pipeline_segments=int,
-                 parallel_optimizer_config=dict,
+                 pipeline_config=dict, parallel_optimizer_config=dict,
                  comm_fusion=dict, strategy_ckpt_config=dict)
 def set_auto_parallel_context(**kwargs):
     r"""
@@ -708,7 +732,7 @@ def set_auto_parallel_context(**kwargs):
              enable_parallel_optimizer    strategy_ckpt_save_file
              parallel_optimizer_config    dataset_strategy
              enable_alltoall              pipeline_stages
-
+             pipeline_config              auto_parallel_search_mode
              \                            comm_fusion
              \                            strategy_ckpt_config
            ===========================  ===========================
@@ -777,6 +801,12 @@ def set_auto_parallel_context(**kwargs):
                         distributed alone in the pipeline. The total devices will be divided into 'pipeline_stags'
                         stages.
                         Default: ``1`` .
+        pipeline_config (dict): A dict contains the keys and values for setting the pipeline parallelism configuration.
+                        It supports the following keys:
+
+                        - pipeline_interleave(bool): Indicates whether to enable the interleaved execution mode.
+                        - pipeline_scheduler(str): Indicates the scheduling mode for pipeline parallelism. Only support
+                          ``gpipe/1f1b``.
         parallel_optimizer_config (dict): A dict contains the keys and values for setting the parallel optimizer
                         configure. The configure provides more detailed behavior control about parallel training
                         when parallel optimizer is enabled. The configure will be effective when we use
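A hedged sketch of the new pipeline_config key; both sub-keys come straight from the docstring added above, while the stage count and parallel mode are illustrative:

    import mindspore as ms

    ms.set_auto_parallel_context(
        parallel_mode="semi_auto_parallel",
        pipeline_stages=2,
        pipeline_config={"pipeline_interleave": True, "pipeline_scheduler": "1f1b"},
    )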
mindspore/dataset/engine/datasets.py
CHANGED
@@ -4051,6 +4051,15 @@ class ConcatDataset(UnionBaseDataset):

             self._sampler = sampler
             self._children_sizes = [c.get_dataset_size() for c in self.children]
+
+            # Recursive access to other child concat nodes
+            def set_child(node):
+                for c in node.children:
+                    if isinstance(c, ConcatDataset):
+                        c.use_sampler(sampler)
+                    set_child(c)
+            set_child(self)
+
             return

         if sampler.is_shuffled():
@@ -4186,6 +4195,12 @@ class _ToDevice:
         """
         return self._to_device.GetDataInfo()

+    def get_mbuf_queue_size(self):
+        """
+        Get element numbers inside mbuf.
+        """
+        return self._to_device.GetMbufQueueSize()
+
     def get_send_info(self):
         """
         In sink mode, it returns the send information of dataset at this moment.
@@ -4300,6 +4315,14 @@ class TransferDataset(Dataset):
             return self._to_device.get_data_info()
         raise RuntimeError("Calling get_data_info with bad state.")

+    def get_mbuf_queue_size(self):
+        """
+        Get element numbers inside mbuf.
+        """
+        if self._to_device is not None:
+            return self._to_device.get_mbuf_queue_size()
+        raise RuntimeError("Device queue is not init, call get_mbuf_queue_size failed.")
+
     def get_send_info(self):
         """
         In sink mode, it returns the send information of dataset at this moment.
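The ConcatDataset change makes use_sampler recurse into nested concat nodes. A small sketch of the situation it fixes; the dataset contents are illustrative:

    import mindspore.dataset as ds

    a = ds.NumpySlicesDataset([1, 2, 3], column_names=["x"], shuffle=False)
    b = ds.NumpySlicesDataset([4, 5, 6], column_names=["x"], shuffle=False)
    c = ds.NumpySlicesDataset([7, 8, 9], column_names=["x"], shuffle=False)

    inner = a + b          # ConcatDataset
    outer = inner + c      # ConcatDataset nested inside another ConcatDataset
    # In 2.2.14 the sampler now propagates to `inner` as well, not only to `outer`.
    outer.use_sampler(ds.DistributedSampler(num_shards=2, shard_id=0, shuffle=False))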
mindspore/dataset/engine/validators.py
CHANGED
@@ -1021,7 +1021,7 @@ def check_minddataset(method):
         dataset_file = param_dict.get('dataset_files')
         if isinstance(dataset_file, list):
             if len(dataset_file) > 4096:
-
+                log.warning("The number of MindRecord files greater than 4096 may cause slow dataset initialization.")
             for f in dataset_file:
                 check_file(f)
         else:
mindspore/dataset/vision/py_transforms_util.py
CHANGED
@@ -1032,8 +1032,8 @@ def perspective(img, start_points, end_points, interpolation=Inter.BICUBIC):
     for pt1, pt2 in zip(transformed_points, original_points):
         matrix.append([pt1[0], pt1[1], 1, 0, 0, 0, -pt2[0] * pt1[0], -pt2[0] * pt1[1]])
         matrix.append([0, 0, 0, pt1[0], pt1[1], 1, -pt2[1] * pt1[0], -pt2[1] * pt1[1]])
-    matrix_a = np.array(matrix, dtype=
-    matrix_b = np.array(original_points, dtype=
+    matrix_a = np.array(matrix, dtype=float)
+    matrix_b = np.array(original_points, dtype=float).reshape(8)
     res = np.linalg.lstsq(matrix_a, matrix_b, rcond=None)[0]
     return res.tolist()
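Standalone check of the repaired least-squares solve: dtype=float replaces the truncated dtype from 2.2.11, and reshape(8) flattens the right-hand side so np.linalg.lstsq sees a vector. The four point pairs below are illustrative:

    import numpy as np

    transformed_points = [(0, 0), (100, 0), (100, 100), (0, 100)]
    original_points = [(10, 5), (90, 8), (95, 95), (5, 90)]

    matrix = []
    for pt1, pt2 in zip(transformed_points, original_points):
        matrix.append([pt1[0], pt1[1], 1, 0, 0, 0, -pt2[0] * pt1[0], -pt2[0] * pt1[1]])
        matrix.append([0, 0, 0, pt1[0], pt1[1], 1, -pt2[1] * pt1[0], -pt2[1] * pt1[1]])

    matrix_a = np.array(matrix, dtype=float)                      # shape (8, 8)
    matrix_b = np.array(original_points, dtype=float).reshape(8)  # shape (8,)
    res = np.linalg.lstsq(matrix_a, matrix_b, rcond=None)[0]
    print(res.tolist())  # eight perspective-transform coefficients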
mindspore/experimental/optim/lr_scheduler.py
CHANGED
@@ -64,12 +64,11 @@ class LRScheduler:
     ...         super(ConstantLR, self).__init__(optimizer, last_epoch)
     ...
     ...     def get_lr(self):
-    ...         lrs = [lr.value() for lr in self._last_lr]
     ...         if self.last_epoch == 0:
-    ...             return [lr * self.factor for lr in
+    ...             return [lr * self.factor for lr in self._last_lr]
     ...         if self.last_epoch != self.total_iters:
-    ...             return
-    ...             return
+    ...             return [lr * 1. for lr in self._last_lr]
+    ...         return [lr / self.factor for lr in self._last_lr]
     >>>
     >>> net = nn.Dense(8, 2)
     >>> optimizer = optim.SGD(net.trainable_params(), 0.01)
@@ -913,7 +912,7 @@ class ReduceLROnPlateau:
     >>> metrics = [1, 1.5, 1.8, 0.4, 0.5]
     >>> for i in range(5):
     ...     scheduler.step(metrics[i])
-    ...     current_lr = scheduler.
+    ...     current_lr = scheduler.get_last_lr()
     ...     print(current_lr)
     [Tensor(shape=[], dtype=Float32, value= 0.1)]
     [Tensor(shape=[], dtype=Float32, value= 0.01)]
@@ -1258,7 +1257,7 @@ class CosineAnnealingWarmRestarts(LRScheduler):
     >>> optimizer = optim.SGD(net.trainable_params(), lr=0.1, momentum=0.9)
     >>> scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, 2)
     >>> iters = 3
-    >>> for epoch in range(
+    >>> for epoch in range(2):
     ...     for i in range(iters):
    ...         scheduler.step(epoch + i / iters)
    ...         current_lr = scheduler.get_last_lr()
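The corrected ConstantLR doctest, assembled into a runnable script. The factor/total_iters initialisation is assumed, since this diff only shows get_lr and the super().__init__ call:

    import mindspore.nn as nn
    from mindspore.experimental import optim

    class ConstantLR(optim.lr_scheduler.LRScheduler):
        def __init__(self, optimizer, factor=0.5, total_iters=3, last_epoch=-1):
            self.factor = factor            # assumed attributes, mirrored from the doctest body
            self.total_iters = total_iters
            super(ConstantLR, self).__init__(optimizer, last_epoch)

        def get_lr(self):
            if self.last_epoch == 0:
                return [lr * self.factor for lr in self._last_lr]
            if self.last_epoch != self.total_iters:
                return [lr * 1. for lr in self._last_lr]
            return [lr / self.factor for lr in self._last_lr]

    net = nn.Dense(8, 2)
    optimizer = optim.SGD(net.trainable_params(), 0.01)
    scheduler = ConstantLR(optimizer)
    for _ in range(4):
        scheduler.step()
        print(scheduler.get_last_lr())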
mindspore/lib/libmindspore_backend.dylib
Binary file
mindspore/lib/libmindspore_common.dylib
Binary file
mindspore/lib/libmindspore_core.dylib
Binary file
mindspore/lib/libmindspore_grpc.15.dylib
Binary file
mindspore/lib/libmindspore_shared_lib.dylib
Binary file
mindspore/mindrecord/tools/cifar100_to_mr.py
CHANGED
@@ -18,7 +18,6 @@ Cifar100 convert tool for MindRecord.

 from importlib import import_module
 import os
-import numpy as np

 from mindspore import log as logger
 from .cifar100 import Cifar100
@@ -26,10 +25,6 @@ from ..common.exceptions import PathNotExistsError
 from ..filewriter import FileWriter
 from ..shardutils import check_filename, ExceptionThread, SUCCESS, FAILED

-try:
-    cv_import = import_module("cv2")
-except ModuleNotFoundError:
-    cv_import = None

 __all__ = ['Cifar100ToMR']

@@ -57,6 +52,8 @@ class Cifar100ToMR:
     """

     def __init__(self, source, destination):
+        self.cv_import = import_module("cv2")
+
         check_filename(source)
         self.source = source

@@ -100,12 +97,12 @@ class Cifar100ToMR:
         test_coarse_labels = cifar100_data.Test.coarse_labels
         logger.info("test images coarse label: {}".format(coarse_labels.shape))

-        data_list = _construct_raw_data(images, fine_labels, coarse_labels)
-        test_data_list = _construct_raw_data(test_images, test_fine_labels, test_coarse_labels)
+        data_list = self._construct_raw_data(images, fine_labels, coarse_labels)
+        test_data_list = self._construct_raw_data(test_images, test_fine_labels, test_coarse_labels)

-        if _generate_mindrecord(self.destination, data_list, fields, "img_train") != SUCCESS:
+        if self._generate_mindrecord(self.destination, data_list, fields, "img_train") != SUCCESS:
             return FAILED
-        if _generate_mindrecord(self.destination + "_test", test_data_list, fields, "img_test") != SUCCESS:
+        if self._generate_mindrecord(self.destination + "_test", test_data_list, fields, "img_test") != SUCCESS:
             return FAILED
         return SUCCESS

@@ -142,57 +139,52 @@ class Cifar100ToMR:
         raise t.exception
         return t.res

+    def _construct_raw_data(self, images, fine_labels, coarse_labels):
+        """
+        Construct raw data from cifar100 data.
+
+        Args:
+            images (list): image list from cifar100.
+            fine_labels (list): fine label list from cifar100.
+            coarse_labels (list): coarse label list from cifar100.
+
+        Returns:
+            list[dict], data dictionary constructed from cifar100.
+        """
+        raw_data = []
+        for i, img in enumerate(images):
+            fine_label = fine_labels[i][0]
+            coarse_label = coarse_labels[i][0]
+            _, img = self.cv_import.imencode(".jpeg", img[..., [2, 1, 0]])
+            row_data = {"id": int(i),
+                        "data": img.tobytes(),
+                        "fine_label": int(fine_label),
+                        "coarse_label": int(coarse_label)}
+            raw_data.append(row_data)
+        return raw_data
+
+    def _generate_mindrecord(self, file_name, raw_data, fields, schema_desc):
+        """
+        Generate MindRecord file from raw data.
+
+        Args:
+            file_name (str): File name of MindRecord File.
+            fields (list[str]): Fields would be set as index which
+                could not belong to blob fields and type could not be 'array' or 'bytes'.
+            raw_data (dict): Dict of raw data.
+            schema_desc (str): String of schema description.
+
+        Returns:
+            MSRStatus, SUCCESS or FAILED.
+        """
+        schema = {"id": {"type": "int64"}, "fine_label": {"type": "int64"},
+                  "coarse_label": {"type": "int64"}, "data": {"type": "bytes"}}
+
+        logger.info("transformed MindRecord schema is: {}".format(schema))
+
+        writer = FileWriter(file_name, 1)
+        writer.add_schema(schema, schema_desc)
+        if fields and isinstance(fields, list):
+            writer.add_index(fields)
+        writer.write_raw_data(raw_data)
+        return writer.commit()
-
-def _construct_raw_data(images, fine_labels, coarse_labels):
-    raw_data = []
-    for i, img in enumerate(images):
-        fine_label = np.int(fine_labels[i][0])
-        coarse_label = np.int(coarse_labels[i][0])
-        _, img = cv_import.imencode(".jpeg", img[..., [2, 1, 0]])
-        row_data = {"id": int(i),
-                    "data": img.tobytes(),
-                    "fine_label": int(fine_label),
-                    "coarse_label": int(coarse_label)}
-        raw_data.append(row_data)
-    return raw_data
-
-
-def _generate_mindrecord(file_name, raw_data, fields, schema_desc):
-    """
-    Generate MindRecord file from raw data.
-
-        schema_desc (str): String of schema description.
-
-    Returns:
-        MSRStatus, SUCCESS or FAILED.
-    """
-    schema = {"id": {"type": "int64"}, "fine_label": {"type": "int64"},
-              "coarse_label": {"type": "int64"}, "data": {"type": "bytes"}}
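End-to-end usage is unchanged; only the failure mode moved. A sketch with illustrative paths, noting that a missing opencv-python now raises ModuleNotFoundError at construction instead of inside the conversion:

    from mindspore.mindrecord import Cifar100ToMR

    converter = Cifar100ToMR("./cifar-100-binary", "./output/cifar100.mindrecord")
    converter.transform(fields=["fine_label", "coarse_label"])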
mindspore/mindrecord/tools/cifar10_to_mr.py
CHANGED
@@ -18,7 +18,6 @@ Cifar10 convert tool for MindRecord.

 from importlib import import_module
 import os
-import numpy as np

 from mindspore import log as logger
 from .cifar10 import Cifar10
@@ -26,10 +25,6 @@ from ..common.exceptions import PathNotExistsError
 from ..filewriter import FileWriter
 from ..shardutils import check_filename, ExceptionThread, SUCCESS, FAILED

-try:
-    cv_import = import_module("cv2")
-except ModuleNotFoundError:
-    cv_import = None

 __all__ = ['Cifar10ToMR']

@@ -57,6 +52,8 @@ class Cifar10ToMR:
     """

     def __init__(self, source, destination):
+        self.cv_import = import_module("cv2")
+
         check_filename(source)
         self.source = source

@@ -96,12 +93,12 @@ class Cifar10ToMR:
         test_labels = cifar10_data.Test.labels
         logger.info("test images label: {}".format(test_labels.shape))

-        data_list = _construct_raw_data(images, labels)
-        test_data_list = _construct_raw_data(test_images, test_labels)
+        data_list = self._construct_raw_data(images, labels)
+        test_data_list = self._construct_raw_data(test_images, test_labels)

-        if _generate_mindrecord(self.destination, data_list, fields, "img_train") != SUCCESS:
+        if self._generate_mindrecord(self.destination, data_list, fields, "img_train") != SUCCESS:
             return FAILED
-        if _generate_mindrecord(self.destination + "_test", test_data_list, fields, "img_test") != SUCCESS:
+        if self._generate_mindrecord(self.destination + "_test", test_data_list, fields, "img_test") != SUCCESS:
             return FAILED
         return SUCCESS

@@ -137,56 +134,50 @@ class Cifar10ToMR:
         raise t.exception
         return t.res

+    def _construct_raw_data(self, images, labels):
+        """
+        Construct raw data from cifar10 data.
+
+        Args:
+            images (list): image list from cifar10.
+            labels (list): label list from cifar10.
+
+        Returns:
+            list[dict], data dictionary constructed from cifar10.
+        """
+        raw_data = []
+        for i, img in enumerate(images):
+            label = labels[i][0]
+            _, img = self.cv_import.imencode(".jpeg", img[..., [2, 1, 0]])
+            row_data = {"id": int(i),
+                        "data": img.tobytes(),
+                        "label": int(label)}
+            raw_data.append(row_data)
+        return raw_data
+
+    def _generate_mindrecord(self, file_name, raw_data, fields, schema_desc):
+        """
+        Generate MindRecord file from raw data.
+
+        Args:
+            file_name (str): File name of MindRecord File.
+            fields (list[str]): Fields would be set as index which
+                could not belong to blob fields and type could not be 'array' or 'bytes'.
+            raw_data (dict): dict of raw data.
+            schema_desc (str): String of schema description.
+
+        Returns:
+            MSRStatus, SUCCESS or FAILED.
+        """
+        schema = {"id": {"type": "int64"}, "label": {"type": "int64"},
+                  "data": {"type": "bytes"}}
+
+        logger.info("transformed MindRecord schema is: {}".format(schema))
+
+        writer = FileWriter(file_name, 1)
+        writer.add_schema(schema, schema_desc)
+        if fields and isinstance(fields, list):
+            writer.add_index(fields)
+        writer.write_raw_data(raw_data)
+        return writer.commit()
-
-def _construct_raw_data(images, labels):
-    """
-    Construct raw data from cifar10 data.
-
-    Args:
-        images (list): image list from cifar10.
-        labels (list): label list from cifar10.
-
-    Returns:
-        list[dict], data dictionary constructed from cifar10.
-    """
-
-    if not cv_import:
-        raise ModuleNotFoundError("opencv-python module not found, please use pip install it.")
-
-    raw_data = []
-    for i, img in enumerate(images):
-        label = np.int(labels[i][0])
-        _, img = cv_import.imencode(".jpeg", img[..., [2, 1, 0]])
-        row_data = {"id": int(i),
-                    "data": img.tobytes(),
-                    "label": int(label)}
-        raw_data.append(row_data)
-    return raw_data
-
-def _generate_mindrecord(file_name, raw_data, fields, schema_desc):
-    """
-    Generate MindRecord file from raw data.
-
-        could not belong to blob fields and type could not be 'array' or 'bytes'.
-        raw_data (dict): dict of raw data.
-        schema_desc (str): String of schema description.
-
-    Returns:
-        MSRStatus, SUCCESS or FAILED.
-    """
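The int(...) substitutions in both CIFAR tools exist because np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin is the drop-in replacement:

    import numpy as np

    label = np.uint8(7)
    print(int(label))  # works on every NumPy version; np.int(label) fails on NumPy >= 1.24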
mindspore/mindrecord/tools/csv_to_mr.py
CHANGED
@@ -22,10 +22,6 @@ from mindspore import log as logger
 from ..filewriter import FileWriter
 from ..shardutils import check_filename, ExceptionThread

-try:
-    pd = import_module("pandas")
-except ModuleNotFoundError:
-    pd = None

 __all__ = ['CsvToMR']

@@ -55,8 +51,7 @@ class CsvToMR:
     """

     def __init__(self, source, destination, columns_list=None, partition_number=1):
-
-            raise Exception("Module pandas is not found, please use pip install it.")
+        self.pd = import_module("pandas")
         if isinstance(source, str):
             check_filename(source)
             self.source = source
@@ -135,8 +130,8 @@ class CsvToMR:
         if not os.path.exists(self.source):
             raise IOError("Csv file {} do not exist.".format(self.source))

-        pd.set_option('display.max_columns', None)
-        df = pd.read_csv(self.source)
+        self.pd.set_option('display.max_columns', None)
+        df = self.pd.read_csv(self.source)

         csv_schema = self._get_schema(df)
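Usage sketch for the relocated pandas import, with illustrative paths; a missing pandas now surfaces as ModuleNotFoundError from __init__ rather than the old custom Exception:

    from mindspore.mindrecord import CsvToMR

    converter = CsvToMR("./input.csv", "./output/input.mindrecord")
    converter.transform()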
mindspore/mindrecord/tools/mnist_to_mr.py
CHANGED
@@ -25,10 +25,6 @@ from mindspore import log as logger
 from ..filewriter import FileWriter
 from ..shardutils import check_filename, ExceptionThread, SUCCESS, FAILED

-try:
-    cv_import = import_module("cv2")
-except ModuleNotFoundError:
-    cv_import = None

 __all__ = ['MnistToMR']

@@ -58,6 +54,8 @@ class MnistToMR:
     """

     def __init__(self, source, destination, partition_number=1):
+        self.cv_import = import_module("cv2")
+
         self.image_size = 28
         self.num_channels = 1

@@ -89,9 +87,6 @@ class MnistToMR:

     # pylint: disable=missing-docstring
     def run(self):
-        if not cv_import:
-            raise ModuleNotFoundError("opencv-python module not found, please use pip install it.")
-
         if self._transform_train() == FAILED:
             return FAILED
         if self._transform_test() == FAILED:
@@ -155,7 +150,7 @@ class MnistToMR:
         train_data = self._extract_images(self.train_data_filename_)
         train_labels = self._extract_labels(self.train_labels_filename_)
         for data, label in zip(train_data, train_labels):
-            _, img = cv_import.imencode(".jpeg", data)
+            _, img = self.cv_import.imencode(".jpeg", data)
             yield {"label": int(label), "data": img.tobytes()}

     def _mnist_test_iterator(self):
@@ -168,7 +163,7 @@ class MnistToMR:
         test_data = self._extract_images(self.test_data_filename_)
         test_labels = self._extract_labels(self.test_labels_filename_)
         for data, label in zip(test_data, test_labels):
-            _, img = cv_import.imencode(".jpeg", data)
+            _, img = self.cv_import.imencode(".jpeg", data)
             yield {"label": int(label), "data": img.tobytes()}

     def _transform_train(self):
mindspore/mindrecord/tools/tfrecord_to_mr.py
CHANGED
@@ -97,10 +97,7 @@ class TFRecordToMR:
     """

     def __init__(self, source, destination, feature_dict, bytes_fields=None):
-        try:
-            self.tf = import_module("tensorflow")  # just used to convert tfrecord to mindrecord
-        except ModuleNotFoundError:
-            raise Exception("Module tensorflow is not found, please use pip install it.")
+        self.tf = import_module("tensorflow")  # just used to convert tfrecord to mindrecord

         if self.tf.__version__ < SupportedTensorFlowVersion:
             raise Exception("Module tensorflow version must be greater or equal {}.".format(SupportedTensorFlowVersion))
mindspore/nn/layer/activation.py
CHANGED
@@ -782,7 +782,7 @@ class Tanhshrink(Cell):
         ``Ascend`` ``GPU`` ``CPU``

     Examples:
-        >>> import mindspore
+        >>> import mindspore as ms
         >>> from mindspore import Tensor, nn
         >>> import numpy as np
         >>> x = Tensor(np.array([1, 2, 3, 2, 1]), ms.float16)
mindspore/nn/layer/embedding.py
CHANGED
@@ -522,12 +522,12 @@ class MultiFieldEmbeddingLookup(EmbeddingLookup):
           this interface. Type is Int32, Int64.
         - **input_values** (Tensor) - The shape of tensor is :math:`(batch\_size, seq\_length)`.
           Specifies the weights of elements of the input_indices. The lookout vector will multiply with
-          the input_values. Type is
+          the input_values. Type is float32.
         - **field_ids** (Tensor) - The shape of tensor is :math:`(batch\_size, seq\_length)`.
           Specifies the field id of elements of the input_indices. Type is Int32.

     Outputs:
-        Tensor, the shape of tensor is :math:`(batch\_size, field\_size, embedding\_size)`. Type is
+        Tensor, the shape of tensor is :math:`(batch\_size, field\_size, embedding\_size)`. Type is float32.

     Raises:
         TypeError: If `vocab_size` or `embedding_size` or `field_size` is not an int.
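A minimal call matching the float32 types the corrected docstring pins down; shapes and ids are illustrative:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, nn

    net = nn.MultiFieldEmbeddingLookup(vocab_size=10, embedding_size=4, field_size=2)
    input_indices = Tensor(np.array([[2, 4, 6, 0], [1, 3, 5, 0]]), ms.int32)
    input_values = Tensor(np.ones((2, 4)), ms.float32)  # per the doc fix: float32
    field_ids = Tensor(np.array([[0, 1, 1, 0], [0, 0, 1, 0]]), ms.int32)
    out = net(input_indices, input_values, field_ids)   # shape (2, 2, 4), float32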
mindspore/nn/loss/loss.py
CHANGED
@@ -1996,7 +1996,7 @@ class FocalLoss(LossBase):
         >>> import mindspore.nn as nn
         >>> logits = ms.Tensor([[0.8, 1.4], [0.5, 0.9], [1.2, 0.9]], ms.float32)
         >>> labels = ms.Tensor([[1], [1], [0]], ms.int32)
-        >>> focalloss = nn.FocalLoss(weight=Tensor([1, 2]), gamma=2.0, reduction='mean')
+        >>> focalloss = nn.FocalLoss(weight=ms.Tensor([1, 2]), gamma=2.0, reduction='mean')
         >>> output = focalloss(logits, labels)
         >>> print(output)
         0.12516622