mindspore-2.3.0-cp310-cp310-win_amd64.whl → mindspore-2.4.0-cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mindspore/.commit_id +1 -1
- mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
- mindspore/Newtonsoft.Json.dll +0 -0
- mindspore/__init__.py +3 -1
- mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +50 -9
- mindspore/_extends/parse/compile_config.py +41 -0
- mindspore/_extends/parse/parser.py +9 -7
- mindspore/_extends/parse/standard_method.py +52 -14
- mindspore/_extends/pijit/pijit_func_white_list.py +350 -24
- mindspore/amp.py +24 -10
- mindspore/atlprov.dll +0 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/c1.dll +0 -0
- mindspore/c1xx.dll +0 -0
- mindspore/c2.dll +0 -0
- mindspore/common/__init__.py +6 -4
- mindspore/common/_pijit_context.py +190 -0
- mindspore/common/_register_for_tensor.py +2 -1
- mindspore/common/_tensor_overload.py +139 -0
- mindspore/common/api.py +102 -87
- mindspore/common/dump.py +5 -6
- mindspore/common/generator.py +1 -7
- mindspore/common/hook_handle.py +14 -26
- mindspore/common/mindir_util.py +2 -2
- mindspore/common/parameter.py +46 -13
- mindspore/common/recompute.py +39 -9
- mindspore/common/sparse_tensor.py +7 -3
- mindspore/common/tensor.py +209 -29
- mindspore/communication/__init__.py +1 -1
- mindspore/communication/_comm_helper.py +38 -3
- mindspore/communication/comm_func.py +310 -55
- mindspore/communication/management.py +14 -14
- mindspore/context.py +123 -22
- mindspore/dataset/__init__.py +1 -1
- mindspore/dataset/audio/__init__.py +1 -1
- mindspore/dataset/core/config.py +7 -0
- mindspore/dataset/core/validator_helpers.py +7 -0
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +72 -44
- mindspore/dataset/engine/datasets_audio.py +7 -7
- mindspore/dataset/engine/datasets_standard_format.py +53 -3
- mindspore/dataset/engine/datasets_text.py +20 -20
- mindspore/dataset/engine/datasets_user_defined.py +174 -104
- mindspore/dataset/engine/datasets_vision.py +33 -33
- mindspore/dataset/engine/iterators.py +29 -0
- mindspore/dataset/engine/obs/util.py +7 -0
- mindspore/dataset/engine/queue.py +114 -60
- mindspore/dataset/engine/serializer_deserializer.py +2 -2
- mindspore/dataset/engine/validators.py +34 -14
- mindspore/dataset/text/__init__.py +1 -4
- mindspore/dataset/transforms/__init__.py +0 -3
- mindspore/dataset/utils/line_reader.py +2 -0
- mindspore/dataset/vision/__init__.py +1 -4
- mindspore/dataset/vision/utils.py +1 -1
- mindspore/dataset/vision/validators.py +2 -1
- mindspore/dnnl.dll +0 -0
- mindspore/dpcmi.dll +0 -0
- mindspore/{nn/extend → experimental/es}/__init__.py +4 -11
- mindspore/experimental/es/embedding_service.py +883 -0
- mindspore/{nn/layer → experimental/es}/embedding_service_layer.py +218 -30
- mindspore/experimental/llm_boost/__init__.py +21 -0
- mindspore/{nn/extend/layer → experimental/llm_boost/atb}/__init__.py +4 -8
- mindspore/experimental/llm_boost/atb/boost_base.py +211 -0
- mindspore/experimental/llm_boost/atb/llama_boost.py +115 -0
- mindspore/experimental/llm_boost/atb/qwen_boost.py +101 -0
- mindspore/experimental/llm_boost/register.py +129 -0
- mindspore/experimental/llm_boost/utils.py +31 -0
- mindspore/experimental/optim/adamw.py +85 -0
- mindspore/experimental/optim/optimizer.py +3 -0
- mindspore/hal/__init__.py +3 -3
- mindspore/hal/contiguous_tensors_handle.py +175 -0
- mindspore/hal/stream.py +18 -0
- mindspore/include/api/model_group.h +13 -1
- mindspore/include/api/types.h +10 -10
- mindspore/include/dataset/config.h +2 -2
- mindspore/include/dataset/constants.h +2 -2
- mindspore/include/dataset/execute.h +2 -2
- mindspore/include/dataset/vision.h +4 -0
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +1 -1
- mindspore/mindrecord/filewriter.py +68 -51
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_np_dtype.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mint/__init__.py +495 -46
- mindspore/mint/distributed/__init__.py +31 -0
- mindspore/mint/distributed/distributed.py +254 -0
- mindspore/mint/nn/__init__.py +266 -21
- mindspore/mint/nn/functional.py +125 -19
- mindspore/mint/nn/layer/__init__.py +39 -0
- mindspore/mint/nn/layer/activation.py +133 -0
- mindspore/mint/nn/layer/normalization.py +477 -0
- mindspore/mint/nn/layer/pooling.py +110 -0
- mindspore/mint/optim/adamw.py +28 -7
- mindspore/mint/special/__init__.py +63 -0
- mindspore/msobj140.dll +0 -0
- mindspore/mspdb140.dll +0 -0
- mindspore/mspdbcore.dll +0 -0
- mindspore/mspdbst.dll +0 -0
- mindspore/mspft140.dll +0 -0
- mindspore/msvcdis140.dll +0 -0
- mindspore/msvcp140_1.dll +0 -0
- mindspore/msvcp140_2.dll +0 -0
- mindspore/msvcp140_atomic_wait.dll +0 -0
- mindspore/msvcp140_codecvt_ids.dll +0 -0
- mindspore/multiprocessing/__init__.py +2 -1
- mindspore/nn/__init__.py +0 -1
- mindspore/nn/cell.py +275 -93
- mindspore/nn/layer/activation.py +211 -44
- mindspore/nn/layer/basic.py +113 -3
- mindspore/nn/layer/embedding.py +120 -2
- mindspore/nn/layer/normalization.py +101 -5
- mindspore/nn/layer/padding.py +34 -48
- mindspore/nn/layer/pooling.py +161 -7
- mindspore/nn/layer/transformer.py +3 -3
- mindspore/nn/loss/__init__.py +2 -2
- mindspore/nn/loss/loss.py +84 -6
- mindspore/nn/optim/__init__.py +2 -1
- mindspore/nn/optim/adadelta.py +1 -1
- mindspore/nn/optim/adam.py +1 -1
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/tft_wrapper.py +127 -0
- mindspore/nn/wrap/cell_wrapper.py +12 -23
- mindspore/nn/wrap/grad_reducer.py +5 -5
- mindspore/nn/wrap/loss_scale.py +17 -3
- mindspore/numpy/__init__.py +1 -1
- mindspore/numpy/array_creations.py +65 -68
- mindspore/numpy/array_ops.py +64 -60
- mindspore/numpy/fft.py +610 -75
- mindspore/numpy/logic_ops.py +11 -10
- mindspore/numpy/math_ops.py +85 -84
- mindspore/numpy/utils_const.py +4 -4
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +6 -4
- mindspore/ops/_grad_experimental/grad_comm_ops.py +47 -3
- mindspore/ops/_grad_experimental/grad_math_ops.py +0 -22
- mindspore/ops/_vmap/vmap_array_ops.py +2 -4
- mindspore/ops/_vmap/vmap_math_ops.py +17 -1
- mindspore/ops/_vmap/vmap_nn_ops.py +43 -2
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +85 -7
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +2 -0
- mindspore/ops/auto_generate/gen_extend_func.py +734 -13
- mindspore/ops/auto_generate/gen_ops_def.py +2420 -381
- mindspore/ops/auto_generate/gen_ops_prim.py +5196 -1659
- mindspore/ops/auto_generate/pyboost_inner_prim.py +176 -56
- mindspore/ops/composite/base.py +85 -48
- mindspore/ops/composite/multitype_ops/_compile_utils.py +1 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -2
- mindspore/ops/function/__init__.py +22 -0
- mindspore/ops/function/array_func.py +490 -153
- mindspore/ops/function/debug_func.py +113 -1
- mindspore/ops/function/fft_func.py +15 -2
- mindspore/ops/function/grad/grad_func.py +3 -2
- mindspore/ops/function/math_func.py +558 -207
- mindspore/ops/function/nn_func.py +817 -383
- mindspore/ops/function/other_func.py +3 -2
- mindspore/ops/function/random_func.py +184 -8
- mindspore/ops/function/reshard_func.py +13 -11
- mindspore/ops/function/sparse_unary_func.py +1 -1
- mindspore/ops/function/vmap_func.py +3 -2
- mindspore/ops/functional.py +24 -14
- mindspore/ops/op_info_register.py +3 -3
- mindspore/ops/operations/__init__.py +6 -1
- mindspore/ops/operations/_grad_ops.py +2 -76
- mindspore/ops/operations/_infer_ops.py +1 -1
- mindspore/ops/operations/_inner_ops.py +71 -94
- mindspore/ops/operations/array_ops.py +12 -146
- mindspore/ops/operations/comm_ops.py +42 -53
- mindspore/ops/operations/custom_ops.py +83 -19
- mindspore/ops/operations/debug_ops.py +42 -10
- mindspore/ops/operations/manually_defined/_inner.py +12 -0
- mindspore/ops/operations/manually_defined/ops_def.py +265 -10
- mindspore/ops/operations/math_ops.py +12 -223
- mindspore/ops/operations/nn_ops.py +20 -114
- mindspore/ops/operations/other_ops.py +7 -4
- mindspore/ops/operations/random_ops.py +46 -1
- mindspore/ops/primitive.py +18 -6
- mindspore/ops_generate/arg_dtype_cast.py +2 -0
- mindspore/ops_generate/gen_aclnn_implement.py +11 -11
- mindspore/ops_generate/gen_constants.py +36 -0
- mindspore/ops_generate/gen_ops.py +67 -52
- mindspore/ops_generate/gen_ops_inner_prim.py +1 -1
- mindspore/ops_generate/gen_pyboost_func.py +131 -47
- mindspore/ops_generate/op_proto.py +10 -3
- mindspore/ops_generate/pyboost_utils.py +14 -1
- mindspore/ops_generate/template.py +43 -21
- mindspore/parallel/__init__.py +3 -1
- mindspore/parallel/_auto_parallel_context.py +28 -8
- mindspore/parallel/_cell_wrapper.py +83 -0
- mindspore/parallel/_parallel_serialization.py +47 -19
- mindspore/parallel/_tensor.py +81 -11
- mindspore/parallel/_utils.py +13 -1
- mindspore/parallel/algo_parameter_config.py +5 -5
- mindspore/parallel/checkpoint_transform.py +46 -39
- mindspore/parallel/cluster/process_entity/__init__.py +1 -1
- mindspore/parallel/cluster/process_entity/_api.py +31 -23
- mindspore/parallel/cluster/process_entity/_utils.py +2 -27
- mindspore/parallel/parameter_broadcast.py +3 -4
- mindspore/parallel/shard.py +162 -31
- mindspore/parallel/transform_safetensors.py +993 -0
- mindspore/pgodb140.dll +0 -0
- mindspore/pgort140.dll +0 -0
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/common/constant.py +29 -0
- mindspore/profiler/common/registry.py +47 -0
- mindspore/profiler/common/util.py +28 -0
- mindspore/profiler/dynamic_profiler.py +694 -0
- mindspore/profiler/envprofiling.py +17 -19
- mindspore/profiler/parser/ascend_analysis/constant.py +18 -0
- mindspore/profiler/parser/ascend_analysis/file_manager.py +25 -4
- mindspore/profiler/parser/ascend_analysis/function_event.py +43 -19
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +31 -26
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +56 -10
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +55 -8
- mindspore/profiler/parser/ascend_analysis/path_manager.py +313 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +27 -20
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +9 -2
- mindspore/profiler/parser/ascend_msprof_exporter.py +5 -4
- mindspore/profiler/parser/ascend_timeline_generator.py +27 -25
- mindspore/profiler/parser/base_timeline_generator.py +19 -25
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +25 -12
- mindspore/profiler/parser/framework_parser.py +1 -391
- mindspore/profiler/parser/gpu_analysis/__init__.py +14 -0
- mindspore/profiler/parser/gpu_analysis/function_event.py +44 -0
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +89 -0
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +72 -0
- mindspore/profiler/parser/memory_usage_parser.py +0 -154
- mindspore/profiler/parser/profiler_info.py +78 -6
- mindspore/profiler/profiler.py +153 -0
- mindspore/profiler/profiling.py +280 -412
- mindspore/rewrite/__init__.py +1 -2
- mindspore/rewrite/common/namespace.py +4 -4
- mindspore/rewrite/symbol_tree/symbol_tree.py +3 -3
- mindspore/run_check/_check_version.py +36 -103
- mindspore/safeguard/rewrite_obfuscation.py +591 -247
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tbbmalloc.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +4 -3
- mindspore/train/_utils.py +28 -2
- mindspore/train/amp.py +171 -53
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +85 -22
- mindspore/train/callback/_cluster_monitor.py +1 -1
- mindspore/train/callback/_flops_collector.py +1 -0
- mindspore/train/callback/_loss_monitor.py +3 -3
- mindspore/train/callback/_on_request_exit.py +134 -31
- mindspore/train/callback/_summary_collector.py +5 -5
- mindspore/train/callback/_tft_register.py +352 -0
- mindspore/train/dataset_helper.py +7 -3
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/metrics/roc.py +4 -4
- mindspore/train/mind_ir_pb2.py +44 -39
- mindspore/train/model.py +134 -58
- mindspore/train/serialization.py +336 -112
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +21 -0
- mindspore/utils/utils.py +60 -0
- mindspore/vcmeta.dll +0 -0
- mindspore/vcruntime140.dll +0 -0
- mindspore/vcruntime140_1.dll +0 -0
- mindspore/version.py +1 -1
- {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/METADATA +6 -2
- {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/RECORD +281 -275
- mindspore/include/c_api/ms/abstract.h +0 -67
- mindspore/include/c_api/ms/attribute.h +0 -197
- mindspore/include/c_api/ms/base/handle_types.h +0 -43
- mindspore/include/c_api/ms/base/macros.h +0 -32
- mindspore/include/c_api/ms/base/status.h +0 -33
- mindspore/include/c_api/ms/base/types.h +0 -283
- mindspore/include/c_api/ms/context.h +0 -102
- mindspore/include/c_api/ms/graph.h +0 -160
- mindspore/include/c_api/ms/node.h +0 -606
- mindspore/include/c_api/ms/tensor.h +0 -161
- mindspore/include/c_api/ms/value.h +0 -84
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/nn/extend/basic.py +0 -140
- mindspore/nn/extend/embedding.py +0 -143
- mindspore/nn/extend/layer/normalization.py +0 -109
- mindspore/nn/extend/pooling.py +0 -117
- mindspore/nn/layer/embedding_service.py +0 -531
- mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +0 -93
- mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +0 -66
- mindspore/ops/extend/__init__.py +0 -53
- mindspore/ops/extend/array_func.py +0 -218
- mindspore/ops/extend/math_func.py +0 -76
- mindspore/ops/extend/nn_func.py +0 -308
- mindspore/ops/silent_check.py +0 -162
- mindspore/profiler/parser/msadvisor_analyzer.py +0 -82
- mindspore/profiler/parser/msadvisor_parser.py +0 -240
- mindspore/train/callback/_mindio_ttp.py +0 -443
- {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/WHEEL +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/entry_points.txt +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/top_level.txt +0 -0
mindspore/hal/contiguous_tensors_handle.py
ADDED
@@ -0,0 +1,175 @@
+# This is the Python alloc contiguous memory handle.
+#
+# Copyright 2024-2025 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""Contiguous memory handle."""
+from mindspore.common.tensor import Tensor
+from mindspore.common.api import _convert_python_data
+from mindspore.common.dtype import type_size_in_bytes
+from mindspore._c_expression import slice_by_tensor_index, slice_by_padding_shape, \
+    combine_tensor_list_contiguous as combine_tensor_list, Tensor as Tensor_
+
+
+def combine_tensor_list_contiguous(tensor_list, enable_mem_align=True):
+    r"""
+    Return a contiguous memory handle where contiguous memory has been requested and slicing functionality is provided.
+
+    Args:
+        tensor_list (list[Tensor], Tuple[Tensor]): The tensor list to be stored.
+        enable_mem_align (bool, optional): Whether to enable the memory alignment function.
+            False is not supported. Default: ``True`` .
+
+    Returns:
+        ContiguousTensorsHandle, a manager with contiguous memory.
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore as ms
+        >>> from mindspore import Tensor
+        >>> from mindspore.hal.contiguous_tensors_handle import combine_tensor_list_contiguous
+        >>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
+        >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
+        >>> handle = combine_tensor_list_contiguous([x, y], True)
+        >>> print(handle[0].shape)
+        [1]
+        >>> print(handle[1: 3].asnumpy())
+        [2, 3]
+        >>> print(handle.slice_by_tensor_index(0, 1).asnumpy())
+        [1, 2, 3]
+    """
+    return ContiguousTensorsHandle(tensor_list, enable_mem_align)
+
+
+class ContiguousTensorsHandle:
+    r"""
+    ContiguousTensorsHandle is a handle that manages a block of contiguous memory.
+
+    Args:
+        tensor_list (list[Tensor], Tuple[Tensor]): The tensor list to be stored.
+        enable_mem_align (bool, optional): Whether to enable the memory alignment function.
+            False is not supported. Default: ``True`` .
+
+    Returns:
+        ContiguousTensorsHandle, a manager with contiguous memory.
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore as ms
+        >>> from mindspore import Tensor
+        >>> from mindspore.hal.contiguous_tensors_handle import ContiguousTensorsHandle
+        >>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
+        >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
+        >>> handle = ContiguousTensorsHandle([x, y], True)
+        >>> print(handle[0].shape)
+        [1]
+        >>> print(handle[1: 3].asnumpy())
+        [2, 3]
+    """
+
+    def __init__(self, tensor_list, enable_mem_align=True):
+        if isinstance(tensor_list, (list, tuple)):
+            for tensor in tensor_list:
+                if not isinstance(tensor, (Tensor, Tensor_)):
+                    raise TypeError(f"input list must be [Tensor, ...].")
+            if isinstance(tensor_list, list):
+                self.tensor_list = tuple(tensor_list)
+            else:
+                self.tensor_list = tensor_list
+        else:
+            raise TypeError(f"input list must be [Tensor, ...].")
+        if not isinstance(enable_mem_align, bool):
+            raise TypeError(f"enable_mem_align must be bool.")
+        padding_sizes_pair = combine_tensor_list(self.tensor_list, enable_mem_align)
+        self.before_padding_sizes = padding_sizes_pair[0]
+        self.after_padding_sizes = padding_sizes_pair[1]
+        self.total_padding_size = sum(self.after_padding_sizes)
+        self.handle_shape = self.total_padding_size / type_size_in_bytes(self.tensor_list[0].dtype)
+        self.enable_mem_align = enable_mem_align
+
+    def __getitem__(self, item):
+        """
+        Slice the combined buffer by element position.
+
+        :param item: an int index or a slice over the padded flat buffer.
+        :return: Tensor
+        """
+        start = 0
+        end = int(self.handle_shape)
+        if isinstance(item, slice):
+            if item.start is not None:
+                start = item.start
+            if item.stop is not None:
+                end = item.stop
+            if not isinstance(start, int) or not isinstance(end, int):
+                raise TypeError(f"slice input error.")
+            if start < 0 or end > self.handle_shape or start >= end:
+                raise ValueError(f"slice input error.")
+            return _convert_python_data(slice_by_padding_shape(self.tensor_list[0], start, end))
+        if not isinstance(item, int):
+            raise TypeError(f"slice input must be "
+                            f"1.index -> int."
+                            f"2.[start: end: step] -> [int: int: int].")
+        if item < 0 or item > self.handle_shape:
+            raise ValueError(f"slice input is out of tensor_list size.")
+        return _convert_python_data(slice_by_padding_shape(self.tensor_list[0], item, item + 1))
+
+    def __str__(self):
+        list_str = "Handle total size: " + str(self.total_padding_size) + "\n"
+        index = 0
+        for tensor in self.tensor_list:
+            list_str = list_str + "Tensor[" + str(index) + "]: " + str(tensor.asnumpy()) + "\n"
+            index += 1
+        return list_str
+
+    def slice_by_tensor_index(self, start=None, end=None):
+        """
+        Return the tensor which is sliced by tensor index.
+
+        Args:
+            start (int, None): Starting tensor index. Default: ``None``.
+            end (int, None): Ending tensor index (exclusive). Default: ``None``.
+
+        Returns:
+            Tensor, sliced by tensor index.
+
+        Raises:
+            TypeError: If `start` or `end` is neither an int nor None.
+            ValueError: If values of `start` or `end` are negative, or out of the list range, or start >= end.
+
+        Examples:
+            >>> import numpy as np
+            >>> import mindspore as ms
+            >>> from mindspore import Tensor
+            >>> from mindspore.hal.contiguous_tensors_handle import ContiguousTensorsHandle
+            >>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
+            >>> y = Tensor(np.array([4, 5, 6]).astype(np.float32))
+            >>> handle = ContiguousTensorsHandle([x, y], True)
+            >>> print(handle.slice_by_tensor_index(0, 1).asnumpy())
+            [1, 2, 3]
+        """
+        index_start = 0
+        index_end = len(self.tensor_list)
+        if start is not None:
+            index_start = start
+            if end is None:
+                index_end = index_start + 1
+        if end is not None:
+            index_end = end
+        if not isinstance(index_start, int) or not isinstance(index_end, int):
+            raise TypeError(f"slice input error.")
+
+        if index_start < 0 or index_end > len(self.tensor_list) or index_start >= index_end:
+            raise ValueError(f"slice input error.")
+        return _convert_python_data(slice_by_tensor_index(self.tensor_list, self.before_padding_sizes,
+                                                          self.after_padding_sizes, index_start, index_end))
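A note on the two slicing modes above: `__getitem__` indexes elements of the combined (padded) flat buffer, while `slice_by_tensor_index` indexes whole tensors from the original list. A minimal NumPy sketch of that bookkeeping, ignoring alignment padding (illustrative only, not part of the MindSpore API):

import numpy as np

def combine_flat(tensors):
    # Illustrative stand-in for combine_tensor_list_contiguous: concatenate
    # the tensors into one flat buffer and record per-tensor element counts.
    counts = [t.size for t in tensors]
    return np.concatenate([t.ravel() for t in tensors]), counts

x = np.array([1, 2, 3], dtype=np.float32)
y = np.array([4, 5, 6], dtype=np.float32)
flat, counts = combine_flat([x, y])

print(flat[1:3])          # element slice, like handle[1:3] -> [2. 3.]
print(flat[0:counts[0]])  # whole-tensor slice, like slice_by_tensor_index(0, 1) -> [1. 2. 3.]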
mindspore/hal/stream.py
CHANGED
@@ -18,6 +18,8 @@ from mindspore._c_expression import set_cur_stream as set_cur_stream_
 from mindspore._c_expression import synchronize as synchronize_
 from mindspore._c_expression import current_stream as current_stream_
 from mindspore._c_expression import default_stream as default_stream_
+from mindspore._c_expression import communication_stream as communication_stream_
+
 from mindspore import _checkparam as Validator
 from .event import Event
 
@@ -293,6 +295,20 @@ def default_stream():
     return Stream(stream=default_stream_())
 
 
+def communication_stream():
+    r"""
+    Return the communication stream on this device.
+
+    Returns:
+        stream (Stream), the communication stream.
+
+    Examples:
+        >>> import mindspore as ms
+        >>> comm_stream = ms.hal.communication_stream()
+    """
+    return Stream(stream=communication_stream_())
+
+
 class StreamCtx():
     r"""
     Context-manager that selects a given stream.
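Going by this diff, `mindspore.hal` now exposes `communication_stream()` alongside `current_stream()` and `default_stream()`. A hedged usage sketch (assumes a device target whose `mindspore.hal` stream APIs are functional, e.g. Ascend or GPU):

import mindspore as ms

comm_stream = ms.hal.communication_stream()
# StreamCtx (shown in the context lines above) redirects subsequently
# launched kernels onto the chosen stream.
with ms.hal.StreamCtx(comm_stream):
    pass  # e.g. issue communication-heavy ops here
comm_stream.synchronize()  # wait for work queued on the stream to finish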
mindspore/include/api/model_group.h
CHANGED
@@ -33,8 +33,10 @@ class ModelGroupImpl;
 /// multiple models to share workspace memory.
 
 enum class ModelGroupFlag : int {
+  kUnknown = 0x0000,
   kShareWeight = 0x0001,
   kShareWorkspace = 0x0002,
+  kShareWeightAndWorkspace = 0x0003,
 };
 
 class MS_API ModelGroup {
@@ -47,7 +49,7 @@ class MS_API ModelGroup {
   /// \param[in] model_path_list Define the list of model path.
   ///
   /// \return Status.
-  Status AddModel(const std::vector<std::string> &model_path_list);
+  inline Status AddModel(const std::vector<std::string> &model_path_list);
 
   /// \brief Add models that require shared workspace memory.
   ///
@@ -74,6 +76,16 @@ class MS_API ModelGroup {
 
  private:
   std::shared_ptr<ModelGroupImpl> impl_;
+  Status AddModel(const std::vector<std::vector<char>> &model_path_list);
 };
+
+Status ModelGroup::AddModel(const std::vector<std::string> &model_path_list) {
+  std::vector<std::vector<char>> model_path_list_str;
+  for (auto str : model_path_list) {
+    model_path_list_str.push_back(StringToChar(str));
+  }
+  return AddModel(model_path_list_str);
+}
+
 }  // namespace mindspore
 #endif  // MINDSPORE_INCLUDE_API_MODEL_GROUP_H
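The inline `AddModel(const std::vector<std::string> &)` wrapper plus the private `std::vector<std::vector<char>>` overload is the dual-ABI pattern used throughout these headers (`StringToChar` is one of the dual-ABI helpers): the string conversion happens in the caller's translation unit, so the exported symbol does not depend on the caller's `std::string` ABI. For completeness, a sketch of driving model groups from Python; this assumes the `mindspore_lite` bindings in your build expose `ModelGroup`, so verify against your installed version:

# A sketch under the assumption that mindspore_lite ships ModelGroup /
# ModelGroupFlag bindings; model paths here are placeholders.
import mindspore_lite as mslite

group = mslite.ModelGroup(mslite.ModelGroupFlag.SHARE_WORKSPACE)
group.add_model(["model_a.mindir", "model_b.mindir"])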
mindspore/include/api/types.h
CHANGED
@@ -60,6 +60,16 @@ struct QuantParam {
   double max;  ///< Quantization max value
 };
 
+using Key = struct MS_API Key {
+  size_t max_key_len = 32;
+  size_t len = 0;
+  unsigned char key[32] = {0};
+  Key() : len(0) {}
+  explicit Key(const char *dec_key, size_t key_len);
+};
+
+constexpr char kDecModeAesGcm[] = "AES-GCM";
+
 class Allocator;
 /// \brief The MSTensor class defines a tensor in MindSpore.
 class MS_API MSTensor {
@@ -402,16 +412,6 @@ std::string MSTensor::Name() const { return CharToString(CharName()); }
 
 void MSTensor::SetTensorName(const std::string &name) { SetTensorName(StringToChar(name)); }
 
-using Key = struct MS_API Key {
-  const size_t max_key_len = 32;
-  size_t len = 0;
-  unsigned char key[32] = {0};
-  Key() : len(0) {}
-  explicit Key(const char *dec_key, size_t key_len);
-};
-
-constexpr char kDecModeAesGcm[] = "AES-GCM";
-
 /// \brief CallBackParam defined input arguments for callBack function.
 struct MSCallBackParam {
   std::string node_name; /**< node name argument */
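Relocating `Key` and `kDecModeAesGcm` ahead of `MSTensor` is a declaration reorder (plus dropping `const` from `max_key_len`); the decryption interface itself is unchanged. On the Python side, the same key material is what `mindspore.load` takes when loading an encrypted MindIR, e.g. (a sketch; the key bytes and file name are illustrative):

import mindspore as ms

# 16-byte AES key; must match the key used at export time (illustrative value).
dec_key = b'0123456789ABCDEF'
graph = ms.load('encrypted.mindir', dec_key=dec_key, dec_mode='AES-GCM')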
mindspore/include/dataset/config.h
CHANGED
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020-
+ * Copyright 2020-2024 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -22,7 +22,7 @@
 #include <vector>
 
 #include "include/api/dual_abi_helper.h"
-#include "include/api/
+#include "include/api/visible.h"
 
 namespace mindspore {
 namespace dataset {
mindspore/include/dataset/constants.h
CHANGED
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020-
+ * Copyright 2020-2024 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@
 #include <limits>
 #include <random>
 
-#include "include/api/
+#include "include/api/visible.h"
 
 namespace mindspore {
 namespace dataset {
mindspore/include/dataset/execute.h
CHANGED
@@ -1,5 +1,5 @@
 /**
- * Copyright 2020-
+ * Copyright 2020-2024 Huawei Technologies Co., Ltd
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@
 #include <vector>
 
 #include "include/api/context.h"
-#include "include/api/
+#include "include/api/visible.h"
 #include "include/dataset/constants.h"
 #include "include/dataset/transforms.h"
 #if !defined(BUILD_LITE) && defined(ENABLE_D)
mindspore/include/dataset/vision.h
CHANGED
@@ -441,6 +441,7 @@ class DATASET_API CutOut final : public TensorTransform {
   std::shared_ptr<Data> data_;
 };
 
+#ifdef ENABLE_FFMPEG
 /// \brief Decode the input video.
 class DATASET_API DecodeVideo final : public TensorTransform {
  public:
@@ -470,6 +471,7 @@ class DATASET_API DecodeVideo final : public TensorTransform {
   /// \return Shared pointer to TensorOperation object.
   std::shared_ptr<TensorOperation> Parse() override;
 };
+#endif
 
 /// \brief Encode the image as JPEG data.
 /// \param[in] image The image to be encoded.
@@ -1717,6 +1719,7 @@ Status DATASET_API ReadFile(const std::string &filename, mindspore::MSTensor *ou
 Status DATASET_API ReadImage(const std::string &filename, mindspore::MSTensor *output,
                              ImageReadMode mode = ImageReadMode::kUNCHANGED);
 
+#ifdef ENABLE_FFMPEG
 /// \brief Read the video, audio, metadata from a video file. It supports AVI, H264, H265, MOV, MP4, WMV file formats.
 /// \param[in] filename The path to the videoe file to be read.
 /// \param[out] video_output The video frames of the video file.
@@ -1737,6 +1740,7 @@ Status DATASET_API ReadVideo(const std::string &filename, mindspore::MSTensor *v
 /// \return The status code.
 Status DATASET_API ReadVideoTimestamps(const std::string &filename, std::tuple<std::vector<float>, float> *output,
                                        const std::string &pts_unit = "pts");
+#endif
 
 /// \brief Crop the given image and zoom to the specified size.
 class DATASET_API ResizedCrop final : public TensorTransform {
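With these guards, `DecodeVideo`, `ReadVideo`, and `ReadVideoTimestamps` are compiled only when `ENABLE_FFMPEG` is defined, so builds without the bundled FFmpeg libraries simply drop them from the C++ surface. Their Python counterparts live in `mindspore.dataset.vision`; a hedged sketch (the file path is illustrative, and availability depends on the build having FFmpeg support):

import mindspore.dataset.vision as vision

# Read per-frame presentation timestamps and the frame rate of a video file.
timestamps, fps = vision.read_video_timestamps("sample.mp4", pts_unit="pts")
print(len(timestamps), fps)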
mindspore/jpeg62.dll
CHANGED
Binary file
mindspore/log.py
CHANGED
@@ -507,7 +507,7 @@ def _create_logfile_dir(kwargs):
     rank_id = _get_rank_id()
     log_dir += '/rank_' + rank_id + '/logs'
     if not os.path.exists(log_dir):
-        os.makedirs(log_dir, exist_ok=True)
+        os.makedirs(log_dir, mode=0o700, exist_ok=True)
     return log_dir
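The `mode=0o700` change restricts newly created log directories to the owning user. One caveat worth knowing: the mode passed to `os.makedirs` is still filtered through the process umask, so the effective permissions are `mode & ~umask`; a quick self-contained check:

import os
import stat
import tempfile

log_dir = os.path.join(tempfile.mkdtemp(), 'rank_0', 'logs')
os.makedirs(log_dir, mode=0o700, exist_ok=True)
# With the common umask 0o022 this still yields 0o700 (0o700 & ~0o022).
print(oct(stat.S_IMODE(os.stat(log_dir).st_mode)))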
mindspore/mindrecord/filewriter.py
CHANGED
@@ -113,8 +113,10 @@ class FileWriter:
         # parallel write mode
         self._parallel_writer = None
         self._writers = None
-        self.
+        self._queues = []  # the data queue from main process to worker process
+        self._msg_queues = []  # the err msg from worker process to main process
         self._workers = None
+        self._workers_loop_index = 0
         self._index_workers = None
 
     @classmethod
@@ -340,42 +342,53 @@ class FileWriter:
         # init the _writers and launch the workers
         if self._writers is None:
             self._writers = [None] * len(self._paths)    # writers used by worker
-            self.
+            self._queues = [mp.Queue(2) for _ in range(len(self._paths))]    # data queue for worker
+            self._msg_queues = [mp.Queue(2) for _ in range(len(self._paths))]    # msg queue for worker
             self._workers = [None] * len(self._paths)    # worker process
+            worker_pid_list = []
             for i, path in enumerate(self._paths):
                 self._writers[i] = ShardWriter()
                 self._writers[i].open([path], self._overwrite)
                 self._writers[i].set_shard_header(self._header)
 
                 # launch the workers for parallel write
-                self.
-
+                self._queues[i]._joincancelled = True  # pylint: disable=W0212
+                self._msg_queues[i]._joincancelled = True  # pylint: disable=W0212
+                p = mp.Process(target=self._write_worker, name="WriterWorker" + str(i),
+                               args=(i, self._queues[i], self._msg_queues[i]))
                 p.daemon = True
                 p.start()
                 logger.info("Start worker process(pid:{}) to parallel write.".format(p.pid))
                 self._workers[i] = p
+                worker_pid_list.append(self._workers[i].pid)
 
-        # fill the self.
+        # fill the self._queues
         check_interval = 0.5    # 0.5s
         start_time = time.time()
         while True:
            try:
-                self.
+                self._queues[self._workers_loop_index].put(raw_data, block=False)
            except queue.Full:
                 if time.time() - start_time > check_interval:
                     start_time = time.time()
                     logger.warning("Because there are too few MindRecord file shards, the efficiency of parallel " \
                                    "writing is too low. You can stop the current task and add the parameter " \
                                    "`shard_num` of `FileWriter` to upgrade the task.")
-
-                # check the status of worker process
-                for i in range(len(self._paths)):
-                    if not self._workers[i].is_alive():
-                        raise RuntimeError("Worker process(pid:{}) has stopped abnormally. Please check " \
-                                           "the above log".format(self._workers[i].pid))
+                self.check_worker_status(self._workers_loop_index)
                 continue
+            self._workers_loop_index = (self._workers_loop_index + 1) % len(self._paths)
             return
 
+    def check_all_worker_status(self):
+        for index in range(len(self._msg_queues)):
+            self.check_worker_status(index)
+
+    def check_worker_status(self, index):
+        if not self._msg_queues[index].empty():
+            if self._msg_queues[index].get() == "Error":
+                raise RuntimeError("Worker process(pid:{}) has stopped abnormally. Please check " \
+                                   "the above log".format(self._workers[index].pid))
+
     def set_header_size(self, header_size):
         """
         Set the size of header which contains shard information, schema information, \
@@ -495,52 +508,45 @@ class FileWriter:
 
     def _parallel_commit(self):
         """Parallel commit"""
-        #
-
-
-
-
-
+        # waiting for all the self._queues had been writed by workers
+        for index in range(len(self._paths)):
+            while not self._queues[index].empty():
+                logger.info("Waiting for the data is writed by worker: {}.".format(self._workers[index].pid))
+                time.sleep(1)
+
+            # check the workers status
+            self.check_worker_status(index)
 
         # send EOF to worker process
-        for
+        for index in range(len(self._paths)):
+            logger.info("Send EOF flag to worker: {}.".format(self._workers[index].pid))
             while True:
                 try:
-                    self.
+                    self._queues[index].put("EOF", block=False)
                 except queue.Full:
+                    # check the workers status
+                    self.check_worker_status(index)
+
                     time.sleep(1)
-                    if not self._workers[i].is_alive():
-                        raise RuntimeError("Worker process(pid:{}) has stopped abnormally. Please check " \
-                                           "the above log".format(self._workers[i].pid))
                     continue
                 break
 
-
-
-        alive_count = 0
-        for i in range(len(self._paths)):
-            if self._workers[i].is_alive():
-                alive_count += 1
-        if alive_count == 0:
-            break
-        time.sleep(1)
-        logger.info("Waiting for all the parallel workers to finish.")
-
-        del self._queue
-
-        # wait for worker process stop
-        for index in range(len(self._paths)):
+        worker_sucess = 0
+        for index in range(len(self._msg_queues)):
             while True:
-                logger.info("Waiting for the worker
-
-
-
-
-
-
-
+                logger.info("Waiting for the worker: {} to exit.".format(self._workers[index].pid))
+                if not self._msg_queues[index].empty():
+                    ret = self._msg_queues[index].get()
+                    if ret == "Success":
+                        worker_sucess += 1
+                        break
+                    else:
+                        raise RuntimeError("Worker process(pid:{}) has stopped abnormally. Please check " \
+                                           "the above log".format(self._workers[index].pid))
+                time.sleep(1)
+
+        del self._queues
+        del self._msg_queues
 
         if self._index_generator:
             # use parallel index workers to generator index
@@ -678,7 +684,7 @@ class FileWriter:
             return False, error
         return True, error
 
-    def _write_worker(self, i, in_queue):
+    def _write_worker(self, i, in_queue, msg_queue):
         """The worker do the data check and write to disk for parallel mode"""
         while True:
             # try to get new raw_data from master
@@ -691,15 +697,26 @@ class FileWriter:
             if raw_data == "EOF":
                 ret = self._writers[i].commit()
                 if ret != SUCCESS:
+                    msg_queue.put("Error")
                     raise RuntimeError("Commit the {}th shard of MindRecord file failed.".format(i))
+
+                logger.info("Send Success flag from worker: {} to master: {}.".format(os.getpid(), os.getppid()))
+                msg_queue.put("Success")
+
                 break
 
             # check the raw_data
             if not isinstance(raw_data, list):
+                msg_queue.put("Error")
                 raise ParamTypeError('raw_data', 'list')
             for each_raw in raw_data:
                 if not isinstance(each_raw, dict):
+                    msg_queue.put("Error")
                    raise ParamTypeError('raw_data item', 'dict')
 
-
-
+            try:
+                self._verify_based_on_schema(raw_data)
+                self._writers[i].write_raw_data(raw_data, True, False)
+            except Exception as e:
+                msg_queue.put("Error")
+                raise e
mindspore/mindspore_backend.dll
CHANGED
Binary file

mindspore/mindspore_common.dll
CHANGED
Binary file

mindspore/mindspore_core.dll
CHANGED
Binary file

mindspore/mindspore_glog.dll
CHANGED
Binary file

mindspore/mindspore_np_dtype.dll
CHANGED
Binary file

mindspore/mindspore_ops.dll
CHANGED
Binary file