mindspore 2.7.0-cp310-cp310-win_amd64.whl → 2.7.0rc1-cp310-cp310-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +1 -1
- mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +2 -2
- mindspore/_extends/builtin_operations.py +3 -3
- mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
- mindspore/_extends/parse/__init__.py +3 -3
- mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -0
- mindspore/_extends/parse/parser.py +22 -28
- mindspore/_extends/parse/standard_method.py +1 -15
- mindspore/_extends/pijit/pijit_func_white_list.py +5 -2
- mindspore/_extends/remote/kernel_build_server_ascend.py +75 -0
- mindspore/amp.py +18 -0
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/common/__init__.py +12 -18
- mindspore/common/_tensor_cpp_method.py +1 -1
- mindspore/common/_tensor_docs.py +38 -102
- mindspore/common/_utils.py +1 -9
- mindspore/common/api.py +106 -155
- mindspore/common/{dynamic_shape/auto_dynamic_shape.py → auto_dynamic_shape.py} +23 -17
- mindspore/common/dtype.py +57 -98
- mindspore/common/dump.py +1 -1
- mindspore/common/file_system.py +9 -59
- mindspore/common/hook_handle.py +3 -22
- mindspore/common/np_dtype.py +3 -3
- mindspore/common/parameter.py +20 -4
- mindspore/common/recompute.py +4 -2
- mindspore/common/tensor.py +52 -38
- mindspore/communication/_hccl_management.py +297 -0
- mindspore/context.py +21 -15
- mindspore/dataset/__init__.py +1 -1
- mindspore/dataset/audio/transforms.py +1 -1
- mindspore/dataset/core/config.py +1 -35
- mindspore/dataset/engine/datasets.py +315 -330
- mindspore/dataset/engine/datasets_user_defined.py +22 -38
- mindspore/dataset/transforms/c_transforms.py +2 -2
- mindspore/dataset/transforms/transforms.py +3 -3
- mindspore/dataset/vision/__init__.py +1 -1
- mindspore/dataset/vision/py_transforms.py +8 -8
- mindspore/dataset/vision/transforms.py +5 -17
- mindspore/dataset/vision/utils.py +21 -632
- mindspore/device_context/ascend/op_tuning.py +1 -35
- mindspore/dnnl.dll +0 -0
- mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -3
- mindspore/include/api/cell.h +4 -28
- mindspore/include/api/cfg.h +7 -24
- mindspore/include/api/context.h +0 -1
- mindspore/include/api/delegate.h +2 -0
- mindspore/include/api/dual_abi_helper.h +19 -100
- mindspore/include/api/graph.h +1 -14
- mindspore/include/api/kernel.h +3 -16
- mindspore/include/api/kernel_api.h +1 -9
- mindspore/include/api/metrics/accuracy.h +0 -9
- mindspore/include/api/model.h +1 -5
- mindspore/include/api/model_group.h +0 -4
- mindspore/include/api/model_parallel_runner.h +0 -2
- mindspore/include/api/status.h +10 -48
- mindspore/include/api/types.h +1 -6
- mindspore/include/dataset/constants.h +0 -9
- mindspore/jpeg62.dll +0 -0
- mindspore/mindrecord/tools/cifar10.py +2 -3
- mindspore/mindrecord/tools/cifar10_to_mr.py +5 -5
- mindspore/mindspore_backend_common.dll +0 -0
- mindspore/mindspore_backend_manager.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_cpu_res_manager.dll +0 -0
- mindspore/mindspore_dump.dll +0 -0
- mindspore/mindspore_frontend.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_memory_pool.dll +0 -0
- mindspore/mindspore_ms_backend.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mindspore_ops_host.dll +0 -0
- mindspore/mindspore_ops_kernel_common.dll +0 -0
- mindspore/mindspore_profiler.dll +0 -0
- mindspore/mindspore_pyboost.dll +0 -0
- mindspore/mindspore_pynative.dll +0 -0
- mindspore/mindspore_res_manager.dll +0 -0
- mindspore/mindspore_runtime_pipeline.dll +0 -0
- mindspore/mint/distributed/__init__.py +0 -4
- mindspore/mint/distributed/distributed.py +14 -217
- mindspore/mint/nn/layer/_functions.py +2 -1
- mindspore/mint/nn/layer/conv.py +6 -6
- mindspore/mint/nn/layer/normalization.py +3 -3
- mindspore/nn/cell.py +174 -216
- mindspore/nn/layer/activation.py +2 -4
- mindspore/nn/layer/basic.py +13 -7
- mindspore/nn/layer/image.py +1 -1
- mindspore/nn/optim/adam.py +3 -1
- mindspore/nn/optim/lamb.py +3 -1
- mindspore/nn/optim/tft_wrapper.py +3 -2
- mindspore/nn/probability/distribution/_utils/utils.py +2 -2
- mindspore/nn/wrap/cell_wrapper.py +5 -39
- mindspore/nn/wrap/grad_reducer.py +15 -0
- mindspore/numpy/array_creations.py +2 -2
- mindspore/numpy/utils_const.py +1 -1
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
- mindspore/ops/_op_impl/cpu/__init__.py +0 -1
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +2 -12
- mindspore/ops/auto_generate/gen_extend_func.py +4 -4
- mindspore/ops/auto_generate/gen_ops_def.py +16 -290
- mindspore/ops/auto_generate/gen_ops_prim.py +76 -563
- mindspore/ops/composite/base.py +1 -1
- mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
- mindspore/ops/function/__init__.py +0 -1
- mindspore/ops/function/array_func.py +6 -10
- mindspore/ops/function/debug_func.py +2 -4
- mindspore/ops/function/grad/grad_func.py +12 -4
- mindspore/ops/function/math_func.py +32 -44
- mindspore/ops/function/nn_func.py +20 -18
- mindspore/ops/functional.py +1 -2
- mindspore/ops/functional_overload.py +12 -23
- mindspore/ops/operations/_inner_ops.py +12 -11
- mindspore/ops/operations/array_ops.py +50 -4
- mindspore/ops/operations/comm_ops.py +15 -1
- mindspore/ops/operations/custom_ops.py +4 -10
- mindspore/ops/operations/debug_ops.py +6 -6
- mindspore/ops/operations/manually_defined/ops_def.py +12 -12
- mindspore/ops/operations/math_ops.py +5 -5
- mindspore/ops/operations/nn_ops.py +1 -1
- mindspore/ops/primitive.py +10 -3
- mindspore/ops/tensor_method.py +7 -16
- mindspore/ops_generate/pyboost/gen_pyboost_func.py +16 -0
- mindspore/parallel/_auto_parallel_context.py +15 -5
- mindspore/parallel/_parallel_serialization.py +2 -3
- mindspore/parallel/_ps_context.py +2 -2
- mindspore/parallel/_transformer/transformer.py +4 -4
- mindspore/parallel/_utils.py +11 -5
- mindspore/parallel/auto_parallel.py +9 -23
- mindspore/parallel/checkpoint_transform.py +0 -2
- mindspore/parallel/cluster/process_entity/_api.py +1 -4
- mindspore/parallel/cluster/run.py +3 -5
- mindspore/parallel/function/reshard_func.py +5 -6
- mindspore/parallel/nn/parallel_cell_wrapper.py +3 -40
- mindspore/parallel/nn/parallel_grad_reducer.py +8 -0
- mindspore/parallel/shard.py +21 -7
- mindspore/parallel/transform_safetensors.py +4 -10
- mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +9 -10
- mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +1 -1
- mindspore/profiler/common/msprof_cmd_tool.py +2 -2
- mindspore/profiler/common/path_manager.py +0 -9
- mindspore/profiler/common/profiler_context.py +2 -25
- mindspore/profiler/common/profiler_meta_data.py +0 -1
- mindspore/profiler/common/profiler_op_analyse.py +6 -10
- mindspore/{ops/_op_impl/cpu/joinedstr_op.py → profiler/common/validator/__init__.py} +1 -15
- mindspore/profiler/common/validator/validate_path.py +84 -0
- mindspore/profiler/dynamic_profiler.py +46 -91
- mindspore/profiler/envprofiler.py +5 -30
- mindspore/profiler/experimental_config.py +1 -16
- mindspore/profiler/platform/cpu_profiler.py +4 -10
- mindspore/profiler/platform/npu_profiler.py +1 -1
- mindspore/profiler/profiler.py +145 -193
- mindspore/profiler/profiler_action_controller.py +1 -1
- mindspore/profiler/profiler_interface.py +2 -2
- mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
- mindspore/runtime/__init__.py +4 -6
- mindspore/runtime/executor.py +0 -27
- mindspore/runtime/memory.py +0 -1
- mindspore/runtime/thread_bind_core.py +1 -1
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/_utils.py +3 -3
- mindspore/train/amp.py +3 -0
- mindspore/train/callback/_callback.py +1 -2
- mindspore/train/callback/_checkpoint.py +8 -1
- mindspore/train/callback/_flops_collector.py +6 -10
- mindspore/train/callback/_train_fault_tolerance.py +7 -3
- mindspore/train/data_sink.py +4 -4
- mindspore/train/dataset_helper.py +5 -5
- mindspore/train/model.py +20 -4
- mindspore/train/serialization.py +15 -35
- mindspore/train/train_thor/model_thor.py +2 -2
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/hooks.py +81 -0
- mindspore/utils/utils.py +8 -8
- mindspore/version.py +1 -1
- {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +1 -1
- {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +193 -192
- mindspore/_extends/parallel_compile/akg_compiler/custom.py +0 -1109
- mindspore/common/dynamic_shape/__init__.py +0 -0
- mindspore/common/dynamic_shape/enable_dynamic.py +0 -197
- /mindspore/common/{dynamic_shape/_auto_dynamic.py → _auto_dynamic.py} +0 -0
- {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
- {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
- {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0
mindspore/.commit_id
CHANGED
@@ -1 +1 @@
-__commit_id__ = ''[sha1]:
+__commit_id__ = ''[sha1]:e69e001d,[branch]:(HEAD->r2.7.rc1,origin/r2.7.rc1)''
mindspore/__init__.py
CHANGED
@@ -19,7 +19,7 @@ from mindspore.run_check import run_check
 from mindspore import common, dataset, mindrecord, train, log, amp, device_manager
 from mindspore import profiler, communication, numpy, parallel, hal, runtime, device_context
 from mindspore.common import *
-from mindspore.common import _tensor_docs
+from mindspore.common import _tensor_docs
 del _tensor_docs
 from mindspore.mindrecord import *
 from mindspore.ops import _op_impl, grad, value_and_grad, vjp, jvp, jacfwd, jacrev, vmap, get_grad, constexpr
mindspore/_c_dataengine.cp310-win_amd64.pyd
CHANGED
Binary file

mindspore/_c_expression.cp310-win_amd64.pyd
CHANGED
Binary file

mindspore/_c_mindrecord.cp310-win_amd64.pyd
CHANGED
Binary file
mindspore/_checkparam.py
CHANGED
@@ -787,9 +787,9 @@ def check_astype_dtype(dtype):
     if isinstance(dtype, str):
         if dtype.lower() not in all_types:
             raise TypeError(f"For Tensor.astype, the input type must be one of {all_types}, but got '{dtype}'.")
-        dtype = mstype.
+        dtype = mstype.pytype_to_dtype(np.dtype(dtype.lower()))
     elif isinstance(dtype, type):
-        dtype = mstype.
+        dtype = mstype.pytype_to_dtype(dtype)
     elif not dtype in mstype.number_type + (mstype.bool_,):
         raise TypeError(f"For Tensor.astype, the input type must be one of {mstype.number_type + (mstype.bool_,)}," \
                         f" but got '{dtype}'.")
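For orientation, a minimal sketch of how `Tensor.astype` resolves dtypes after this change; the tensor values are illustrative assumptions, while the helper names come from the hunk above:

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor

x = Tensor(np.ones((2, 2), np.float32))
y = x.astype("int32")    # string dtypes go through np.dtype -> mstype.pytype_to_dtype
z = x.astype(ms.int32)   # mindspore dtypes pass the number_type check directly
print(y.dtype, z.dtype)  # Int32 Int32
```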

mindspore/_extends/builtin_operations.py
CHANGED
@@ -19,7 +19,7 @@ from mindspore.ops import functional as F
 from mindspore.ops import composite as C
 from mindspore.common.tensor import Tensor
 import mindspore.common.dtype as mstype
-from mindspore.common.dtype import
+from mindspore.common.dtype import dtype_to_nptype, get_py_obj_dtype
 from mindspore._c_expression import TensorPy as CTensor
 
 
@@ -164,7 +164,7 @@ def Load(value, u=None):
 
 def scalar_cast(x, t):
     """Implement scalar_cast."""
-    np_type =
+    np_type = dtype_to_nptype(t)
     value = np_type(x)
     cast_value = np.ndarray.item(value)
     return cast_value
@@ -172,7 +172,7 @@ def scalar_cast(x, t):
 
 def typeof(x):
     """Implement typeof."""
-    return
+    return get_py_obj_dtype(x)
 
 
 def tuple_to_array(x):
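A hedged sketch of what the restored helpers do; the inputs and printed results are assumptions, while the helper names come from the hunks above:

```python
import mindspore as ms
from mindspore.common.dtype import dtype_to_nptype, get_py_obj_dtype

np_type = dtype_to_nptype(ms.int32)   # numpy scalar type, e.g. numpy.int32
print(np_type(3.7).item())            # 3 -- the conversion scalar_cast performs
print(get_py_obj_dtype(True))         # the MindSpore type typeof infers for a Python object
```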

mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py
CHANGED
@@ -81,7 +81,7 @@ def gen_custom_op_files(config_dir, dsl_dir):
         f.write(json.dumps(ops_info, indent=4))
 
     # custom akg op dsl file
-    custom_py = os.path.join(cur_path, "custom.py")
+    custom_py = os.path.join(cur_path, "../../../../../lite/tools/kernel_builder/ascend/akg/custom.py")
     if not os.path.isfile(custom_py):
         raise RuntimeError("custom.py path is invalid: {}".format(custom_py))
     shutil.copy(custom_py, dsl_dir)

mindspore/_extends/parse/__init__.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright 2020-
+# Copyright 2020-2024 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -30,7 +30,7 @@ from .parser import (Parser, create_instance, is_supported_create_instance_type,
     get_obj_defined_from_obj_type, is_from_third_party_library, get_const_abs, get_const_round,
     get_const_len, convert_to_namedtuple, check_attrs, generate_lambda_object,
     check_is_subclass, check_attr_is_property, get_method_info, can_constant_fold,
-    convert_to_mutable, get_ast_augassign_namespace_symbol
+    convert_to_mutable, get_ast_augassign_namespace_symbol)
 
 __all__ = ['Parser', 'create_instance', 'is_supported_create_instance_type', 'generate_scope', 'get_attr_from_object',
            'get_bprop_method_of_class', 'get_class_instance_type', 'get_class_member_namespace_symbol',
@@ -45,4 +45,4 @@ __all__ = ['Parser', 'create_instance', 'is_supported_create_instance_type', 'ge
            'is_class_member_recursive', 'get_obj_defined_from_obj_type',
           'is_from_third_party_library', 'get_const_abs', 'get_const_round', 'get_const_len', 'get_method_info',
            'convert_to_namedtuple', 'check_attrs', 'generate_lambda_object', 'check_is_subclass', 'check_attr_is_property',
-           'can_constant_fold', 'convert_to_mutable', 'get_ast_augassign_namespace_symbol'
+           'can_constant_fold', 'convert_to_mutable', 'get_ast_augassign_namespace_symbol']

mindspore/_extends/parse/parser.py
CHANGED
@@ -1,6 +1,6 @@
 # This is the Python adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
 #
-# Copyright 2020-
+# Copyright 2020-2024 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -103,6 +103,17 @@ parse_expr_statement_white_list = (
     "append", "insert", "clear", "reverse", "extend", "update", "register_hook",
 )
 
+# Methods that need to reorder after it's caller is used before
+# e.g. We need to reorder `x.register_hook` after x is used in `out = x + 1` when `register_hook` is called.
+# def construct(x):
+#     out = x + 1
+#     x.register_hook(hook_fn)
+#     return out
+# equals to:
+# def construct(x):
+#     x = x.register_hook(hook_fn)  # register_hook will return itself when it is called in the graph (in `GRAPH_MODE`).
+#     out = x + 1
+#     return out
 _need_reorder_methods = (
     "register_hook",
 )
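The new comment block documents the reordering with pseudocode; below is a hedged, runnable sketch of the same user-visible pattern (the hook body and the expected gradient are assumptions, not taken from the diff):

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor

def hook_fn(grad):
    return grad * 2  # double the gradient flowing into x

@ms.jit
def construct(x):
    out = x + 1
    x.register_hook(hook_fn)  # the parser reorders this before `out = x + 1`
    return out

grad_fn = ms.grad(construct)
print(grad_fn(Tensor(np.array([1.0]), ms.float32)))  # expected [2.]
```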
@@ -114,6 +125,10 @@ _unsupported_python_builtin_type = (
     set, dict, slice, complex, reversed, type,
 )
 
+# Unsupported python builtin type in JIT Fallback.
+_fallback_unsupported_python_builtin_type = (
+    compile, eval, exec
+)
 
 _global_params = {}
 
@@ -182,7 +197,10 @@ def get_parse_method_of_class(obj, parse_method=None):
     if parse_method is not None:
         method_name = parse_method
     elif isinstance(obj, nn.Cell):
-
+        if obj._backward_hook:
+            method_name = "_backward_hook_construct"
+        else:
+            method_name = "construct"
 
     return get_attr_from_object(obj, method_name)
 
@@ -787,12 +805,6 @@ def eval_script(exp_str, params):
     local_params = params[1]
     try:
         local_params = _convert_python_data(local_params)
-        # There are two sources of scripts:
-        # 1. The user's original Python script code, which is directly passed back to Python for execution,
-        #    and its behavior is guaranteed by the user.
-        # 2. Internally provided Python expression code, similar to
-        #    `__iternal_sequence_input__[__internal_sequence_index__]`.
-        # In addition, MindIR load and export do not involve the use of the `eval_script` function.
         res = eval(exp_str, global_params, local_params)
     except Exception as e:
         error_info = f"When eval '{exp_str}' by using JIT Fallback feature, an error occurred: " + str(e)
@@ -899,26 +911,6 @@ def can_constant_fold(obj):
     return obj in constant_fold_functions
 
 
-def hook_wrapper(hook_fn):
-    """
-    Decorator wrapper for gradient hook functions.
-    Handles custom logic when the hook returns None to ensure execution dependencies.
-
-    Args:
-        hook_fn (function): The original hook function to be wrapped.
-
-    Returns:
-        function: Wrapped inner hook function with dependency handling logic.
-    """
-    def inner(dout):
-        fdout = hook_fn(dout)
-        if fdout is None:
-            dout = ops.Depend()(dout, fdout)
-            return dout
-        return fdout
-    return inner
-
-
 class Parser:
     """
     Parser python code to ast tree.
@@ -952,6 +944,8 @@ class Parser:
         """To check if not supported for namespace"""
         unsupported = isinstance(value, _builtin_function_or_method_type) and value not in convert_object_map
         logger.debug(f"'{value}' unsupported: {unsupported}.")
+        if unsupported and value in _fallback_unsupported_python_builtin_type:
+            raise TypeError(f"'{value}' is not supported both in JIT Fallback and graph mode.")
         return unsupported
 
     @staticmethod
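Taken together, the `_fallback_unsupported_python_builtin_type` tuple and the namespace check above make `compile`, `eval` and `exec` fail fast when referenced under JIT; a hedged sketch (the decorator usage is an assumption, the error text is from the hunk):

```python
import mindspore as ms

@ms.jit
def f(x):
    return eval("x + 1")  # eval/exec/compile now raise instead of silently falling back

# f(some_tensor) is expected to raise:
# TypeError: '<built-in function eval>' is not supported both in JIT Fallback and graph mode.
```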

mindspore/_extends/parse/standard_method.py
CHANGED
@@ -1114,7 +1114,7 @@ def copy_(self, src, non_blocking=False):
     """
     Copies the elements from src into self tensor and returns self.
     """
-    return inplace_copy_op(self, src
+    return inplace_copy_op(self, src)
 
 
 def max(input, axis=None, keepdims=False, *, initial=None,  # pylint: disable=redefined-builtin
@@ -3709,13 +3709,6 @@ def bernoulli(input, *, generator=None):
     return F.bernoulli_ext(input, generator=generator)
 
 
-def bernoulli_(input, p=0.5, *, generator=None):
-    """
-    Randomly draws binary numbers from a Bernoulli distribution.
-    """
-    return F.bernoulli_(input, p, generator=generator)
-
-
 def gather_nd(input_x, indices):
     r"""
     Gathers slices from a tensor by indices.
@@ -4542,13 +4535,6 @@ def index_put(input, indices, values, accumulate=False):
     return _index_put(input, values, indices)
 
 
-def move_to(input, to, blocking=True):
-    r"""
-    Copy Tensor to target device synchronously or asynchronously, default synchronously. only support PyNative mode.
-    """
-    raise ValueError(f"The method 'move_to' is not supported in jit.")
-
-
 def index_put_(input, indices, values, accumulate=False):
     r"""
     For details, please refer to :func:`mindspore.Tensor.index_put_`.
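The `copy_` fix closes a previously unbalanced call; for orientation, a minimal usage sketch of the method it implements (values are illustrative):

```python
import numpy as np
import mindspore as ms
from mindspore import Tensor

x = Tensor(np.zeros((2, 3), np.float32))
src = Tensor(np.ones((2, 3), np.float32))
x.copy_(src)    # in-place copy of src's elements; returns x itself
print(x.sum())  # 6.0
```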

mindspore/_extends/pijit/pijit_func_white_list.py
CHANGED
@@ -37,7 +37,7 @@ from mindspore.common.initializer import Zero
 from mindspore.ops.function import array_func
 from mindspore.ops import operations as P
 from mindspore.ops import functional as F
-from mindspore._c_expression.np_dtypes import
+from mindspore._c_expression.np_dtypes import np_version_valid
 from mindspore.common.dtype import type_size_in_bytes
 from mindspore.communication._comm_helper import _is_initialized, _get_rank_helper, _get_local_rank_helper, \
     _get_size_helper, _get_local_size_helper, _get_world_rank_from_group_rank_helper, _get_group_ranks, \
@@ -129,6 +129,7 @@ from mindspore.dataset.vision.transforms import AdjustBrightness, AdjustContrast
     ResizeWithBBox as VResizeWithBBox, Rotate as VRotate, SlicePatches as VSlicePatches, Solarize, ToTensor,\
     TrivialAugmentWide, UniformAugment as VUniformAugment, VerticalFlip as VVerticalFlip
 from mindspore.profiler.profiler import Profiler
+from mindspore.communication._hccl_management import get_rank_size, get_rank_id
 from mindspore.communication._comm_helper import _create_group_helper, _destroy_group_helper
 from mindspore.communication.management import _set_rank_from_mpi, init as cinit, release as crelease
 from mindspore.hal.stream import Stream, synchronize, set_cur_stream, current_stream, default_stream
@@ -491,11 +492,13 @@ _func_map = {
     function_id(validator.check_is_int): FUNC_KEY_PIJIT_CONSTEXPR,
     function_id(validator.check_is_number): FUNC_KEY_PIJIT_CONSTEXPR,
     function_id(validator.check_positive_int_sequence): FUNC_KEY_PIJIT_CONSTEXPR,
-    function_id(
+    function_id(np_version_valid): FUNC_KEY_PIJIT_CONSTEXPR,
     function_id(_is_initialized): FUNC_KEY_PIJIT_CONSTEXPR,
     function_id(_set_elegant_exit_handle): FUNC_KEY_PIJIT_CONSTEXPR,
     function_id(_cost_model_context.get_cost_model_context): FUNC_KEY_PIJIT_CONSTEXPR,
     function_id(Stream.__repr__): FUNC_KEY_PIJIT_CONSTEXPR,
+    function_id(get_rank_size): FUNC_KEY_PIJIT_CONSTEXPR,
+    function_id(get_rank_id): FUNC_KEY_PIJIT_CONSTEXPR,
     function_id(offload_context): FUNC_KEY_PIJIT_CONSTEXPR,
     function_id(_is_in_data_parallel_mode): FUNC_KEY_PIJIT_CONSTEXPR,
     function_id(check_version_and_env_config): FUNC_KEY_PIJIT_CONSTEXPR,

mindspore/_extends/remote/kernel_build_server_ascend.py
ADDED
@@ -0,0 +1,75 @@
+# Copyright 2020-2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
+"""kernel build server for ascend"""
+import sys
+import warnings
+import json
+
+from mindspore._extends.parallel_compile.tbe_compiler.tbe_job_manager import TbeJobManager
+from mindspore._extends.remote.kernel_build_server import Messager, get_logger, AkgBuilder
+
+
+class AscendMessager(Messager):
+    """
+    Ascend Messager
+    It works as a server, communicating with c++ client.
+    """
+
+    def __init__(self, fdin, fdout):
+        super().__init__(fdin, fdout)
+        get_logger().info("[TRACE] Ascend Messager init...")
+        self.tbe_builder = TbeJobManager()
+        self.akg_builder = AkgBuilder("ASCEND")
+
+    def handle(self):
+        """
+        Communicate with remote client.
+        Reference protocol between them at PR#3821 and PR#3935
+        """
+        arg = self.get_message()
+        if arg.startswith('AKG'):
+            self.akg_builder.handle(self, arg)
+        else:
+            job_json = dict()
+            try:
+                job_json = json.loads(arg)
+            except json.decoder.JSONDecodeError:
+                get_logger().error("[TRACE] Request is not a json message: {}".format(arg))
+                self.send_ack(False)
+                self.exit()
+            finally:
+                pass
+
+            if "job_type" in job_json:
+                res = self.tbe_builder.job_handler(arg)
+                self.send_res(res)
+            else:
+                get_logger().error("[TRACE] Request is not a TBE Job message: {}".format(arg))
+                self.send_ack(False)
+                self.exit()
+
+    def exit(self):
+        self.tbe_builder.reset()
+        get_logger().info("[TRACE] Ascend Messager Exit...")
+        exit()
+
+
+if __name__ == '__main__':
+    warnings.simplefilter("ignore")
+    if len(sys.argv) != 3:
+        raise Exception('Incorrect argv: {}'.format(sys.argv))
+    get_logger().debug(f"[TRACE] argv: {str(sys.argv)}")
+    messager = AscendMessager(int(sys.argv[1]), int(sys.argv[2]))
+    messager.run()
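A hypothetical launch of the new server, mirroring its `__main__` block; in the real pipeline the C++ compile client spawns it and passes two pipe descriptors, so the module-path invocation and descriptor values below are illustrative assumptions only:

```python
import subprocess
import sys

# "0" and "1" stand in for the real fdin/fdout pipe descriptors.
subprocess.run([sys.executable,
                "-m", "mindspore._extends.remote.kernel_build_server_ascend",
                "0", "1"])
```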
mindspore/amp.py
CHANGED
@@ -167,6 +167,9 @@ def all_finite(inputs):
         >>> x = (Tensor(np.array([np.log(-1), 1, np.log(0)])), Tensor(np.array([1.0])))
         >>> output = amp.all_finite(x)
 
+    Tutorial Examples:
+        - `Automatic Mix Precision - Loss Scaling
+          <https://mindspore.cn/tutorials/en/master/beginner/mixed_precision.html#loss-scaling>`_
     """
     inputs = mutable(inputs)
     _check_overflow_mode = os.environ.get('MS_ASCEND_CHECK_OVERFLOW_MODE')
@@ -182,6 +185,9 @@ class LossScaler(ABC):
     to scale and unscale the loss value and gradients to avoid overflow, `adjust` is used to update the
     loss scale value.
 
+    For more information, refer to the `tutorials <https://mindspore.cn/tutorials/en/master/beginner/
+    mixed_precision.html#loss-scaling>`_.
+
     .. warning::
         This is an experimental API that is subject to change or deletion.
 
@@ -371,6 +377,10 @@ class DynamicLossScaler(LossScaler):
 
         Returns:
             Union(Tensor, tuple(Tensor)), the scaled value.
+
+        Tutorial Examples:
+            - `Automatic Mix Precision - Loss Scaling
+              <https://mindspore.cn/tutorials/en/master/beginner/mixed_precision.html#loss-scaling>`_
         """
         inputs = mutable(inputs)
         return _grad_scale_map(self.scale_value, inputs)
@@ -384,6 +394,10 @@ class DynamicLossScaler(LossScaler):
 
         Returns:
             Union(Tensor, tuple(Tensor)), the unscaled value.
+
+        Tutorial Examples:
+            - `Automatic Mix Precision - Loss Scaling
+              <https://mindspore.cn/tutorials/en/master/beginner/mixed_precision.html#loss-scaling>`_
         """
         inputs = mutable(inputs)
         return _grad_unscale_map(self.scale_value, inputs)
@@ -394,6 +408,10 @@ class DynamicLossScaler(LossScaler):
 
         Args:
             grads_finite (Tensor): a scalar bool Tensor indicating whether the grads are finite.
+
+        Tutorial Examples:
+            - `Automatic Mix Precision - Loss Scaling
+              <https://mindspore.cn/tutorials/en/master/beginner/mixed_precision.html#loss-scaling>`_
         """
         one = ops.ones((), self.scale_value.dtype)
         scale_mul_factor = self.scale_value * self.scale_factor
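The amp.py hunks only add tutorial cross-references; as a refresher, a hedged sketch of the scale/unscale/adjust cycle those docstrings describe (all values are illustrative):

```python
import numpy as np
import mindspore as ms
from mindspore import amp, Tensor

loss_scaler = amp.DynamicLossScaler(scale_value=2**10, scale_factor=2, scale_window=50)

loss = Tensor(0.5, ms.float32)
scaled_loss = loss_scaler.scale(loss)  # multiply by the current scale value

grads = (Tensor(np.array([1.0, 2.0]), ms.float32),)
grads = loss_scaler.unscale(grads)     # divide the gradients by the scale value
is_finite = amp.all_finite(grads)      # the overflow check from the first hunk
loss_scaler.adjust(is_finite)          # grow or shrink the scale value
```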
mindspore/avcodec-59.dll
CHANGED
Binary file

mindspore/avdevice-59.dll
CHANGED
Binary file

mindspore/avfilter-8.dll
CHANGED
Binary file

mindspore/avformat-59.dll
CHANGED
Binary file

mindspore/avutil-57.dll
CHANGED
Binary file
mindspore/common/__init__.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright 2020-
+# Copyright 2020-2024 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -17,15 +17,14 @@ from __future__ import absolute_import
 from mindspore.common import dtype
 from mindspore.common.api import ms_memory_recycle, jit, jit_class, _no_grad, \
     flops_collection, set_recursion_limit
-from mindspore.common.dtype import Type, int8, byte, int16, short,
+from mindspore.common.dtype import Type, int8, byte, int16, short, int32, intc, int64, intp, \
     uint8, ubyte, uint16, ushort, uint32, uintc, uint64, uintp, float16, half, \
-
+    float32, single, float64, bfloat16, double, bool_, float_, list_, tuple_, int_, \
     uint, number, tensor_type, string, type_none, TensorType, Int, \
-
+    complex64, complex128, dtype_to_nptype, _null, _NullType, \
     dtype_to_pytype, pytype_to_dtype, get_py_obj_dtype, QuantDtype, qint4x2, \
     float8_e4m3fn, float8_e5m2, hifloat8
 from mindspore.common.dump import set_dump
-from mindspore.common.file_system import set_mindio_server_info, mindio_preload
 from mindspore.common.parameter import Parameter, ParameterTuple
 from mindspore.common.seed import set_seed, get_seed
 from mindspore.common.tensor import Tensor, tensor
@@ -42,7 +41,6 @@ from mindspore.common.generator import (
     Generator, default_generator, seed, manual_seed, initial_seed, get_rng_state, set_rng_state)
 from mindspore.ops.function.array_func import is_tensor, from_numpy
 from mindspore.common._grad_function import _Function
-from mindspore.common.dynamic_shape.enable_dynamic import enable_dynamic
 
 try:
     import triton
@@ -68,13 +66,11 @@ except ImportError:
     pass
 
 # symbols from dtype
-# bool, int, float are not defined in __all__ to avoid conflict with built-in types.
 __all__ = [
-    "bool_",
     "int8", "byte",
     "int16", "short",
     "int32", "intc",
-    "int64", "
+    "int64", "intp",
     "uint8", "ubyte",
     "uint16", "ushort",
     "uint32", "uintc",
@@ -82,20 +78,20 @@ __all__ = [
     "float16", "half",
     "float32", "single",
     "float64", "double",
-    "
+    "bool_", "float_",
+    "list_", "tuple_",
     "int_", "uint",
     "number", "tensor_type",
     "string", "type_none",
     "_null",
     "TensorType", "QuantDtype",
     "Type", "Int", "_NullType",
-    "complex64", "
-    "complex128", "cdouble",
-    "bfloat16", "qint4x2",
-    "float8_e4m3fn", "float8_e5m2", "hifloat8",
+    "complex64", "complex128",
     # __method__ from dtype
     "dtype_to_nptype", "dtype_to_pytype",
-    "pytype_to_dtype", "get_py_obj_dtype"
+    "pytype_to_dtype", "get_py_obj_dtype",
+    "bfloat16", "qint4x2",
+    "float8_e4m3fn", "float8_e5m2", "hifloat8"
 ]
 
 __all__.extend([
@@ -108,13 +104,11 @@ __all__.extend([
     "ms_memory_recycle",
     "set_recursion_limit",
     "mutable", "JitConfig",
-    "enable_dynamic",
     "flops_collection",
     "lazy_inline", "load_mindir", "save_mindir",
     "no_inline",
     "Symbol",
     "recompute",
-    "is_tensor", "from_numpy", "_Function"
-    "set_mindio_server_info", "mindio_preload"
+    "is_tensor", "from_numpy", "_Function"
 ])
 
 __all__.extend(generator.__all__)
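The reshuffled `__all__` keeps the dtype aliases and helpers exported at the package top level; a quick hedged sketch (printed results in the comments are assumptions):

```python
import mindspore as ms

print(ms.dtype_to_nptype(ms.float32))  # numpy.float32
print(ms.pytype_to_dtype(int))         # Int64
print(ms.bool_, ms.intp)               # aliases kept in __all__ after the reshuffle
```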
mindspore/common/_tensor_cpp_method.py
CHANGED

@@ -14,4 +14,4 @@
 # ============================================================================
 """Add tensor cpp methods for stub tensor"""
 
-tensor_cpp_methods = ['abs', 'absolute', '__abs__', 'acos', 'arccos', 'acosh', 'arccosh', 'add', '__add__', 'addbmm', 'addcdiv', 'addmm', 'addmv', 'add_', '__iadd__', 'all', 'allclose', 'any', 'argmax', 'argmin', 'argsort', 'asin', 'arcsin', 'asinh', 'arcsinh', 'atan', 'arctan', 'atan2', 'arctan2', 'atanh', 'arctanh', 'baddbmm', 'bincount', 'bitwise_and', '__and__', 'bitwise_not', 'bitwise_or', '__or__', 'bitwise_xor', '__xor__', 'ceil', 'chunk', 'clamp', 'clip', 'clone', 'copy_', 'cos', 'cosh', 'count_nonzero', 'cumsum', 'diag', 'div', 'divide', 'div_', '__itruediv__', 'dot', 'eq', 'erf', 'erfc', 'exp', 'expand_as', 'expm1', 'exp_', 'fill_', 'fill_diagonal_', 'flatten', 'floor', 'floor_divide', 'floor_divide_', '__ifloordiv__', 'fmod', 'frac', 'gather', 'gcd', 'greater', 'gt', 'greater_equal', 'ge', 'hardshrink', 'histc', 'index_add', 'index_select', 'inverse', 'isclose', 'isfinite', 'isinf', 'isneginf', 'kthvalue', 'lerp', 'less', 'lt', 'less_equal', 'le', 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logsumexp', 'log_', 'masked_fill', 'masked_fill_', '
+tensor_cpp_methods = ['abs', 'absolute', '__abs__', 'acos', 'arccos', 'acosh', 'arccosh', 'add', '__add__', 'addbmm', 'addcdiv', 'addmm', 'addmv', 'add_', '__iadd__', 'all', 'allclose', 'any', 'argmax', 'argmin', 'argsort', 'asin', 'arcsin', 'asinh', 'arcsinh', 'atan', 'arctan', 'atan2', 'arctan2', 'atanh', 'arctanh', 'baddbmm', 'bincount', 'bitwise_and', '__and__', 'bitwise_not', 'bitwise_or', '__or__', 'bitwise_xor', '__xor__', 'ceil', 'chunk', 'clamp', 'clip', 'clone', 'copy_', 'cos', 'cosh', 'count_nonzero', 'cumsum', 'diag', 'div', 'divide', 'div_', '__itruediv__', 'dot', 'eq', 'erf', 'erfc', 'exp', 'expand_as', 'expm1', 'exp_', 'fill_', 'fill_diagonal_', 'flatten', 'floor', 'floor_divide', 'floor_divide_', '__ifloordiv__', 'fmod', 'frac', 'gather', 'gcd', 'greater', 'gt', 'greater_equal', 'ge', 'hardshrink', 'histc', 'index_add', 'index_select', 'inverse', 'isclose', 'isfinite', 'isinf', 'isneginf', 'kthvalue', 'lerp', 'less', 'lt', 'less_equal', 'le', 'log', 'log10', 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logsumexp', 'log_', 'masked_fill', 'masked_fill_', 'masked_select', 'matmul', 'max', 'maximum', 'mean', 'median', 'min', 'minimum', 'mm', 'mul', 'mul_', '__imul__', 'nansum', 'nan_to_num', 'narrow', 'neg', 'negative', 'new_empty', 'new_full', 'new_ones', 'new_zeros', 'not_equal', 'ne', 'outer', 'pow', '__pow__', 'prod', 'put_', 'reciprocal', 'remainder', 'repeat', 'repeat_interleave', 'reshape', 'roll', 'round', 'rsqrt', 'scatter', 'scatter_', 'scatter_add', 'select', 'sigmoid', 'sin', 'sinc', 'sinh', 'sort', 'split', 'sqrt', 'square', 'std', 'sub', '__sub__', 'subtract', 'sub_', '__isub__', 'sum', 't', 'take', 'tan', 'tanh', 'tile', 'topk', 'transpose', 'tril', 'triu', 'true_divide', 'trunc', 'type_as', 'unbind', 'unique', 'unsqueeze', 'var', 'view_as', 'where', 'xlogy', '_to', '__mod__']