mindspore 2.7.0-cp310-cp310-macosx_11_0_arm64.whl → 2.7.0rc1-cp310-cp310-macosx_11_0_arm64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of mindspore might be problematic.

Files changed (187)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +1 -1
  3. mindspore/_c_dataengine.cpython-310-darwin.so +0 -0
  4. mindspore/_c_expression.cpython-310-darwin.so +0 -0
  5. mindspore/_c_mindrecord.cpython-310-darwin.so +0 -0
  6. mindspore/_checkparam.py +2 -2
  7. mindspore/_extends/builtin_operations.py +3 -3
  8. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  9. mindspore/_extends/parse/__init__.py +3 -3
  10. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -0
  11. mindspore/_extends/parse/parser.py +22 -28
  12. mindspore/_extends/parse/standard_method.py +1 -15
  13. mindspore/_extends/pijit/pijit_func_white_list.py +5 -2
  14. mindspore/_extends/remote/kernel_build_server_ascend.py +75 -0
  15. mindspore/amp.py +18 -0
  16. mindspore/common/__init__.py +12 -18
  17. mindspore/common/_tensor_cpp_method.py +1 -1
  18. mindspore/common/_tensor_docs.py +38 -102
  19. mindspore/common/_utils.py +1 -9
  20. mindspore/common/api.py +106 -155
  21. mindspore/common/{dynamic_shape/auto_dynamic_shape.py → auto_dynamic_shape.py} +23 -17
  22. mindspore/common/dtype.py +57 -98
  23. mindspore/common/dump.py +1 -1
  24. mindspore/common/file_system.py +9 -59
  25. mindspore/common/hook_handle.py +3 -22
  26. mindspore/common/np_dtype.py +3 -3
  27. mindspore/common/parameter.py +20 -4
  28. mindspore/common/recompute.py +4 -2
  29. mindspore/common/tensor.py +52 -38
  30. mindspore/communication/_hccl_management.py +297 -0
  31. mindspore/context.py +21 -15
  32. mindspore/dataset/__init__.py +1 -1
  33. mindspore/dataset/audio/transforms.py +1 -1
  34. mindspore/dataset/core/config.py +1 -35
  35. mindspore/dataset/engine/datasets.py +315 -330
  36. mindspore/dataset/engine/datasets_user_defined.py +22 -38
  37. mindspore/dataset/transforms/c_transforms.py +2 -2
  38. mindspore/dataset/transforms/transforms.py +3 -3
  39. mindspore/dataset/vision/__init__.py +1 -1
  40. mindspore/dataset/vision/py_transforms.py +8 -8
  41. mindspore/dataset/vision/transforms.py +5 -17
  42. mindspore/dataset/vision/utils.py +21 -632
  43. mindspore/device_context/ascend/op_tuning.py +1 -35
  44. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -3
  45. mindspore/include/api/cell.h +4 -28
  46. mindspore/include/api/cfg.h +7 -24
  47. mindspore/include/api/context.h +0 -1
  48. mindspore/include/api/delegate.h +2 -0
  49. mindspore/include/api/dual_abi_helper.h +19 -100
  50. mindspore/include/api/graph.h +1 -14
  51. mindspore/include/api/kernel.h +3 -16
  52. mindspore/include/api/kernel_api.h +1 -9
  53. mindspore/include/api/metrics/accuracy.h +0 -9
  54. mindspore/include/api/model.h +1 -5
  55. mindspore/include/api/model_group.h +0 -4
  56. mindspore/include/api/model_parallel_runner.h +0 -2
  57. mindspore/include/api/status.h +10 -48
  58. mindspore/include/api/types.h +1 -6
  59. mindspore/include/dataset/constants.h +0 -9
  60. mindspore/lib/libmindspore_backend_common.dylib +0 -0
  61. mindspore/lib/libmindspore_backend_manager.dylib +0 -0
  62. mindspore/lib/libmindspore_common.dylib +0 -0
  63. mindspore/lib/libmindspore_core.dylib +0 -0
  64. mindspore/lib/libmindspore_dump.dylib +0 -0
  65. mindspore/lib/libmindspore_frontend.dylib +0 -0
  66. mindspore/lib/libmindspore_glog.0.dylib +0 -0
  67. mindspore/lib/libmindspore_gpr.15.dylib +0 -0
  68. mindspore/lib/libmindspore_grpc++.1.dylib +0 -0
  69. mindspore/lib/libmindspore_grpc.15.dylib +0 -0
  70. mindspore/lib/libmindspore_memory_pool.dylib +0 -0
  71. mindspore/lib/libmindspore_ms_backend.dylib +0 -0
  72. mindspore/lib/libmindspore_ops.dylib +0 -0
  73. mindspore/lib/libmindspore_ops_kernel_common.dylib +0 -0
  74. mindspore/lib/libmindspore_profiler.dylib +0 -0
  75. mindspore/lib/libmindspore_pyboost.dylib +0 -0
  76. mindspore/lib/libmindspore_pynative.dylib +0 -0
  77. mindspore/lib/libmindspore_res_manager.dylib +0 -0
  78. mindspore/lib/libmindspore_runtime_pipeline.dylib +0 -0
  79. mindspore/lib/libnnacl.dylib +0 -0
  80. mindspore/lib/plugin/cpu/libmindspore_cpu_res_manager.dylib +0 -0
  81. mindspore/lib/plugin/libmindspore_ops_host.dylib +0 -0
  82. mindspore/mindrecord/tools/cifar10.py +2 -3
  83. mindspore/mindrecord/tools/cifar10_to_mr.py +5 -5
  84. mindspore/mint/distributed/__init__.py +0 -4
  85. mindspore/mint/distributed/distributed.py +14 -217
  86. mindspore/mint/nn/layer/_functions.py +2 -1
  87. mindspore/mint/nn/layer/conv.py +6 -6
  88. mindspore/mint/nn/layer/normalization.py +3 -3
  89. mindspore/nn/cell.py +174 -216
  90. mindspore/nn/layer/activation.py +2 -4
  91. mindspore/nn/layer/basic.py +13 -7
  92. mindspore/nn/layer/image.py +1 -1
  93. mindspore/nn/optim/adam.py +3 -1
  94. mindspore/nn/optim/lamb.py +3 -1
  95. mindspore/nn/optim/tft_wrapper.py +3 -2
  96. mindspore/nn/probability/distribution/_utils/utils.py +2 -2
  97. mindspore/nn/wrap/cell_wrapper.py +5 -39
  98. mindspore/nn/wrap/grad_reducer.py +15 -0
  99. mindspore/numpy/array_creations.py +2 -2
  100. mindspore/numpy/utils_const.py +1 -1
  101. mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
  102. mindspore/ops/_op_impl/cpu/__init__.py +0 -1
  103. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +3 -13
  104. mindspore/ops/auto_generate/gen_extend_func.py +4 -4
  105. mindspore/ops/auto_generate/gen_ops_def.py +16 -290
  106. mindspore/ops/auto_generate/gen_ops_prim.py +146 -633
  107. mindspore/ops/auto_generate/pyboost_inner_prim.py +10 -10
  108. mindspore/ops/composite/base.py +1 -1
  109. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
  110. mindspore/ops/function/__init__.py +0 -1
  111. mindspore/ops/function/array_func.py +6 -10
  112. mindspore/ops/function/debug_func.py +2 -4
  113. mindspore/ops/function/grad/grad_func.py +12 -4
  114. mindspore/ops/function/math_func.py +32 -44
  115. mindspore/ops/function/nn_func.py +20 -18
  116. mindspore/ops/functional.py +1 -2
  117. mindspore/ops/functional_overload.py +12 -23
  118. mindspore/ops/operations/_inner_ops.py +12 -11
  119. mindspore/ops/operations/array_ops.py +50 -4
  120. mindspore/ops/operations/comm_ops.py +15 -1
  121. mindspore/ops/operations/custom_ops.py +4 -10
  122. mindspore/ops/operations/debug_ops.py +6 -6
  123. mindspore/ops/operations/manually_defined/ops_def.py +12 -12
  124. mindspore/ops/operations/math_ops.py +5 -5
  125. mindspore/ops/operations/nn_ops.py +1 -1
  126. mindspore/ops/primitive.py +10 -3
  127. mindspore/ops/tensor_method.py +7 -16
  128. mindspore/ops_generate/pyboost/gen_pyboost_func.py +16 -0
  129. mindspore/parallel/_auto_parallel_context.py +15 -5
  130. mindspore/parallel/_parallel_serialization.py +2 -3
  131. mindspore/parallel/_ps_context.py +2 -2
  132. mindspore/parallel/_transformer/transformer.py +4 -4
  133. mindspore/parallel/_utils.py +11 -5
  134. mindspore/parallel/auto_parallel.py +9 -23
  135. mindspore/parallel/checkpoint_transform.py +0 -2
  136. mindspore/parallel/cluster/process_entity/_api.py +1 -4
  137. mindspore/parallel/cluster/run.py +3 -5
  138. mindspore/parallel/function/reshard_func.py +5 -6
  139. mindspore/parallel/nn/parallel_cell_wrapper.py +3 -40
  140. mindspore/parallel/nn/parallel_grad_reducer.py +8 -0
  141. mindspore/parallel/shard.py +21 -7
  142. mindspore/parallel/transform_safetensors.py +4 -10
  143. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +9 -10
  144. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +1 -1
  145. mindspore/profiler/common/msprof_cmd_tool.py +2 -2
  146. mindspore/profiler/common/path_manager.py +0 -9
  147. mindspore/profiler/common/profiler_context.py +2 -25
  148. mindspore/profiler/common/profiler_meta_data.py +0 -1
  149. mindspore/profiler/common/profiler_op_analyse.py +6 -10
  150. mindspore/{ops/_op_impl/cpu/joinedstr_op.py → profiler/common/validator/__init__.py} +1 -15
  151. mindspore/profiler/common/validator/validate_path.py +84 -0
  152. mindspore/profiler/dynamic_profiler.py +46 -91
  153. mindspore/profiler/envprofiler.py +5 -30
  154. mindspore/profiler/experimental_config.py +1 -16
  155. mindspore/profiler/platform/cpu_profiler.py +4 -10
  156. mindspore/profiler/platform/npu_profiler.py +1 -1
  157. mindspore/profiler/profiler.py +145 -193
  158. mindspore/profiler/profiler_action_controller.py +1 -1
  159. mindspore/profiler/profiler_interface.py +2 -2
  160. mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
  161. mindspore/runtime/__init__.py +4 -6
  162. mindspore/runtime/executor.py +0 -27
  163. mindspore/runtime/memory.py +0 -1
  164. mindspore/runtime/thread_bind_core.py +1 -1
  165. mindspore/train/_utils.py +3 -3
  166. mindspore/train/amp.py +3 -0
  167. mindspore/train/callback/_callback.py +1 -2
  168. mindspore/train/callback/_checkpoint.py +8 -1
  169. mindspore/train/callback/_flops_collector.py +6 -10
  170. mindspore/train/callback/_train_fault_tolerance.py +7 -3
  171. mindspore/train/data_sink.py +4 -4
  172. mindspore/train/dataset_helper.py +5 -5
  173. mindspore/train/model.py +20 -4
  174. mindspore/train/serialization.py +15 -35
  175. mindspore/train/train_thor/model_thor.py +2 -2
  176. mindspore/utils/hooks.py +81 -0
  177. mindspore/utils/utils.py +8 -8
  178. mindspore/version.py +1 -1
  179. {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +1 -1
  180. {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +184 -183
  181. mindspore/_extends/parallel_compile/akg_compiler/custom.py +0 -1109
  182. mindspore/common/dynamic_shape/__init__.py +0 -0
  183. mindspore/common/dynamic_shape/enable_dynamic.py +0 -197
  184. /mindspore/common/{dynamic_shape/_auto_dynamic.py → _auto_dynamic.py} +0 -0
  185. {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
  186. {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
  187. {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0
mindspore/.commit_id CHANGED
@@ -1 +1 @@
- __commit_id__ = ''[sha1]:7edec76e,[branch]:(HEAD,origin/r2.7,r2.7)''
+ __commit_id__ = ''[sha1]:e69e001d,[branch]:(HEAD,origin/r2.7.rc1,r2.7.rc1)''
mindspore/__init__.py CHANGED
@@ -19,7 +19,7 @@ from mindspore.run_check import run_check
  from mindspore import common, dataset, mindrecord, train, log, amp, device_manager
  from mindspore import profiler, communication, numpy, parallel, hal, runtime, device_context
  from mindspore.common import *
- from mindspore.common import _tensor_docs, bool, int, float
+ from mindspore.common import _tensor_docs
  del _tensor_docs
  from mindspore.mindrecord import *
  from mindspore.ops import _op_impl, grad, value_and_grad, vjp, jvp, jacfwd, jacrev, vmap, get_grad, constexpr
mindspore/_checkparam.py CHANGED
@@ -787,9 +787,9 @@ def check_astype_dtype(dtype):
      if isinstance(dtype, str):
          if dtype.lower() not in all_types:
              raise TypeError(f"For Tensor.astype, the input type must be one of {all_types}, but got '{dtype}'.")
-         dtype = mstype._pytype_to_dtype(np.dtype(dtype.lower()))  # pylint:disable=protected-access
+         dtype = mstype.pytype_to_dtype(np.dtype(dtype.lower()))
      elif isinstance(dtype, type):
-         dtype = mstype._pytype_to_dtype(dtype)  # pylint:disable=protected-access
+         dtype = mstype.pytype_to_dtype(dtype)
      elif not dtype in mstype.number_type + (mstype.bool_,):
          raise TypeError(f"For Tensor.astype, the input type must be one of {mstype.number_type + (mstype.bool_,)}," \
                          f" but got '{dtype}'.")
mindspore/_extends/builtin_operations.py CHANGED
@@ -19,7 +19,7 @@ from mindspore.ops import functional as F
  from mindspore.ops import composite as C
  from mindspore.common.tensor import Tensor
  import mindspore.common.dtype as mstype
- from mindspore.common.dtype import _dtype_to_nptype, _get_py_obj_dtype
+ from mindspore.common.dtype import dtype_to_nptype, get_py_obj_dtype
  from mindspore._c_expression import TensorPy as CTensor


@@ -164,7 +164,7 @@ def Load(value, u=None):

  def scalar_cast(x, t):
      """Implement scalar_cast."""
-     np_type = _dtype_to_nptype(t)  # pylint:disable=protected-access
+     np_type = dtype_to_nptype(t)
      value = np_type(x)
      cast_value = np.ndarray.item(value)
      return cast_value
@@ -172,7 +172,7 @@ def scalar_cast(x, t):

  def typeof(x):
      """Implement typeof."""
-     return _get_py_obj_dtype(x)  # pylint:disable=protected-access
+     return get_py_obj_dtype(x)


  def tuple_to_array(x):
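As a quick illustration of the helpers this file switches between (a sketch assuming the public `dtype_to_nptype`/`get_py_obj_dtype` names used on the rc1 side):

```python
import mindspore.common.dtype as mstype

np_type = mstype.dtype_to_nptype(mstype.float32)  # numpy.float32
value = np_type(2.5)
# scalar_cast above unwraps the numpy scalar the same way:
print(value.item())                # 2.5
print(mstype.get_py_obj_dtype(3))  # the MindSpore dtype for a Python int
```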
mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py CHANGED
@@ -81,7 +81,7 @@ def gen_custom_op_files(config_dir, dsl_dir):
      f.write(json.dumps(ops_info, indent=4))

  # custom akg op dsl file
- custom_py = os.path.join(cur_path, "custom.py")
+ custom_py = os.path.join(cur_path, "../../../../../lite/tools/kernel_builder/ascend/akg/custom.py")
  if not os.path.isfile(custom_py):
      raise RuntimeError("custom.py path is invalid: {}".format(custom_py))
  shutil.copy(custom_py, dsl_dir)
mindspore/_extends/parse/__init__.py CHANGED
@@ -1,4 +1,4 @@
- # Copyright 2020-2025 Huawei Technologies Co., Ltd
+ # Copyright 2020-2024 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -30,7 +30,7 @@ from .parser import (Parser, create_instance, is_supported_create_instance_type,
      get_obj_defined_from_obj_type, is_from_third_party_library, get_const_abs, get_const_round,
      get_const_len, convert_to_namedtuple, check_attrs, generate_lambda_object,
      check_is_subclass, check_attr_is_property, get_method_info, can_constant_fold,
-     convert_to_mutable, get_ast_augassign_namespace_symbol, hook_wrapper)
+     convert_to_mutable, get_ast_augassign_namespace_symbol)

  __all__ = ['Parser', 'create_instance', 'is_supported_create_instance_type', 'generate_scope', 'get_attr_from_object',
             'get_bprop_method_of_class', 'get_class_instance_type', 'get_class_member_namespace_symbol',
@@ -45,4 +45,4 @@ __all__ = ['Parser', 'create_instance', 'is_supported_create_instance_type', 'ge
             'is_class_member_recursive', 'get_obj_defined_from_obj_type',
             'is_from_third_party_library', 'get_const_abs', 'get_const_round', 'get_const_len', 'get_method_info',
             'convert_to_namedtuple', 'check_attrs', 'generate_lambda_object', 'check_is_subclass', 'check_attr_is_property',
-            'can_constant_fold', 'convert_to_mutable', 'get_ast_augassign_namespace_symbol', 'hook_wrapper']
+            'can_constant_fold', 'convert_to_mutable', 'get_ast_augassign_namespace_symbol']
mindspore/_extends/parse/deprecated/deprecated_tensor_method.py CHANGED
@@ -338,6 +338,7 @@ deprecated_tensor_method_map = {
      "atan": "deprecated_tensor_atan",
      "arctan": "deprecated_tensor_arctan",
      "dot": "deprecated_tensor_dot",
+     "copy_": "deprecated_tensor_copy_",

      # 153
      "logsumexp": "deprecated_tensor_logsumexp",
mindspore/_extends/parse/parser.py CHANGED
@@ -1,6 +1,6 @@
  # This is the Python adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
  #
- # Copyright 2020-2025 Huawei Technologies Co., Ltd
+ # Copyright 2020-2024 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -103,6 +103,17 @@ parse_expr_statement_white_list = (
      "append", "insert", "clear", "reverse", "extend", "update", "register_hook",
  )

+ # Methods whose call must be reordered when their receiver was already used earlier.
+ # e.g. `x.register_hook(hook_fn)` written after `out = x + 1` must take effect first:
+ # def construct(x):
+ #     out = x + 1
+ #     x.register_hook(hook_fn)
+ #     return out
+ # is equivalent to:
+ # def construct(x):
+ #     x = x.register_hook(hook_fn)  # register_hook returns the tensor itself when called in the graph (GRAPH_MODE).
+ #     out = x + 1
+ #     return out
  _need_reorder_methods = (
      "register_hook",
  )
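A minimal sketch of the pattern this reordering supports, assuming graph-mode `Tensor.register_hook` behaves as described in the comment above (the doubling hook and the expected value are illustrative):

```python
import mindspore as ms
from mindspore import nn, Tensor

def hook_fn(grad):
    return grad * 2  # double the gradient flowing back through x

class Net(nn.Cell):
    def construct(self, x):
        out = x * x
        # Written after the use of x; the parser reorders it so the
        # registration takes effect before `out = x * x`.
        x.register_hook(hook_fn)
        return out

ms.set_context(mode=ms.GRAPH_MODE)
grad_fn = ms.grad(Net())
print(grad_fn(Tensor(3.0, ms.float32)))  # expected 12.0: the hook doubles d(x*x)/dx = 6.0
```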
@@ -114,6 +125,10 @@ _unsupported_python_builtin_type = (
      set, dict, slice, complex, reversed, type,
  )

+ # Unsupported python builtin type in JIT Fallback.
+ _fallback_unsupported_python_builtin_type = (
+     compile, eval, exec
+ )

  _global_params = {}
@@ -182,7 +197,10 @@ def get_parse_method_of_class(obj, parse_method=None):
      if parse_method is not None:
          method_name = parse_method
      elif isinstance(obj, nn.Cell):
-         method_name = "construct"
+         if obj._backward_hook:
+             method_name = "_backward_hook_construct"
+         else:
+             method_name = "construct"

      return get_attr_from_object(obj, method_name)
@@ -787,12 +805,6 @@ def eval_script(exp_str, params):
      local_params = params[1]
      try:
          local_params = _convert_python_data(local_params)
-         # There are two sources of scripts:
-         # 1. The user's original Python script code, which is directly passed back to Python for execution,
-         #    and its behavior is guaranteed by the user.
-         # 2. Internally provided Python expression code, similar to
-         #    `__iternal_sequence_input__[__internal_sequence_index__]`.
-         # In addition, MindIR load and export do not involve the use of the `eval_script` function.
          res = eval(exp_str, global_params, local_params)
      except Exception as e:
          error_info = f"When eval '{exp_str}' by using JIT Fallback feature, an error occurred: " + str(e)
@@ -899,26 +911,6 @@ def can_constant_fold(obj):
      return obj in constant_fold_functions


- def hook_wrapper(hook_fn):
-     """
-     Decorator wrapper for gradient hook functions.
-     Handles custom logic when the hook returns None to ensure execution dependencies.
-
-     Args:
-         hook_fn (function): The original hook function to be wrapped.
-
-     Returns:
-         function: Wrapped inner hook function with dependency handling logic.
-     """
-     def inner(dout):
-         fdout = hook_fn(dout)
-         if fdout is None:
-             dout = ops.Depend()(dout, fdout)
-             return dout
-         return fdout
-     return inner
-
-
  class Parser:
      """
      Parser python code to ast tree.
@@ -952,6 +944,8 @@ class Parser:
          """To check if not supported for namespace"""
          unsupported = isinstance(value, _builtin_function_or_method_type) and value not in convert_object_map
          logger.debug(f"'{value}' unsupported: {unsupported}.")
+         if unsupported and value in _fallback_unsupported_python_builtin_type:
+             raise TypeError(f"'{value}' is not supported both in JIT Fallback and graph mode.")
          return unsupported

      @staticmethod
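An illustrative consequence of the `_fallback_unsupported_python_builtin_type` check (a sketch; the error text is taken from the raise above):

```python
import mindspore as ms

@ms.jit
def f(x):
    # `compile`, `eval` and `exec` have no graph-mode conversion; with the
    # check above they fail fast instead of going through JIT Fallback.
    return eval("x + 1")

# f(ms.Tensor(1))
# TypeError: '<built-in function eval>' is not supported both in JIT Fallback and graph mode.
```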
mindspore/_extends/parse/standard_method.py CHANGED
@@ -1114,7 +1114,7 @@ def copy_(self, src, non_blocking=False):
      """
      Copies the elements from src into self tensor and returns self.
      """
-     return inplace_copy_op(self, src, non_blocking)
+     return inplace_copy_op(self, src)


  def max(input, axis=None, keepdims=False, *, initial=None,  # pylint: disable=redefined-builtin
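For reference, the eager-level behavior behind this graph-mode shim (a minimal sketch; note that on the rc1 side `non_blocking` is accepted but not forwarded to `inplace_copy_op`):

```python
import mindspore as ms

dst = ms.Tensor([0.0, 0.0, 0.0], ms.float32)
src = ms.Tensor([1.0, 2.0, 3.0], ms.float32)
dst.copy_(src)   # copies src's elements into dst in place and returns dst
print(dst)       # [1. 2. 3.]
```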
@@ -3709,13 +3709,6 @@ def bernoulli(input, *, generator=None):
      return F.bernoulli_ext(input, generator=generator)


- def bernoulli_(input, p=0.5, *, generator=None):
-     """
-     Randomly draws binary numbers from a Bernoulli distribution.
-     """
-     return F.bernoulli_(input, p, generator=generator)
-
-
  def gather_nd(input_x, indices):
      r"""
      Gathers slices from a tensor by indices.
@@ -4542,13 +4535,6 @@ def index_put(input, indices, values, accumulate=False):
      return _index_put(input, values, indices)


- def move_to(input, to, blocking=True):
-     r"""
-     Copy Tensor to target device synchronously or asynchronously, default synchronously. only support PyNative mode.
-     """
-     raise ValueError(f"The method 'move_to' is not supported in jit.")
-
-
  def index_put_(input, indices, values, accumulate=False):
      r"""
      For details, please refer to :func:`mindspore.Tensor.index_put_`.
mindspore/_extends/pijit/pijit_func_white_list.py CHANGED
@@ -37,7 +37,7 @@ from mindspore.common.initializer import Zero
  from mindspore.ops.function import array_func
  from mindspore.ops import operations as P
  from mindspore.ops import functional as F
- from mindspore._c_expression.np_dtypes import np_dtype_valid
+ from mindspore._c_expression.np_dtypes import np_version_valid
  from mindspore.common.dtype import type_size_in_bytes
  from mindspore.communication._comm_helper import _is_initialized, _get_rank_helper, _get_local_rank_helper, \
      _get_size_helper, _get_local_size_helper, _get_world_rank_from_group_rank_helper, _get_group_ranks, \
@@ -129,6 +129,7 @@ from mindspore.dataset.vision.transforms import AdjustBrightness, AdjustContrast
      ResizeWithBBox as VResizeWithBBox, Rotate as VRotate, SlicePatches as VSlicePatches, Solarize, ToTensor,\
      TrivialAugmentWide, UniformAugment as VUniformAugment, VerticalFlip as VVerticalFlip
  from mindspore.profiler.profiler import Profiler
+ from mindspore.communication._hccl_management import get_rank_size, get_rank_id
  from mindspore.communication._comm_helper import _create_group_helper, _destroy_group_helper
  from mindspore.communication.management import _set_rank_from_mpi, init as cinit, release as crelease
  from mindspore.hal.stream import Stream, synchronize, set_cur_stream, current_stream, default_stream
@@ -491,11 +492,13 @@ _func_map = {
      function_id(validator.check_is_int): FUNC_KEY_PIJIT_CONSTEXPR,
      function_id(validator.check_is_number): FUNC_KEY_PIJIT_CONSTEXPR,
      function_id(validator.check_positive_int_sequence): FUNC_KEY_PIJIT_CONSTEXPR,
-     function_id(np_dtype_valid): FUNC_KEY_PIJIT_CONSTEXPR,
+     function_id(np_version_valid): FUNC_KEY_PIJIT_CONSTEXPR,
      function_id(_is_initialized): FUNC_KEY_PIJIT_CONSTEXPR,
      function_id(_set_elegant_exit_handle): FUNC_KEY_PIJIT_CONSTEXPR,
      function_id(_cost_model_context.get_cost_model_context): FUNC_KEY_PIJIT_CONSTEXPR,
      function_id(Stream.__repr__): FUNC_KEY_PIJIT_CONSTEXPR,
+     function_id(get_rank_size): FUNC_KEY_PIJIT_CONSTEXPR,
+     function_id(get_rank_id): FUNC_KEY_PIJIT_CONSTEXPR,
      function_id(offload_context): FUNC_KEY_PIJIT_CONSTEXPR,
      function_id(_is_in_data_parallel_mode): FUNC_KEY_PIJIT_CONSTEXPR,
      function_id(check_version_and_env_config): FUNC_KEY_PIJIT_CONSTEXPR,
mindspore/_extends/remote/kernel_build_server_ascend.py CHANGED
@@ -0,0 +1,75 @@
+ # Copyright 2020-2021 Huawei Technologies Co., Ltd
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ============================================================================
+ """kernel build server for ascend"""
+ import sys
+ import warnings
+ import json
+
+ from mindspore._extends.parallel_compile.tbe_compiler.tbe_job_manager import TbeJobManager
+ from mindspore._extends.remote.kernel_build_server import Messager, get_logger, AkgBuilder
+
+
+ class AscendMessager(Messager):
+     """
+     Ascend Messager
+     It works as a server, communicating with c++ client.
+     """
+
+     def __init__(self, fdin, fdout):
+         super().__init__(fdin, fdout)
+         get_logger().info("[TRACE] Ascend Messager init...")
+         self.tbe_builder = TbeJobManager()
+         self.akg_builder = AkgBuilder("ASCEND")
+
+     def handle(self):
+         """
+         Communicate with remote client.
+         Reference protocol between them at PR#3821 and PR#3935
+         """
+         arg = self.get_message()
+         if arg.startswith('AKG'):
+             self.akg_builder.handle(self, arg)
+         else:
+             job_json = dict()
+             try:
+                 job_json = json.loads(arg)
+             except json.decoder.JSONDecodeError:
+                 get_logger().error("[TRACE] Request is not a json message: {}".format(arg))
+                 self.send_ack(False)
+                 self.exit()
+             finally:
+                 pass
+
+             if "job_type" in job_json:
+                 res = self.tbe_builder.job_handler(arg)
+                 self.send_res(res)
+             else:
+                 get_logger().error("[TRACE] Request is not a TBE Job message: {}".format(arg))
+                 self.send_ack(False)
+                 self.exit()
+
+     def exit(self):
+         self.tbe_builder.reset()
+         get_logger().info("[TRACE] Ascend Messager Exit...")
+         exit()
+
+
+ if __name__ == '__main__':
+     warnings.simplefilter("ignore")
+     if len(sys.argv) != 3:
+         raise Exception('Incorrect argv: {}'.format(sys.argv))
+     get_logger().debug(f"[TRACE] argv: {str(sys.argv)}")
+     messager = AscendMessager(int(sys.argv[1]), int(sys.argv[2]))
+     messager.run()
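For orientation, a hypothetical sketch of how a parent process could launch this server; the two-fd convention is inferred from the `__main__` block above, while the pipe wiring and launcher itself are illustrative, not MindSpore's actual C++ client:

```python
import os
import subprocess
import sys

# Hypothetical launcher; the real client is a C++ process inside MindSpore.
c2s_read, c2s_write = os.pipe()   # client -> server requests (server's fdin)
s2c_read, s2c_write = os.pipe()   # server -> client replies (server's fdout)

server = subprocess.Popen(
    [sys.executable, "-m", "mindspore._extends.remote.kernel_build_server_ascend",
     str(c2s_read), str(s2c_write)],
    pass_fds=(c2s_read, s2c_write),
)
# The parent would then write TBE job JSON (or 'AKG...' commands) to c2s_write
# and read acks/results back from s2c_read.
```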
mindspore/amp.py CHANGED
@@ -167,6 +167,9 @@ def all_finite(inputs):
      >>> x = (Tensor(np.array([np.log(-1), 1, np.log(0)])), Tensor(np.array([1.0])))
      >>> output = amp.all_finite(x)

+     Tutorial Examples:
+         - `Automatic Mix Precision - Loss Scaling
+           <https://mindspore.cn/tutorials/en/master/beginner/mixed_precision.html#loss-scaling>`_
      """
      inputs = mutable(inputs)
      _check_overflow_mode = os.environ.get('MS_ASCEND_CHECK_OVERFLOW_MODE')
@@ -182,6 +185,9 @@ class LossScaler(ABC):
      to scale and unscale the loss value and gradients to avoid overflow, `adjust` is used to update the
      loss scale value.

+     For more information, refer to the `tutorials <https://mindspore.cn/tutorials/en/master/beginner/
+     mixed_precision.html#loss-scaling>`_.
+
      .. warning::
          This is an experimental API that is subject to change or deletion.

@@ -371,6 +377,10 @@ class DynamicLossScaler(LossScaler):

      Returns:
          Union(Tensor, tuple(Tensor)), the scaled value.
+
+     Tutorial Examples:
+         - `Automatic Mix Precision - Loss Scaling
+           <https://mindspore.cn/tutorials/en/master/beginner/mixed_precision.html#loss-scaling>`_
      """
      inputs = mutable(inputs)
      return _grad_scale_map(self.scale_value, inputs)
@@ -384,6 +394,10 @@ class DynamicLossScaler(LossScaler):

      Returns:
          Union(Tensor, tuple(Tensor)), the unscaled value.
+
+     Tutorial Examples:
+         - `Automatic Mix Precision - Loss Scaling
+           <https://mindspore.cn/tutorials/en/master/beginner/mixed_precision.html#loss-scaling>`_
      """
      inputs = mutable(inputs)
      return _grad_unscale_map(self.scale_value, inputs)
@@ -394,6 +408,10 @@ class DynamicLossScaler(LossScaler):

      Args:
          grads_finite (Tensor): a scalar bool Tensor indicating whether the grads are finite.
+
+     Tutorial Examples:
+         - `Automatic Mix Precision - Loss Scaling
+           <https://mindspore.cn/tutorials/en/master/beginner/mixed_precision.html#loss-scaling>`_
      """
      one = ops.ones((), self.scale_value.dtype)
      scale_mul_factor = self.scale_value * self.scale_factor
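Taken together, the workflow these docstrings point to looks roughly like this (a minimal sketch using the public `amp` API named in the hunks above; the gradient values are fabricated for illustration):

```python
import numpy as np
import mindspore as ms
from mindspore import amp, Tensor

loss_scaler = amp.DynamicLossScaler(scale_value=2**16, scale_factor=2, scale_window=50)

loss = Tensor(0.5, ms.float32)
scaled_loss = loss_scaler.scale(loss)      # multiply the loss by scale_value

# ... backprop on scaled_loss would yield scaled grads; fabricated here ...
grads = (Tensor(np.array([65536.0]), ms.float32),)
grads = loss_scaler.unscale(grads)         # divide the grads back down

grads_finite = amp.all_finite(grads)       # overflow check from the first hunk
loss_scaler.adjust(grads_finite)           # grow/shrink scale_value accordingly
```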
mindspore/common/__init__.py CHANGED
@@ -1,4 +1,4 @@
- # Copyright 2020-2025 Huawei Technologies Co., Ltd
+ # Copyright 2020-2024 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -17,15 +17,14 @@ from __future__ import absolute_import
  from mindspore.common import dtype
  from mindspore.common.api import ms_memory_recycle, jit, jit_class, _no_grad, \
      flops_collection, set_recursion_limit
- from mindspore.common.dtype import Type, int8, byte, int16, short, int, int32, intc, long, int64, intp, \
+ from mindspore.common.dtype import Type, int8, byte, int16, short, int32, intc, int64, intp, \
      uint8, ubyte, uint16, ushort, uint32, uintc, uint64, uintp, float16, half, \
-     float, float32, single, float64, bfloat16, double, bool, bool_, float_, list_, tuple_, int_, \
+     float32, single, float64, bfloat16, double, bool_, float_, list_, tuple_, int_, \
      uint, number, tensor_type, string, type_none, TensorType, Int, \
-     cfloat, complex64, cdouble, complex128, dtype_to_nptype, _null, _NullType, \
+     complex64, complex128, dtype_to_nptype, _null, _NullType, \
      dtype_to_pytype, pytype_to_dtype, get_py_obj_dtype, QuantDtype, qint4x2, \
      float8_e4m3fn, float8_e5m2, hifloat8
  from mindspore.common.dump import set_dump
- from mindspore.common.file_system import set_mindio_server_info, mindio_preload
  from mindspore.common.parameter import Parameter, ParameterTuple
  from mindspore.common.seed import set_seed, get_seed
  from mindspore.common.tensor import Tensor, tensor
@@ -42,7 +41,6 @@ from mindspore.common.generator import (
      Generator, default_generator, seed, manual_seed, initial_seed, get_rng_state, set_rng_state)
  from mindspore.ops.function.array_func import is_tensor, from_numpy
  from mindspore.common._grad_function import _Function
- from mindspore.common.dynamic_shape.enable_dynamic import enable_dynamic

  try:
      import triton
@@ -68,13 +66,11 @@ except ImportError:
      pass

  # symbols from dtype
- # bool, int, float are not defined in __all__ to avoid conflict with built-in types.
  __all__ = [
-     "bool_",
      "int8", "byte",
      "int16", "short",
      "int32", "intc",
-     "int64", "long", "intp",
+     "int64", "intp",
      "uint8", "ubyte",
      "uint16", "ushort",
      "uint32", "uintc",
@@ -82,20 +78,20 @@ __all__ = [
      "float16", "half",
      "float32", "single",
      "float64", "double",
-     "float_", "list_", "tuple_",
+     "bool_", "float_",
+     "list_", "tuple_",
      "int_", "uint",
      "number", "tensor_type",
      "string", "type_none",
      "_null",
      "TensorType", "QuantDtype",
      "Type", "Int", "_NullType",
-     "complex64", "cfloat",
-     "complex128", "cdouble",
-     "bfloat16", "qint4x2",
-     "float8_e4m3fn", "float8_e5m2", "hifloat8",
+     "complex64", "complex128",
      # __method__ from dtype
      "dtype_to_nptype", "dtype_to_pytype",
-     "pytype_to_dtype", "get_py_obj_dtype"
+     "pytype_to_dtype", "get_py_obj_dtype",
+     "bfloat16", "qint4x2",
+     "float8_e4m3fn", "float8_e5m2", "hifloat8"
  ]

@@ -108,13 +104,11 @@ __all__.extend([
      "ms_memory_recycle",
      "set_recursion_limit",
      "mutable", "JitConfig",
-     "enable_dynamic",
      "flops_collection",
      "lazy_inline", "load_mindir", "save_mindir",
      "no_inline",
      "Symbol",
      "recompute",
-     "is_tensor", "from_numpy", "_Function",
-     "set_mindio_server_info", "mindio_preload"
+     "is_tensor", "from_numpy", "_Function"
  ])
  __all__.extend(generator.__all__)
mindspore/common/_tensor_cpp_method.py CHANGED
@@ -14,4 +14,4 @@
  # ============================================================================
  """Add tensor cpp methods for stub tensor"""

- tensor_cpp_methods = ['atan2', 'arctan2', 'greater_equal', 'ge', 'acos', 'arccos', 'argmin', 'index_select', 'erfc', 'addcdiv', 'matmul', 'eq', 'put_', 'floor', 'addmv', 'type_as', 'reshape', 'nansum', 'logical_xor', 'logsumexp', 'topk', 'view_as', 'scatter_add', 'tan', 'narrow', 'sin', 'unique', 'copy_', 'baddbmm', 'logical_or', 'scatter_', 'logical_and', 'fill_diagonal_', 'frac', 't', 'median', 'square', 'masked_fill', 'less_equal', 'le', 'true_divide', 'outer', 'sigmoid', 'sinc', 'clamp', 'clip', 'argmax', 'atanh', 'arctanh', 'std', 'div_', '__itruediv__', 'sinh', 'subtract', '_to', 'sub', '__sub__', 'floor_divide', 'rsqrt', 'not_equal', 'ne', 'dot', 'max', 'clone', 'acosh', 'arccosh', 'unsqueeze', 'lerp', 'masked_fill_', 'ceil', 'masked_select', 'index_add', 'mul_', '__imul__', 'chunk', 'unbind', 'log10', 'fmod', 'scatter', 'transpose', 'mm', 'min', 'bitwise_or', '__or__', 'diag', 'greater', 'gt', 'cumsum', 'expm1', 'abs', 'absolute', '__abs__', 'logical_not', 'log', 'gather', 'sqrt', 'where', 'inverse', 'bitwise_and', '__and__', 'new_full', 'flatten', 'argsort', 'xlogy', 'triu', 'tril', 'neg', 'negative', 'erf', 'isfinite', 'repeat', 'add', '__add__', 'logaddexp', 'div', 'divide', 'floor_divide_', '__ifloordiv__', 'bincount', 'sum', 'new_zeros', 'roll', 'var', 'exp_', 'nan_to_num', 'isneginf', 'addbmm', 'minimum', 'remainder_', '__imod__', 'isinf', 'exp', 'pow', '__pow__', 'tanh', 'gcd', 'add_', '__iadd__', 'log1p', 'expand_as', 'isclose', 'allclose', 'bitwise_xor', '__xor__', 'histc', 'cosh', 'asin', 'arcsin', 'new_empty', 'maximum', 'atan', 'arctan', 'any', 'asinh', 'arcsinh', 'masked_scatter', 'trunc', 'mul', 'select', 'bitwise_not', 'fill_', 'all', 'mean', 'sort', 'hardshrink', 'kthvalue', 'addmm', 'remainder', 'reciprocal', 'round', 'log_', 'cos', 'split', 'tile', 'new_ones', 'logaddexp2', '__mod__', 'log2', 'prod', 'less', 'lt', 'sub_', '__isub__', 'take', 'count_nonzero', 'repeat_interleave']
+ tensor_cpp_methods = ['atan2', 'arctan2', 'greater_equal', 'ge', 'acos', 'arccos', 'argmin', 'index_select', 'erfc', 'addcdiv', 'matmul', 'eq', 'put_', 'floor', 'addmv', 'type_as', 'reshape', 'nansum', 'logical_xor', 'logsumexp', 'topk', 'view_as', 'scatter_add', 'tan', 'narrow', 'sin', 'unique', 'copy_', 'baddbmm', 'logical_or', 'scatter_', 'logical_and', 'fill_diagonal_', 'frac', 't', 'median', 'square', 'masked_fill', 'less_equal', 'le', 'true_divide', 'outer', 'sigmoid', 'sinc', 'clamp', 'clip', 'argmax', 'atanh', 'arctanh', 'std', 'div_', '__itruediv__', 'sinh', 'subtract', '_to', 'sub', '__sub__', 'floor_divide', 'rsqrt', 'not_equal', 'ne', 'dot', 'max', 'clone', 'acosh', 'arccosh', 'unsqueeze', 'lerp', 'masked_fill_', 'ceil', 'masked_select', 'index_add', 'mul_', '__imul__', 'chunk', 'unbind', 'log10', 'fmod', 'scatter', 'transpose', 'mm', 'min', 'bitwise_or', '__or__', 'diag', 'greater', 'gt', 'cumsum', 'expm1', 'abs', 'absolute', '__abs__', 'logical_not', 'log', 'gather', 'sqrt', 'where', 'inverse', 'bitwise_and', '__and__', 'new_full', 'flatten', 'argsort', 'xlogy', 'triu', 'tril', 'neg', 'negative', 'erf', 'isfinite', 'repeat', 'add', '__add__', 'logaddexp', 'div', 'divide', 'floor_divide_', '__ifloordiv__', 'bincount', 'sum', 'new_zeros', 'roll', 'var', 'exp_', 'nan_to_num', 'isneginf', 'addbmm', 'minimum', 'isinf', 'exp', 'pow', '__pow__', 'tanh', 'gcd', 'add_', '__iadd__', 'log1p', 'expand_as', 'isclose', 'allclose', 'bitwise_xor', '__xor__', 'histc', 'cosh', 'asin', 'arcsin', 'new_empty', 'maximum', 'atan', 'arctan', 'any', 'asinh', 'arcsinh', 'trunc', 'mul', 'select', 'bitwise_not', 'fill_', 'all', 'mean', 'sort', 'hardshrink', 'kthvalue', 'addmm', 'remainder', 'reciprocal', 'round', 'log_', 'cos', 'split', 'tile', 'new_ones', 'logaddexp2', '__mod__', 'log2', 'prod', 'less', 'lt', 'sub_', '__isub__', 'take', 'count_nonzero', 'repeat_interleave']