mindspore-2.3.0rc1-cp39-cp39-manylinux1_x86_64.whl → mindspore-2.3.0rc2-cp39-cp39-manylinux1_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic.

Files changed (226)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +1 -1
  3. mindspore/_akg/akg/utils/tbe_codegen_utils.py +13 -3
  4. mindspore/_c_dataengine.cpython-39-x86_64-linux-gnu.so +0 -0
  5. mindspore/_c_expression.cpython-39-x86_64-linux-gnu.so +0 -0
  6. mindspore/_checkparam.py +20 -0
  7. mindspore/_extends/parse/parser.py +1 -1
  8. mindspore/_extends/parse/standard_method.py +6 -5
  9. mindspore/_mindspore_offline_debug.cpython-39-x86_64-linux-gnu.so +0 -0
  10. mindspore/amp.py +5 -5
  11. mindspore/bin/cache_admin +0 -0
  12. mindspore/bin/cache_server +0 -0
  13. mindspore/boost/boost_cell_wrapper.py +1 -1
  14. mindspore/boost/group_loss_scale_manager.py +1 -1
  15. mindspore/common/__init__.py +4 -2
  16. mindspore/common/_register_for_recompute.py +48 -0
  17. mindspore/common/_stub_tensor.py +1 -0
  18. mindspore/common/api.py +56 -4
  19. mindspore/common/dtype.py +5 -3
  20. mindspore/common/dump.py +2 -2
  21. mindspore/common/hook_handle.py +51 -4
  22. mindspore/common/initializer.py +1 -1
  23. mindspore/common/jit_config.py +17 -6
  24. mindspore/common/parameter.py +7 -2
  25. mindspore/common/recompute.py +247 -0
  26. mindspore/common/sparse_tensor.py +2 -2
  27. mindspore/common/symbol.py +1 -1
  28. mindspore/common/tensor.py +74 -36
  29. mindspore/communication/__init__.py +3 -3
  30. mindspore/communication/management.py +30 -30
  31. mindspore/context.py +28 -15
  32. mindspore/dataset/__init__.py +5 -5
  33. mindspore/dataset/audio/__init__.py +2 -2
  34. mindspore/dataset/audio/transforms.py +51 -51
  35. mindspore/dataset/callback/ds_callback.py +2 -2
  36. mindspore/dataset/engine/cache_client.py +1 -1
  37. mindspore/dataset/engine/datasets.py +3 -3
  38. mindspore/dataset/engine/datasets_audio.py +14 -14
  39. mindspore/dataset/engine/datasets_standard_format.py +3 -3
  40. mindspore/dataset/engine/datasets_text.py +38 -38
  41. mindspore/dataset/engine/datasets_user_defined.py +3 -3
  42. mindspore/dataset/engine/datasets_vision.py +68 -68
  43. mindspore/dataset/text/__init__.py +3 -3
  44. mindspore/dataset/text/transforms.py +26 -26
  45. mindspore/dataset/transforms/__init__.py +1 -1
  46. mindspore/dataset/vision/__init__.py +3 -3
  47. mindspore/dataset/vision/transforms.py +92 -92
  48. mindspore/dataset/vision/utils.py +1 -1
  49. mindspore/experimental/optim/adadelta.py +2 -2
  50. mindspore/experimental/optim/adagrad.py +2 -2
  51. mindspore/experimental/optim/adam.py +2 -2
  52. mindspore/experimental/optim/adamax.py +2 -2
  53. mindspore/experimental/optim/adamw.py +2 -2
  54. mindspore/experimental/optim/asgd.py +2 -2
  55. mindspore/experimental/optim/lr_scheduler.py +24 -20
  56. mindspore/experimental/optim/nadam.py +2 -2
  57. mindspore/experimental/optim/optimizer.py +1 -1
  58. mindspore/experimental/optim/radam.py +2 -2
  59. mindspore/experimental/optim/rmsprop.py +2 -2
  60. mindspore/experimental/optim/rprop.py +2 -2
  61. mindspore/experimental/optim/sgd.py +2 -2
  62. mindspore/hal/stream.py +2 -0
  63. mindspore/include/mindapi/base/types.h +5 -0
  64. mindspore/lib/libdnnl.so.2 +0 -0
  65. mindspore/lib/libmindspore.so +0 -0
  66. mindspore/lib/libmindspore_backend.so +0 -0
  67. mindspore/lib/libmindspore_common.so +0 -0
  68. mindspore/lib/libmindspore_core.so +0 -0
  69. mindspore/lib/libmindspore_glog.so.0 +0 -0
  70. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  71. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  72. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  73. mindspore/lib/libmindspore_shared_lib.so +0 -0
  74. mindspore/lib/libopencv_core.so.4.5 +0 -0
  75. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  76. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  77. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  78. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6 -6
  79. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  80. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  81. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  82. mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
  83. mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
  84. mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
  85. mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
  86. mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
  87. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  88. mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
  89. mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
  90. mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
  91. mindspore/log.py +2 -2
  92. mindspore/mint/__init__.py +457 -0
  93. mindspore/mint/nn/__init__.py +430 -0
  94. mindspore/mint/nn/functional.py +424 -0
  95. mindspore/mint/optim/__init__.py +24 -0
  96. mindspore/mint/optim/adamw.py +186 -0
  97. mindspore/multiprocessing/__init__.py +4 -0
  98. mindspore/nn/__init__.py +3 -0
  99. mindspore/nn/cell.py +51 -47
  100. mindspore/nn/extend/__init__.py +29 -0
  101. mindspore/nn/extend/basic.py +140 -0
  102. mindspore/nn/extend/embedding.py +143 -0
  103. mindspore/nn/extend/layer/__init__.py +27 -0
  104. mindspore/nn/extend/layer/normalization.py +107 -0
  105. mindspore/nn/extend/pooling.py +117 -0
  106. mindspore/nn/generator.py +297 -0
  107. mindspore/nn/layer/basic.py +109 -1
  108. mindspore/nn/layer/container.py +2 -2
  109. mindspore/nn/layer/conv.py +6 -6
  110. mindspore/nn/layer/embedding.py +1 -1
  111. mindspore/nn/layer/normalization.py +21 -43
  112. mindspore/nn/layer/padding.py +4 -0
  113. mindspore/nn/optim/ada_grad.py +2 -2
  114. mindspore/nn/optim/adadelta.py +1 -1
  115. mindspore/nn/optim/adafactor.py +1 -1
  116. mindspore/nn/optim/adam.py +7 -7
  117. mindspore/nn/optim/adamax.py +2 -2
  118. mindspore/nn/optim/adasum.py +2 -2
  119. mindspore/nn/optim/asgd.py +2 -2
  120. mindspore/nn/optim/ftrl.py +1 -1
  121. mindspore/nn/optim/lamb.py +3 -3
  122. mindspore/nn/optim/lars.py +1 -1
  123. mindspore/nn/optim/lazyadam.py +2 -2
  124. mindspore/nn/optim/momentum.py +2 -2
  125. mindspore/nn/optim/optimizer.py +2 -2
  126. mindspore/nn/optim/proximal_ada_grad.py +2 -2
  127. mindspore/nn/optim/rmsprop.py +2 -2
  128. mindspore/nn/optim/rprop.py +2 -2
  129. mindspore/nn/optim/sgd.py +2 -2
  130. mindspore/nn/optim/thor.py +2 -2
  131. mindspore/nn/wrap/cell_wrapper.py +9 -9
  132. mindspore/nn/wrap/grad_reducer.py +5 -5
  133. mindspore/ops/_grad_experimental/grad_comm_ops.py +4 -2
  134. mindspore/ops/_vmap/vmap_grad_nn_ops.py +41 -2
  135. mindspore/ops/_vmap/vmap_math_ops.py +27 -8
  136. mindspore/ops/_vmap/vmap_nn_ops.py +66 -8
  137. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +73 -1
  138. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +12 -3
  139. mindspore/ops/auto_generate/gen_arg_handler.py +24 -0
  140. mindspore/ops/auto_generate/gen_extend_func.py +274 -0
  141. mindspore/ops/auto_generate/gen_ops_def.py +889 -22
  142. mindspore/ops/auto_generate/gen_ops_prim.py +3541 -253
  143. mindspore/ops/auto_generate/pyboost_inner_prim.py +282 -0
  144. mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -1
  145. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +9 -0
  146. mindspore/ops/extend/__init__.py +9 -1
  147. mindspore/ops/extend/array_func.py +134 -27
  148. mindspore/ops/extend/math_func.py +3 -3
  149. mindspore/ops/extend/nn_func.py +363 -2
  150. mindspore/ops/function/__init__.py +19 -2
  151. mindspore/ops/function/array_func.py +463 -439
  152. mindspore/ops/function/clip_func.py +7 -18
  153. mindspore/ops/function/grad/grad_func.py +5 -5
  154. mindspore/ops/function/linalg_func.py +4 -4
  155. mindspore/ops/function/math_func.py +260 -243
  156. mindspore/ops/function/nn_func.py +825 -62
  157. mindspore/ops/function/random_func.py +73 -4
  158. mindspore/ops/function/sparse_unary_func.py +1 -1
  159. mindspore/ops/function/vmap_func.py +1 -1
  160. mindspore/ops/functional.py +2 -2
  161. mindspore/ops/op_info_register.py +1 -31
  162. mindspore/ops/operations/__init__.py +2 -3
  163. mindspore/ops/operations/_grad_ops.py +2 -107
  164. mindspore/ops/operations/_inner_ops.py +5 -5
  165. mindspore/ops/operations/_sequence_ops.py +2 -2
  166. mindspore/ops/operations/array_ops.py +11 -233
  167. mindspore/ops/operations/comm_ops.py +32 -32
  168. mindspore/ops/operations/custom_ops.py +7 -89
  169. mindspore/ops/operations/manually_defined/ops_def.py +329 -4
  170. mindspore/ops/operations/math_ops.py +13 -163
  171. mindspore/ops/operations/nn_ops.py +9 -316
  172. mindspore/ops/operations/random_ops.py +1 -1
  173. mindspore/ops/operations/sparse_ops.py +3 -3
  174. mindspore/ops/primitive.py +2 -2
  175. mindspore/ops_generate/arg_dtype_cast.py +12 -3
  176. mindspore/ops_generate/arg_handler.py +24 -0
  177. mindspore/ops_generate/gen_ops_inner_prim.py +2 -0
  178. mindspore/ops_generate/gen_pyboost_func.py +13 -6
  179. mindspore/ops_generate/pyboost_utils.py +2 -17
  180. mindspore/parallel/__init__.py +3 -2
  181. mindspore/parallel/_auto_parallel_context.py +106 -1
  182. mindspore/parallel/_parallel_serialization.py +34 -2
  183. mindspore/parallel/_utils.py +16 -0
  184. mindspore/parallel/algo_parameter_config.py +4 -4
  185. mindspore/parallel/checkpoint_transform.py +249 -77
  186. mindspore/parallel/cluster/process_entity/_api.py +1 -1
  187. mindspore/parallel/parameter_broadcast.py +1 -1
  188. mindspore/parallel/shard.py +1 -1
  189. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +1 -0
  190. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +17 -5
  191. mindspore/profiler/parser/ascend_msprof_exporter.py +3 -3
  192. mindspore/profiler/parser/ascend_msprof_generator.py +10 -3
  193. mindspore/profiler/parser/ascend_op_generator.py +26 -9
  194. mindspore/profiler/parser/ascend_timeline_generator.py +7 -4
  195. mindspore/profiler/parser/profiler_info.py +11 -1
  196. mindspore/profiler/profiling.py +13 -5
  197. mindspore/rewrite/api/node.py +12 -12
  198. mindspore/rewrite/api/symbol_tree.py +11 -11
  199. mindspore/run_check/_check_version.py +1 -1
  200. mindspore/safeguard/rewrite_obfuscation.py +2 -2
  201. mindspore/train/amp.py +4 -4
  202. mindspore/train/anf_ir_pb2.py +8 -2
  203. mindspore/train/callback/_backup_and_restore.py +2 -2
  204. mindspore/train/callback/_callback.py +4 -4
  205. mindspore/train/callback/_checkpoint.py +2 -2
  206. mindspore/train/callback/_early_stop.py +2 -2
  207. mindspore/train/callback/_landscape.py +4 -4
  208. mindspore/train/callback/_loss_monitor.py +2 -2
  209. mindspore/train/callback/_on_request_exit.py +2 -2
  210. mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
  211. mindspore/train/callback/_summary_collector.py +2 -2
  212. mindspore/train/callback/_time_monitor.py +2 -2
  213. mindspore/train/dataset_helper.py +8 -3
  214. mindspore/train/loss_scale_manager.py +2 -2
  215. mindspore/train/metrics/metric.py +3 -3
  216. mindspore/train/mind_ir_pb2.py +22 -17
  217. mindspore/train/model.py +15 -15
  218. mindspore/train/serialization.py +18 -18
  219. mindspore/train/summary/summary_record.py +7 -7
  220. mindspore/train/train_thor/convert_utils.py +3 -3
  221. mindspore/version.py +1 -1
  222. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +1 -1
  223. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +226 -212
  224. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
  225. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
  226. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
mindspore/.commit_id CHANGED
@@ -1 +1 @@
- __commit_id__ = '[sha1]:d8944576,[branch]:(HEAD,origin/r2.3.q1,r2.3.q1)'
+ __commit_id__ = '[sha1]:c6a1400a,[branch]:(HEAD,origin/master,origin/HEAD,master)'
mindspore/__init__.py CHANGED
@@ -30,7 +30,7 @@ from mindspore.version import __version__
  from mindspore.profiler import Profiler, EnvProfiler
  from mindspore.parallel import set_algo_parameters, get_algo_parameters, reset_algo_parameters, \
      rank_list_for_transform, transform_checkpoint_by_rank, transform_checkpoints, merge_pipeline_strategys, shard, \
-     Layout, sync_pipeline_shared_parameters, parameter_broadcast
+     Layout, sync_pipeline_shared_parameters, parameter_broadcast, load_segmented_checkpoints
  from mindspore.rewrite import SymbolTree, ScopedValue, Node, NodeType
  from mindspore.safeguard import obfuscate_ckpt, load_obf_params_into_net
  from mindspore._check_jit_forbidden_api import get_obj_module_and_name_info, is_jit_forbidden_module, \
mindspore/_akg/akg/utils/tbe_codegen_utils.py CHANGED
@@ -94,11 +94,21 @@ def build_npu_for_akg(kernel_name,
                       simple_mode=False):
      import tbe
      from tbe.tvm.tir import transform
-     from tbe.tvm.driver.cce_build_module import _count_time, generate_cce_code
+     from tbe.tvm.driver.cce_build_module import _count_time
      from tbe.common.buildcfg import set_current_build_config
      from tbe.common.buildcfg.buildcfg_mapping import dynamic_shape, disable_vectorize, tik, enable_const_fold, \
          dynamic_tik, instrument_bound_checkers, tbe_workspace_size_list_length
-
+     # try to find generate_cce_code function, since it may have different name in different tbe.
+     import importlib
+     cce_build_module = importlib.import_module("tbe.tvm.driver.cce_build_module")
+     generate_cce_code_function = None
+     generate_cce_code_function_name = {"_generate_cce_code", "generate_cce_code"}
+     for func_name in generate_cce_code_function_name:
+         if hasattr(cce_build_module, func_name):
+             generate_cce_code_function = getattr(cce_build_module, func_name)
+     if generate_cce_code_function is None:
+         raise ValueError("Can not find generate cce code function.")
+
      set_current_build_config(tbe_workspace_size_list_length,
                               tbe.tvm.runtime.cce_runtime.tbe_workspace_size_list_length())

@@ -167,7 +177,7 @@ def build_npu_for_akg(kernel_name,

      _count_time(mod)
      mod = transform.SplitCoreCode()(mod)
-     generate_cce_code(mod, "cce", None)
+     generate_cce_code_function(mod, "cce", None)


  def build_tbe_codegen(kernel_name, stmt_json, arg_json, attr, ascend_type=None):
mindspore/_checkparam.py CHANGED
@@ -18,6 +18,7 @@ from __future__ import absolute_import
  import re
  import inspect
  import math
+ from types import FunctionType, MethodType
  from functools import reduce, wraps
  from itertools import repeat
  from collections.abc import Iterable
@@ -1375,4 +1376,23 @@ def args_type_check(*type_args, **type_kwargs):
      return type_check


+ def check_hook_fn(hook_type, hook_fn):
+     """Check hook fn"""
+     if context.get_context("mode") != context.PYNATIVE_MODE:
+         logger.warning(f"'{hook_type}' function is only supported in pynative mode, you can use "
+                        f"context.set_context to set pynative mode.")
+         return False
+
+     if not isinstance(hook_fn, (FunctionType, MethodType)):
+         raise TypeError(f"When using 'hook_type(hook_fn)', the type of 'hook_fn' must be python "
+                         f"function, but got {type(hook_fn)}.")
+
+     if hook_fn.__code__.co_name == "staging_specialize":
+         raise TypeError(f"Decorating hook function {hook_fn.__name__} with '@jit' is not supported.")
+
+     if hook_type == "register_hook" and hook_fn.__code__.co_argcount != 1:
+         raise TypeError(f"Tensor hook function {hook_fn.__name__} arg num is not equal to 1.")
+
+     return True
+
  _set_record = {}
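The `check_hook_fn` helper added above validates hook functions: a hook must be a plain Python function or method, must not be `@jit`-decorated, is only honoured in PyNative mode, and a `register_hook` hook must take exactly one argument. A minimal sketch of a hook that passes these checks, mirroring the `register_hook` example that appears later in this diff:

```python
import mindspore as ms
from mindspore import Tensor

ms.set_context(mode=ms.PYNATIVE_MODE)   # hooks are a PyNative-only feature per check_hook_fn

def scale_grad(grad):                    # exactly one argument, as required for register_hook
    return grad * 2

def net(x, y):
    z = x * y
    z.register_hook(scale_grad)          # plain function, not @jit-decorated: passes the checks
    return z * y

grad_fn = ms.grad(net, grad_position=(0, 1))
print(grad_fn(Tensor(1.0, ms.float32), Tensor(2.0, ms.float32)))
```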
mindspore/_extends/parse/parser.py CHANGED
@@ -486,7 +486,7 @@ def convert_class_to_function(cls_str, cls_obj):
          f"supported in 'construct' or @jit decorated function. Try to create {cls_str} "
          f"instances external such as initialized in the method '__init__' before assigning. "
          f"For more details, please refer to "
-         f"https://www.mindspore.cn/docs/zh-CN/r2.3.q1/design/dynamic_graph_and_static_graph.html \n")
+         f"https://www.mindspore.cn/docs/zh-CN/master/design/dynamic_graph_and_static_graph.html \n")
      return convert_class_to_function_map.get(cls_str)

mindspore/_extends/parse/standard_method.py CHANGED
@@ -1,6 +1,6 @@
  # This is the Python adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
  #
- # Copyright 2020-2023 Huawei Technologies Co., Ltd
+ # Copyright 2020-2024 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -101,7 +101,7 @@ def ndimension(x):
      return len(x.shape)


- def prod(input, axis=None, keep_dims=False):
+ def prod(input, axis=None, keep_dims=False, dtype=None):
      """
      Reduces a dimension of a tensor by product all elements in the dimension.

@@ -110,6 +110,7 @@ def prod(input, axis=None, keep_dims=False):
          axis (Union[None, int, tuple(int), list(int)]): Dimensions of reduction,
              when axis is None or empty tuple, reduce all dimensions. Default: ``None``.
          keep_dims (bool): Whether to keep the reduced dimensions. Default: False.
+         dtype (:class:`mindspore.dtype`): The desired data type of returned Tensor. Default: ``None`` .

      Returns:
          Tensor, has the same data type as input tensor.
@@ -125,7 +126,7 @@ def prod(input, axis=None, keep_dims=False):
          >>> print(output)
          6.0
      """
-     return F.prod(input, axis, keep_dims)
+     return F.prod(input, axis, keep_dims, dtype)


  def addcdiv(input, tensor1, tensor2, value=1):
@@ -2421,11 +2422,11 @@ def float_func(*data):
          tensor_shape = F.shape(data)
          tensor_shape_len = len(tensor_shape)
          if tensor_shape_len == 0 or (tensor_shape_len == 1 and tensor_shape[0] == 1):
-             data = F.cast(data, mstype.float64)
+             data = F.cast(data, mstype.float32)
              return TensorToScalar()(data)
          raise ValueError(f"Can not convert Tensor with more than one element to Scalar, "
                           f"while the data's shape is: {tensor_shape}")
-     return F.scalar_cast(data, mstype.float64)
+     return F.scalar_cast(data, mstype.float32)
      if isinstance(data, (CSRTensor, COOTensor, RowTensorInner)):
          const_utils.raise_type_error(
              "float() does not support sparse tensor input.")
mindspore/amp.py CHANGED
@@ -132,7 +132,7 @@ def all_finite(inputs):

      Tutorial Examples:
          - `Automatic Mix Precision - Loss Scaling
-           <https://mindspore.cn/tutorials/en/r2.3.q1/advanced/mixed_precision.html#loss-scaling>`_
+           <https://mindspore.cn/tutorials/en/master/advanced/mixed_precision.html#loss-scaling>`_
      """
      inputs = mutable(inputs)
      _check_overflow_mode = os.environ.get('MS_ASCEND_CHECK_OVERFLOW_MODE')
@@ -148,7 +148,7 @@ class LossScaler(ABC):
      to scale and unscale the loss value and gradients to avoid overflow, `adjust` is used to update the
      loss scale value.

-     For more information, refer to the `tutorials <https://mindspore.cn/tutorials/en/r2.3.q1/advanced/
+     For more information, refer to the `tutorials <https://mindspore.cn/tutorials/en/master/advanced/
      mixed_precision.html#loss-scaling>`_.

      .. warning::
@@ -340,7 +340,7 @@ class DynamicLossScaler(LossScaler):

      Tutorial Examples:
          - `Automatic Mix Precision - Loss Scaling
-           <https://mindspore.cn/tutorials/en/r2.3.q1/advanced/mixed_precision.html#loss-scaling>`_
+           <https://mindspore.cn/tutorials/en/master/advanced/mixed_precision.html#loss-scaling>`_
      """
      inputs = mutable(inputs)
      return _grad_scale_map(self.scale_value, inputs)
@@ -357,7 +357,7 @@ class DynamicLossScaler(LossScaler):

      Tutorial Examples:
          - `Automatic Mix Precision - Loss Scaling
-           <https://mindspore.cn/tutorials/en/r2.3.q1/advanced/mixed_precision.html#loss-scaling>`_
+           <https://mindspore.cn/tutorials/en/master/advanced/mixed_precision.html#loss-scaling>`_
      """
      inputs = mutable(inputs)
      return _grad_unscale_map(self.scale_value, inputs)
@@ -371,7 +371,7 @@ class DynamicLossScaler(LossScaler):

      Tutorial Examples:
          - `Automatic Mix Precision - Loss Scaling
-           <https://mindspore.cn/tutorials/en/r2.3.q1/advanced/mixed_precision.html#loss-scaling>`_
+           <https://mindspore.cn/tutorials/en/master/advanced/mixed_precision.html#loss-scaling>`_
      """
      one = ops.ones((), self.scale_value.dtype)
      scale_mul_factor = self.scale_value * self.scale_factor
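The `amp.py` changes are only documentation-link updates, but for context this is the loss-scaling workflow those tutorials describe. A rough sketch using the `mindspore.amp` objects referenced in this file (gradient computation elided; the gradient values are stand-ins):

```python
import mindspore as ms
from mindspore import amp, Tensor

loss_scaler = amp.DynamicLossScaler(scale_value=2**10, scale_factor=2, scale_window=50)

loss = Tensor(0.5, ms.float32)
scaled_loss = loss_scaler.scale(loss)      # multiply the loss by the current scale
grads = (Tensor(2.0, ms.float32),)         # stand-in for gradients of scaled_loss
grads = loss_scaler.unscale(grads)         # divide the scale back out of the gradients
is_finite = amp.all_finite(grads)          # overflow check
loss_scaler.adjust(is_finite)              # grow or shrink scale_value accordingly
```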
mindspore/bin/cache_admin CHANGED
Binary file
mindspore/bin/cache_server CHANGED
Binary file
mindspore/boost/boost_cell_wrapper.py CHANGED
@@ -136,7 +136,7 @@ class BoostTrainOneStepCell(TrainOneStepCell):
      >>> from mindspore import boost
      >>> from mindspore import nn
      >>> # Define the network structure of LeNet5. Refer to
-     >>> # https://gitee.com/mindspore/docs/blob/r2.3.q1/docs/mindspore/code/lenet.py
+     >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/lenet.py
      >>> net = LeNet5()
      >>> loss_fn = nn.SoftmaxCrossEntropyWithLogits()
      >>> optim = nn.Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
mindspore/boost/group_loss_scale_manager.py CHANGED
@@ -94,7 +94,7 @@ class GroupLossScaleManager(Cell):
      ...                 loss_scale_manager=loss_scale_manager,
      ...                 boost_level="O1", boost_config_dict=boost_config_dict)
      >>> # Create the dataset taking MNIST as an example. Refer to
-     >>> # https://gitee.com/mindspore/docs/blob/r2.3.q1/docs/mindspore/code/mnist.py
+     >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/mnist.py
      >>> dataset = create_dataset()
      >>> model.train(2, dataset)
      """
mindspore/common/__init__.py CHANGED
@@ -21,7 +21,7 @@ from mindspore.common.dtype import Type, int8, byte, int16, short, int32, intc,
      float32, single, float64, bfloat16, double, bool_, float_, list_, tuple_, int_, \
      uint, number, tensor_type, string, type_none, TensorType, Int, \
      complex64, complex128, dtype_to_nptype, _null, _NullType, \
-     dtype_to_pytype, pytype_to_dtype, get_py_obj_dtype, QuantDtype
+     dtype_to_pytype, pytype_to_dtype, get_py_obj_dtype, QuantDtype, qint4x2
  from mindspore.common.dump import set_dump
  from mindspore.common.parameter import Parameter, ParameterTuple
  from mindspore.common.seed import set_seed, get_seed
@@ -32,6 +32,7 @@ from mindspore.common.jit_config import JitConfig
  from mindspore.common.lazy_inline import lazy_inline
  from mindspore.common.mindir_util import load_mindir, save_mindir
  from mindspore.common.symbol import Symbol
+ from mindspore.common.recompute import recompute

  # symbols from dtype
  __all__ = [
@@ -58,7 +59,7 @@ __all__ = [
      # __method__ from dtype
      "dtype_to_nptype", "dtype_to_pytype",
      "pytype_to_dtype", "get_py_obj_dtype",
-     "bfloat16",
+     "bfloat16", "qint4x2"
  ]

  __all__.extend([
@@ -72,4 +73,5 @@ __all__.extend([
      "mutable", "JitConfig",
      "lazy_inline", "load_mindir", "save_mindir",
      "Symbol",
+     "recompute"
  ])
mindspore/common/_register_for_recompute.py ADDED
@@ -0,0 +1,48 @@
+ # Copyright 2024 Huawei Technologies Co., Ltd
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ============================================================================
+
+ """Registry the relation."""
+
+ from __future__ import absolute_import
+ from types import FunctionType
+
+
+ class RecomputeRegistry:
+     """Registry class for register recompute generator."""
+
+     def __init__(self):
+         self.recompute_generator = None
+
+     def register(self, fn):
+         """
+         Register recompute generator function
+         :param fn:
+         :return:
+         """
+         if not isinstance(fn, FunctionType):
+             raise TypeError("Fn should be function type, but got", type(fn))
+         self.recompute_generator = fn
+
+     def get(self):
+         """
+         Get recompute generator.
+         :return:
+         """
+         if self.recompute_generator is None:
+             raise TypeError("Recompute generator should be initialised before get()!")
+         return self.recompute_generator
+
+
+ recompute_registry = RecomputeRegistry()
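The new `_register_for_recompute.py` is a one-slot registry: framework code registers a single "recompute generator" function, and later consumers retrieve it with `get()`. A small illustration of that contract (`make_recompute_cell` below is a hypothetical generator used only for this sketch, not part of the package):

```python
from mindspore.common._register_for_recompute import recompute_registry

def make_recompute_cell(block):
    """Hypothetical generator: wrap `block` so its forward pass is recomputed during backprop."""
    return block   # placeholder body, for illustration only

recompute_registry.register(make_recompute_cell)   # non-functions raise TypeError
generator = recompute_registry.get()               # raises TypeError if nothing was registered
```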
mindspore/common/_stub_tensor.py CHANGED
@@ -151,6 +151,7 @@ class StubTensor:
      flush_from_cache = _stub_method(Tensor.flush_from_cache)
      contiguous = _stub_method(Tensor.contiguous)
      is_contiguous = _stub_method(Tensor.is_contiguous)
+     register_hook = _stub_method(Tensor.register_hook)

      def stub_sync(self):
          """sync real tensor."""
mindspore/common/api.py CHANGED
@@ -1,6 +1,6 @@
  # This is the Python adaptation and derivative work of Myia (https://github.com/mila-iqia/myia/).
  #
- # Copyright 2020-2023 Huawei Technologies Co., Ltd
+ # Copyright 2020-2024 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -52,6 +52,7 @@ from mindspore.common._register_for_adapter import ms_adapter_registry
  from mindspore.common.auto_dynamic_shape import get_auto_dynamic_shape_args, update_auto_dynamic_shape_phase, \
      get_auto_dynamic_shape_args_with_check_input_signature, update_auto_dynamic_shape_phase_with_check_input_signature

+
  # Store ms_function class compiled pipeline cache.
  ms_compile_cache = set()
  # Store cell compiled pipeline cache.
@@ -292,6 +293,28 @@ def _handle_arg(obj, arg, compile_arg):
      return None


+ def _handle_arg_predict(obj, arg, compile_arg):
+     """Handle arg for runtime .If need handle the arg, return True"""
+     if arg is None:
+         return None
+
+     if isinstance(arg, (int, float)):
+         return None
+
+     if isinstance(arg, (list, tuple)):
+         if compile_arg is not None and hasattr(compile_arg, "__ms_mutable__") and \
+                 getattr(compile_arg, "__ms_mutable__"):
+             # mutable([]) will be eliminated by FuncGraphSpecializer, and empty list is not supported by backend.
+             if isinstance(arg, list) and not arg:
+                 return None
+             return arg
+         if hasattr(obj, "enable_tuple_broaden") and obj.enable_tuple_broaden and isinstance(arg, tuple) and \
+                 _check_all_tensor(arg):
+             return arg
+         return None
+     return arg
+
+
  def _get_args_for_run(obj, args, kwargs, compile_args):
      """Get the actual input args and kwargs for runtime."""
      new_args = []
@@ -308,6 +331,22 @@ def _get_args_for_run(obj, args, kwargs, compile_args):
      return new_args


+ def _get_args_for_run_predict(obj, args, kwargs, compile_args):
+     """Get the actual input args and kwargs for runtime."""
+     new_args = []
+     for arg, compile_arg in zip(args, compile_args):
+         new_arg = _handle_arg_predict(obj, arg, compile_arg)
+         if new_arg is not None:
+             new_args.append(new_arg)
+
+     for _, value in kwargs.items():
+         new_value = _handle_arg_predict(obj, value, None)
+         if new_value is not None:
+             new_args.append(new_value)
+
+     return new_args
+
+
  class _MindsporeFunctionExecutor:
      """
      Represents a function compiled by graph compiler.
@@ -625,8 +664,8 @@ def jit(fn=None, mode="PSJit", input_signature=None, hash_args=None, jit_config=
          fn (Function): The Python function that will be run as a graph. Default: ``None`` .
          mode (str): The type of jit used, the value of mode should be ``PIJit`` or ``PSJit``. Default: ``PSJit`` .

-         - `PSJit <https://www.mindspore.cn/docs/en/r2.3.q1/note/static_graph_syntax_support.html>`_ : MindSpore GRAPH_MODE.
-         - `PIJit <https://www.mindspore.cn/docs/en/r2.3.q1/design/dynamic_graph_and_static_graph.html>`_ : MindSpore PYNATIVE_MODE.
+         - `PSJit <https://www.mindspore.cn/docs/en/master/note/static_graph_syntax_support.html>`_ : MindSpore GRAPH_MODE.
+         - `PIJit <https://www.mindspore.cn/docs/en/master/design/dynamic_graph_and_static_graph.html>`_ : MindSpore PYNATIVE_MODE.

          input_signature (Tensor): The Tensor which describes the input arguments. The shape and dtype of the Tensor
              will be supplied to this function. If input_signature is specified, each input to `fn` must be a `Tensor`.
@@ -1382,6 +1421,17 @@ class _PyNativeExecutor:
          """
          self._executor.set_jit_compile_status(status, phase)

+     def set_is_run_recompute(self, status):
+         """
+         Set recompute grad is compiling
+
+         Args:
+             status(bool): grad is in recompute status
+         Return:
+             None.
+         """
+         self._executor.set_is_run_recompute(status)
+
      def set_dynamic_input(self, obj, *args):
          """
          Set dynamic shape tensor of input arguments.
@@ -1533,7 +1583,7 @@ class _CellGraphExecutor:
          enable_compile_cache = context.get_context("enable_compile_cache")
          if enable_compile_cache is not True and enable_compile_cache != "1":
              enable_compile_cache = os.getenv('MS_COMPILER_CACHE_ENABLE')
-         if "train" in phase and (enable_compile_cache is True or enable_compile_cache == "1"):
+         if enable_compile_cache is True or enable_compile_cache == "1":
              self._graph_executor.set_compile_cache_dep_files(_get_compile_cache_dep_files())

      def compile(self, obj, *args, phase='predict', do_convert=True, jit_config_dict=None, **kwargs):
@@ -1566,7 +1616,9 @@ class _CellGraphExecutor:
          self._graph_executor.set_enable_tuple_broaden(self.enable_tuple_broaden)
          key = self._graph_executor.generate_arguments_key(obj, args, kwargs, self.enable_tuple_broaden)
          obj.arguments_key = str(key)
+         raw_phase = phase
          phase = phase + '.' + str(obj.create_time) + '.' + str(id(obj)) + '.' + obj.arguments_key
+         obj.phase_cache[raw_phase] = phase
          update_auto_dynamic_shape_phase(args, key_id, phase)

          if phase in obj.compile_cache and self.has_compiled(phase):
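The updated `jit` docstring distinguishes the two compilation paths selected by `mode`: `"PSJit"` compiles the function as a MindSpore graph (GRAPH_MODE), while `"PIJit"` accelerates PyNative execution. A minimal sketch of the decorator as documented in this hunk:

```python
import mindspore as ms
from mindspore import Tensor, ops

@ms.jit(mode="PSJit")            # static-graph path; "PIJit" keeps PyNative semantics
def fused_op(x, y):
    return ops.mul(x, y) + x

out = fused_op(Tensor(2.0, ms.float32), Tensor(3.0, ms.float32))
print(out)   # 8.0
```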
mindspore/common/dtype.py CHANGED
@@ -43,7 +43,7 @@ __dtype__ = [
      "TensorType", "_null",
      "Type", "Int",
      "complex64", "complex128",
-     "bfloat16"
+     "bfloat16", "qint4x2"
  ]

  __method__ = [
@@ -58,6 +58,7 @@ __all__.extend(__method__)
  # type definition
  bool_ = typing.Bool()

+ qint4x2 = typing.Int(4)
  int8 = typing.Int(8)
  byte = int8
  int16 = typing.Int(16)
@@ -140,7 +141,8 @@ number_type = (int8,
                 float64,
                 bfloat16,
                 complex64,
-                complex128,)
+                complex128,
+                qint4x2,)

  int_type = (int8, int16, int32, int64,)
  uint_type = (uint8, uint16, uint32, uint64,)
@@ -349,7 +351,7 @@ class QuantDtype(enum.Enum):
      An enum for quant datatype, contains `INT1` ~ `INT16`, `UINT1` ~ `UINT16`.

      `QuantDtype` is defined in
-     `dtype.py <https://gitee.com/mindspore/mindspore/tree/r2.3.q1/mindspore/python/mindspore/common/dtype.py>`_ ,
+     `dtype.py <https://gitee.com/mindspore/mindspore/blob/master/mindspore/python/mindspore/common/dtype.py>`_ ,
      use command below to import:

      .. code-block::
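`qint4x2` (presumably two 4-bit quantized values packed per byte, judging by the name) becomes a first-class dtype symbol in this release: it is defined as `typing.Int(4)`, added to `number_type`, and re-exported from `mindspore.common`. A quick check of the new export, assuming nothing beyond what this diff adds:

```python
from mindspore.common import dtype as mstype

print(mstype.qint4x2)                         # the new 4-bit dtype object, typing.Int(4)
print(mstype.qint4x2 in mstype.number_type)   # True: qint4x2 was appended to number_type
# common/__init__.py also re-exports it, so mindspore.qint4x2 should resolve as well.
```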
mindspore/common/dump.py CHANGED
@@ -27,7 +27,7 @@ def set_dump(target, enabled=True):
      `target` should be an instance of :class:`mindspore.nn.Cell` or :class:`mindspore.ops.Primitive` .
      Please note that this API takes effect only when Asynchronous Dump is enabled and the `dump_mode`
      field in dump config file is ``"2"`` . See the `dump document
-     <https://www.mindspore.cn/tutorials/experts/en/r2.3.q1/debug/dump.html>`_ for details.
+     <https://www.mindspore.cn/tutorials/experts/en/master/debug/dump.html>`_ for details.
      The default enabled status for
      a :class:`mindspore.nn.Cell` or :class:`mindspore.ops.Primitive` is False.

@@ -61,7 +61,7 @@ def set_dump(target, enabled=True):
      .. note::
          Please set environment variable `MINDSPORE_DUMP_CONFIG` to the dump config file and set `dump_mode` field
          in dump config file to 2 before running this example.
-         See `dump document <https://www.mindspore.cn/tutorials/experts/en/r2.3.q1/debug/dump.html>`_ for details.
+         See `dump document <https://www.mindspore.cn/tutorials/experts/en/master/debug/dump.html>`_ for details.

      >>> import numpy as np
      >>> import mindspore as ms
mindspore/common/hook_handle.py CHANGED
@@ -14,9 +14,58 @@
  # ============================================================================
  """The removable handle for cell hook function."""
  from __future__ import absolute_import
-
  import weakref
- from mindspore.common.api import _pynative_executor
+ from mindspore._c_expression import Tensor as Tensor_
+
+
+ class _TensorHookHandle:
+     r"""
+     A handle provides the ability to remote a tensor hook.
+
+     Note:
+         It is only supported in pynative mode and works when registering or removing hook function for tensor
+
+     Supported Platforms:
+         ``Ascend`` ``GPU`` ``CPU``
+     """
+
+     def __init__(self):
+         self.id = None
+
+     def remove(self):
+         """
+         Remove the tensor hook function, which corresponds to this '_TensorHookHandle' object.
+
+         Args:
+             None.
+
+         Returns:
+             None.
+
+         Supported Platforms:
+             ``Ascend`` ``GPU`` ``CPU``
+
+         Examples:
+             >>> import mindspore as ms
+             >>> from mindspore import Tensor
+             >>> ms.set_context(mode=ms.PYNATIVE_MODE)
+             >>> def hook_fn(grad):
+             ...     return grad * 2
+             ...
+             >>> def hook_test(x, y):
+             ...     z = x * y
+             ...     handle = z.register_hook(hook_fn)
+             ...     z = z * y
+             ...     handle.remove()
+             ...     return z
+             ...
+             >>> ms_grad = ms.grad(hook_test, grad_position=(0,1))
+             >>> output = ms_grad(Tensor(1, ms.float32), Tensor(2, ms.float32))
+             >>> print(output)
+             (Tensor(shape=[], dtype=Float32, value=4), Tensor(shape=[], dtype=Float32, value=4))
+         """
+         if self.id is not None:
+             Tensor_.remove_hook(self.id)


  class HookHandle:
@@ -100,9 +149,7 @@ class HookHandle:
          hook_cell = self._hook_cell()
          if self._hook_type == "_forward_pre_hook" and self._hook_key in hook_cell._forward_pre_hook:
              del hook_cell._forward_pre_hook[self._hook_key]
-             _pynative_executor.set_hook_changed(hook_cell)
          elif self._hook_type == "_forward_hook" and self._hook_key in hook_cell._forward_hook:
              del hook_cell._forward_hook[self._hook_key]
-             _pynative_executor.set_hook_changed(hook_cell)
          elif self._hook_type == "_cell_backward_hook":
              hook_cell._cell_backward_hook.remove_backward_hook(self._hook_key)
mindspore/common/initializer.py CHANGED
@@ -37,7 +37,7 @@ class Initializer:
      Initializers are intended to be used for delayed initialization in parallel mode rather than Tensor
      initialization. If you have to use Initializers to create a Tensor, :func:`mindspore.Tensor.init_data` should be
      followed in most of the cases. For more information, please refer to `mindspore.Tensor.init_data
-     <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore/Tensor/mindspore.Tensor.init_data.html#
+     <https://www.mindspore.cn/docs/en/master/api_python/mindspore/Tensor/mindspore.Tensor.init_data.html#
      mindspore-tensor-init-data>`_ .

      Args:
mindspore/common/jit_config.py CHANGED
@@ -42,7 +42,7 @@ class JitConfig:
          The value must be ``"STRICT"`` , ``"LAX"`` or ``""`` . Default to an empty string, which means that this
          JitConfig configuration will be ignored and the jit_syntax_level of ms.context will be used.
          For more details about ms.context, refer to
-         `set_context <https://www.mindspore.cn/docs/en/r2.3.q1/api_python/mindspore/mindspore.set_context.html>`_ .
+         `set_context <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.set_context.html>`_ .
          Default: ``""`` .

          - ``"STRICT"``: Only basic syntax is supported, and execution performance is optimal. Can be used for MindIR
@@ -58,6 +58,13 @@ class JitConfig:
            compiling performance.
          - ``DEBUG``: Used for debugging when errors occur, more information will be record in compiling process.

+     infer_boost (str, optional): enable infer boost mode.
+         The value must be ``"on"`` , ``"off"``. Default to an "off", which means that disable infer boost.
+         when infer boost mode is enabled, MindSpore will use high perf kernel lib, use faster runtime make
+         infer speed is best.
+         Note: current infer boost only support `jit_level` == ``"O0"`` and only Atlas A2 series products
+         are supported.
+
      **kwargs (dict): A dictionary of keyword arguments that the class needs.

      Examples:
@@ -66,22 +73,26 @@ class JitConfig:
          >>> jitconfig = JitConfig(jit_level="O1")
          >>>
          >>> # Define the network structure of LeNet5. Refer to
-         >>> # https://gitee.com/mindspore/docs/blob/r2.3.q1/docs/mindspore/code/lenet.py
+         >>> # https://gitee.com/mindspore/docs/blob/master/docs/mindspore/code/lenet.py
          >>> net = LeNet5()
          >>>
          >>> net.set_jit_config(jitconfig)
      """
-     def __init__(self, jit_level="", exc_mode="auto", jit_syntax_level="", debug_level="RELEASE", **kwargs):
+     def __init__(self, jit_level="", exc_mode="auto", jit_syntax_level="", debug_level="RELEASE",
+                  infer_boost="off", **kwargs):
          if jit_level not in ["", "O0", "O1", "O2"]:
              raise ValueError("For 'jit_level' must be one of ['O0', 'O1', 'O2'].")
-         if exc_mode not in ['auto', 'sink', 'no_sink']:
+         if exc_mode not in ["auto", "sink", "no_sink"]:
              raise ValueError("For 'exc_mode' must be one of '['auto', 'sink', 'no_sink']'.")
-         if jit_syntax_level != "" and jit_syntax_level not in ['STRICT', 'COMPATIBLE', 'LAX']:
+         if jit_syntax_level != "" and jit_syntax_level not in ["STRICT", "COMPATIBLE", "LAX"]:
              raise ValueError("For 'jit_syntax_level' must be one of '['STRICT', 'LAX']'.")
-         if debug_level not in ['RELEASE', 'DEBUG']:
+         if debug_level not in ["RELEASE", "DEBUG"]:
              raise ValueError("For 'debug_level' must be one of '['RELEASE', 'DEBUG']'.")
+         if infer_boost != "" and infer_boost not in ["on", "off"]:
+             raise ValueError("For 'infer_boost' must be one of '['on', 'off']'.")
          self.jit_config_dict = kwargs
          self.jit_config_dict["jit_level"] = jit_level
          self.jit_config_dict["exc_mode"] = exc_mode
          self.jit_config_dict["jit_syntax_level"] = jit_syntax_level
          self.jit_config_dict["debug_level"] = debug_level
+         self.jit_config_dict["infer_boost"] = infer_boost
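`JitConfig` gains an `infer_boost` switch (`"on"`/`"off"`, default `"off"`) that enables the high-performance inference kernel library; per the docstring it only takes effect with `jit_level="O0"` and on Atlas A2 series devices. A hedged usage sketch following the attachment pattern from the existing docstring example (the `TinyNet` cell is just a stand-in network):

```python
from mindspore import nn, JitConfig

class TinyNet(nn.Cell):
    def construct(self, x):
        return x * 2

# infer_boost is documented to require jit_level="O0" and Atlas A2 series hardware.
jitconfig = JitConfig(jit_level="O0", infer_boost="on")

net = TinyNet()
net.set_jit_config(jitconfig)   # same pattern as the JitConfig docstring example above
```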
mindspore/common/parameter.py CHANGED
@@ -336,6 +336,8 @@ class Parameter(Tensor_):
              cpu_cast = Cast().set_device("CPU")
              data = cpu_cast(data, mstype.float32)
              return (Tensor, data.asnumpy(), mstype.bfloat16)
+         if data.dtype == mstype.qint4x2:
+             return (Tensor, data.asnumpy(), mstype.qint4x2)
          return (Tensor, data.asnumpy())

      not_init_data = _is_role_sched() or (_is_role_pserver() and _cache_enable()) or _is_in_parallel_mode()
@@ -364,7 +366,7 @@ class Parameter(Tensor_):

      Tutorial Examples:
          - `Parameter Server Mode
-           <https://www.mindspore.cn/tutorials/experts/en/r2.3.q1/parallel/parameter_server_training.html>`_
+           <https://www.mindspore.cn/tutorials/experts/en/master/parallel/parameter_server_training.html>`_
      """
      if not _is_ps_mode() or not (_is_role_worker() or _is_role_pserver() or _is_role_sched()):
          raise RuntimeError("Must complete following two steps before calling set_param_ps: \n"
@@ -792,6 +794,9 @@ class Parameter(Tensor_):
      """
      Add a pipeline stage to the parameter.

+     Note:
+         This interface is deprecated in 2.3, and will be deleted in the future.
+
      Args:
          stage(int): The pipeline stage to be added.

@@ -1023,7 +1028,7 @@ class ParameterTuple(tuple):

      Tutorial Examples:
          - `Cell and Parameter - Parameter Tuple
-           <https://mindspore.cn/tutorials/en/r2.3.q1/advanced/modules/layer.html#parameter-tuple>`_
+           <https://mindspore.cn/tutorials/en/master/advanced/modules/layer.html#parameter-tuple>`_
      """
      Validator.check_str_by_regular(prefix)
      new = []