mindspore 2.7.0__cp310-cp310-win_amd64.whl → 2.7.0rc1__cp310-cp310-win_amd64.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (196)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +1 -1
  3. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  6. mindspore/_checkparam.py +2 -2
  7. mindspore/_extends/builtin_operations.py +3 -3
  8. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +1 -1
  9. mindspore/_extends/parse/__init__.py +3 -3
  10. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +1 -0
  11. mindspore/_extends/parse/parser.py +22 -28
  12. mindspore/_extends/parse/standard_method.py +1 -15
  13. mindspore/_extends/pijit/pijit_func_white_list.py +5 -2
  14. mindspore/_extends/remote/kernel_build_server_ascend.py +75 -0
  15. mindspore/amp.py +18 -0
  16. mindspore/avcodec-59.dll +0 -0
  17. mindspore/avdevice-59.dll +0 -0
  18. mindspore/avfilter-8.dll +0 -0
  19. mindspore/avformat-59.dll +0 -0
  20. mindspore/avutil-57.dll +0 -0
  21. mindspore/common/__init__.py +12 -18
  22. mindspore/common/_tensor_cpp_method.py +1 -1
  23. mindspore/common/_tensor_docs.py +38 -102
  24. mindspore/common/_utils.py +1 -9
  25. mindspore/common/api.py +106 -155
  26. mindspore/common/{dynamic_shape/auto_dynamic_shape.py → auto_dynamic_shape.py} +23 -17
  27. mindspore/common/dtype.py +57 -98
  28. mindspore/common/dump.py +1 -1
  29. mindspore/common/file_system.py +9 -59
  30. mindspore/common/hook_handle.py +3 -22
  31. mindspore/common/np_dtype.py +3 -3
  32. mindspore/common/parameter.py +20 -4
  33. mindspore/common/recompute.py +4 -2
  34. mindspore/common/tensor.py +52 -38
  35. mindspore/communication/_hccl_management.py +297 -0
  36. mindspore/context.py +21 -15
  37. mindspore/dataset/__init__.py +1 -1
  38. mindspore/dataset/audio/transforms.py +1 -1
  39. mindspore/dataset/core/config.py +1 -35
  40. mindspore/dataset/engine/datasets.py +315 -330
  41. mindspore/dataset/engine/datasets_user_defined.py +22 -38
  42. mindspore/dataset/transforms/c_transforms.py +2 -2
  43. mindspore/dataset/transforms/transforms.py +3 -3
  44. mindspore/dataset/vision/__init__.py +1 -1
  45. mindspore/dataset/vision/py_transforms.py +8 -8
  46. mindspore/dataset/vision/transforms.py +5 -17
  47. mindspore/dataset/vision/utils.py +21 -632
  48. mindspore/device_context/ascend/op_tuning.py +1 -35
  49. mindspore/dnnl.dll +0 -0
  50. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -3
  51. mindspore/include/api/cell.h +4 -28
  52. mindspore/include/api/cfg.h +7 -24
  53. mindspore/include/api/context.h +0 -1
  54. mindspore/include/api/delegate.h +2 -0
  55. mindspore/include/api/dual_abi_helper.h +19 -100
  56. mindspore/include/api/graph.h +1 -14
  57. mindspore/include/api/kernel.h +3 -16
  58. mindspore/include/api/kernel_api.h +1 -9
  59. mindspore/include/api/metrics/accuracy.h +0 -9
  60. mindspore/include/api/model.h +1 -5
  61. mindspore/include/api/model_group.h +0 -4
  62. mindspore/include/api/model_parallel_runner.h +0 -2
  63. mindspore/include/api/status.h +10 -48
  64. mindspore/include/api/types.h +1 -6
  65. mindspore/include/dataset/constants.h +0 -9
  66. mindspore/jpeg62.dll +0 -0
  67. mindspore/mindrecord/tools/cifar10.py +2 -3
  68. mindspore/mindrecord/tools/cifar10_to_mr.py +5 -5
  69. mindspore/mindspore_backend_common.dll +0 -0
  70. mindspore/mindspore_backend_manager.dll +0 -0
  71. mindspore/mindspore_common.dll +0 -0
  72. mindspore/mindspore_core.dll +0 -0
  73. mindspore/mindspore_cpu_res_manager.dll +0 -0
  74. mindspore/mindspore_dump.dll +0 -0
  75. mindspore/mindspore_frontend.dll +0 -0
  76. mindspore/mindspore_glog.dll +0 -0
  77. mindspore/mindspore_memory_pool.dll +0 -0
  78. mindspore/mindspore_ms_backend.dll +0 -0
  79. mindspore/mindspore_ops.dll +0 -0
  80. mindspore/mindspore_ops_host.dll +0 -0
  81. mindspore/mindspore_ops_kernel_common.dll +0 -0
  82. mindspore/mindspore_profiler.dll +0 -0
  83. mindspore/mindspore_pyboost.dll +0 -0
  84. mindspore/mindspore_pynative.dll +0 -0
  85. mindspore/mindspore_res_manager.dll +0 -0
  86. mindspore/mindspore_runtime_pipeline.dll +0 -0
  87. mindspore/mint/distributed/__init__.py +0 -4
  88. mindspore/mint/distributed/distributed.py +14 -217
  89. mindspore/mint/nn/layer/_functions.py +2 -1
  90. mindspore/mint/nn/layer/conv.py +6 -6
  91. mindspore/mint/nn/layer/normalization.py +3 -3
  92. mindspore/nn/cell.py +174 -216
  93. mindspore/nn/layer/activation.py +2 -4
  94. mindspore/nn/layer/basic.py +13 -7
  95. mindspore/nn/layer/image.py +1 -1
  96. mindspore/nn/optim/adam.py +3 -1
  97. mindspore/nn/optim/lamb.py +3 -1
  98. mindspore/nn/optim/tft_wrapper.py +3 -2
  99. mindspore/nn/probability/distribution/_utils/utils.py +2 -2
  100. mindspore/nn/wrap/cell_wrapper.py +5 -39
  101. mindspore/nn/wrap/grad_reducer.py +15 -0
  102. mindspore/numpy/array_creations.py +2 -2
  103. mindspore/numpy/utils_const.py +1 -1
  104. mindspore/opencv_core452.dll +0 -0
  105. mindspore/opencv_imgcodecs452.dll +0 -0
  106. mindspore/opencv_imgproc452.dll +0 -0
  107. mindspore/ops/_grad_experimental/grad_inner_ops.py +9 -0
  108. mindspore/ops/_op_impl/cpu/__init__.py +0 -1
  109. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +2 -12
  110. mindspore/ops/auto_generate/gen_extend_func.py +4 -4
  111. mindspore/ops/auto_generate/gen_ops_def.py +16 -290
  112. mindspore/ops/auto_generate/gen_ops_prim.py +76 -563
  113. mindspore/ops/composite/base.py +1 -1
  114. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +1 -1
  115. mindspore/ops/function/__init__.py +0 -1
  116. mindspore/ops/function/array_func.py +6 -10
  117. mindspore/ops/function/debug_func.py +2 -4
  118. mindspore/ops/function/grad/grad_func.py +12 -4
  119. mindspore/ops/function/math_func.py +32 -44
  120. mindspore/ops/function/nn_func.py +20 -18
  121. mindspore/ops/functional.py +1 -2
  122. mindspore/ops/functional_overload.py +12 -23
  123. mindspore/ops/operations/_inner_ops.py +12 -11
  124. mindspore/ops/operations/array_ops.py +50 -4
  125. mindspore/ops/operations/comm_ops.py +15 -1
  126. mindspore/ops/operations/custom_ops.py +4 -10
  127. mindspore/ops/operations/debug_ops.py +6 -6
  128. mindspore/ops/operations/manually_defined/ops_def.py +12 -12
  129. mindspore/ops/operations/math_ops.py +5 -5
  130. mindspore/ops/operations/nn_ops.py +1 -1
  131. mindspore/ops/primitive.py +10 -3
  132. mindspore/ops/tensor_method.py +7 -16
  133. mindspore/ops_generate/pyboost/gen_pyboost_func.py +16 -0
  134. mindspore/parallel/_auto_parallel_context.py +15 -5
  135. mindspore/parallel/_parallel_serialization.py +2 -3
  136. mindspore/parallel/_ps_context.py +2 -2
  137. mindspore/parallel/_transformer/transformer.py +4 -4
  138. mindspore/parallel/_utils.py +11 -5
  139. mindspore/parallel/auto_parallel.py +9 -23
  140. mindspore/parallel/checkpoint_transform.py +0 -2
  141. mindspore/parallel/cluster/process_entity/_api.py +1 -4
  142. mindspore/parallel/cluster/run.py +3 -5
  143. mindspore/parallel/function/reshard_func.py +5 -6
  144. mindspore/parallel/nn/parallel_cell_wrapper.py +3 -40
  145. mindspore/parallel/nn/parallel_grad_reducer.py +8 -0
  146. mindspore/parallel/shard.py +21 -7
  147. mindspore/parallel/transform_safetensors.py +4 -10
  148. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +9 -10
  149. mindspore/profiler/analysis/viewer/ascend_op_memory_viewer.py +1 -1
  150. mindspore/profiler/common/msprof_cmd_tool.py +2 -2
  151. mindspore/profiler/common/path_manager.py +0 -9
  152. mindspore/profiler/common/profiler_context.py +2 -25
  153. mindspore/profiler/common/profiler_meta_data.py +0 -1
  154. mindspore/profiler/common/profiler_op_analyse.py +6 -10
  155. mindspore/{ops/_op_impl/cpu/joinedstr_op.py → profiler/common/validator/__init__.py} +1 -15
  156. mindspore/profiler/common/validator/validate_path.py +84 -0
  157. mindspore/profiler/dynamic_profiler.py +46 -91
  158. mindspore/profiler/envprofiler.py +5 -30
  159. mindspore/profiler/experimental_config.py +1 -16
  160. mindspore/profiler/platform/cpu_profiler.py +4 -10
  161. mindspore/profiler/platform/npu_profiler.py +1 -1
  162. mindspore/profiler/profiler.py +145 -193
  163. mindspore/profiler/profiler_action_controller.py +1 -1
  164. mindspore/profiler/profiler_interface.py +2 -2
  165. mindspore/rewrite/symbol_tree/symbol_tree.py +1 -1
  166. mindspore/runtime/__init__.py +4 -6
  167. mindspore/runtime/executor.py +0 -27
  168. mindspore/runtime/memory.py +0 -1
  169. mindspore/runtime/thread_bind_core.py +1 -1
  170. mindspore/swresample-4.dll +0 -0
  171. mindspore/swscale-6.dll +0 -0
  172. mindspore/tinyxml2.dll +0 -0
  173. mindspore/train/_utils.py +3 -3
  174. mindspore/train/amp.py +3 -0
  175. mindspore/train/callback/_callback.py +1 -2
  176. mindspore/train/callback/_checkpoint.py +8 -1
  177. mindspore/train/callback/_flops_collector.py +6 -10
  178. mindspore/train/callback/_train_fault_tolerance.py +7 -3
  179. mindspore/train/data_sink.py +4 -4
  180. mindspore/train/dataset_helper.py +5 -5
  181. mindspore/train/model.py +20 -4
  182. mindspore/train/serialization.py +15 -35
  183. mindspore/train/train_thor/model_thor.py +2 -2
  184. mindspore/turbojpeg.dll +0 -0
  185. mindspore/utils/hooks.py +81 -0
  186. mindspore/utils/utils.py +8 -8
  187. mindspore/version.py +1 -1
  188. {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/METADATA +1 -1
  189. {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/RECORD +193 -192
  190. mindspore/_extends/parallel_compile/akg_compiler/custom.py +0 -1109
  191. mindspore/common/dynamic_shape/__init__.py +0 -0
  192. mindspore/common/dynamic_shape/enable_dynamic.py +0 -197
  193. /mindspore/common/{dynamic_shape/_auto_dynamic.py → _auto_dynamic.py} +0 -0
  194. {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/WHEEL +0 -0
  195. {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/entry_points.txt +0 -0
  196. {mindspore-2.7.0.dist-info → mindspore-2.7.0rc1.dist-info}/top_level.txt +0 -0
@@ -782,7 +782,7 @@ class MultitypeFuncGraph(MultitypeFuncGraph_):
         if len(self.entries) == 1:
             output = self.entries[0][1](*args)
             return output
-        types = tuple(map(mstype._get_py_obj_dtype, args))  # pylint:disable=protected-access
+        types = tuple(map(mstype.get_py_obj_dtype, args))
         for sigs, fn in self.entries:
             if len(sigs) != len(types):
                 continue
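For context, the loop above picks the registered overload whose signature matches the runtime dtypes of the call arguments. A hypothetical plain-Python sketch of that dispatch idea (names are illustrative, not MindSpore API):

    # Hypothetical signature-based dispatch, mirroring the loop in the hunk above.
    def dispatch(entries, *args):
        types = tuple(type(a) for a in args)  # stand-in for mstype.get_py_obj_dtype
        for sigs, fn in entries:
            if len(sigs) == len(types) and all(s is t for s, t in zip(sigs, types)):
                return fn(*args)
        raise TypeError(f"no overload registered for {types}")

    entries = [((int, int), lambda a, b: a + b),
               ((str, str), lambda a, b: a + " " + b)]
    print(dispatch(entries, 1, 2))      # 3
    print(dispatch(entries, "a", "b"))  # a b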
@@ -619,7 +619,7 @@ def scalar_in_sequence(x, y):
 @constexpr
 def get_np_eps(input_dtype):
     """Get numpy eps."""
-    nptype = mstype._dtype_to_nptype(input_dtype)  # pylint:disable=protected-access
+    nptype = mstype.dtype_to_nptype(input_dtype)
     eps = np.finfo(nptype).eps
     return float(eps)
 
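For reference, the values `get_np_eps` produces are the standard IEEE-754 machine epsilons that `np.finfo` reports:

    import numpy as np

    # Machine epsilon per floating-point width.
    print(float(np.finfo(np.float16).eps))  # 0.0009765625
    print(float(np.finfo(np.float32).eps))  # 1.1920928955078125e-07
    print(float(np.finfo(np.float64).eps))  # 2.220446049250313e-16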
@@ -481,7 +481,6 @@ from .nn_func import (
     max_pool3d,
     moe_token_permute,
     moe_token_unpermute,
-    ring_attention_update,
     batch_norm,
     add_rms_norm,
     rms_norm,
@@ -784,7 +784,7 @@ def full_ext(size, fill_value, *, dtype=None):  # pylint: disable=redefined-oute
 
     Keyword Args:
         dtype (mindspore.dtype, optional): The specified type of output tensor.
-            `bool` and `number` are supported, for details,
+            `bool_` and `number` are supported, for details,
             please refer to :class:`mindspore.dtype` . Default: ``None`` .
 
     Returns:
@@ -857,7 +857,7 @@ def full_like_ext(input, fill_value, *, dtype=None):
         fill_value (Number): Value to fill the returned tensor. Complex numbers are not supported for now.
 
     Keyword Args:
-        dtype (mindspore.dtype, optional): The specified type of output tensor. `bool` and `number` are supported,
+        dtype (mindspore.dtype, optional): The specified type of output tensor. `bool_` and `number` are supported,
             for details, please refer to :class:`mindspore.dtype` . Default: ``None`` .
 
     Returns:
@@ -4728,11 +4728,7 @@ def _tensor_split_sub_tensors(x, indices_or_sections, axis):
         idx = indices_or_sections[i]
         begin[axis] = 0 if i == 0 else indices_or_sections[i - 1]
         end[axis] = idx
-        if begin[axis] == end[axis]:
-            empty_shape = x.shape[0:axis] + (0,) + x.shape[axis + 1:]
-            sliced_tensor = ms.Tensor(shape=empty_shape, dtype=x.dtype)
-        else:
-            sliced_tensor = strided_slice(x, tuple(begin), tuple(end), strides)
+        sliced_tensor = strided_slice(x, tuple(begin), tuple(end), strides)
         sub_tensors.append(sliced_tensor)
     return tuple(sub_tensors)
 
@@ -4748,7 +4744,7 @@ def _tensor_split_sub_int(x, indices_or_sections, axis):
         res = _get_cache_prim(P.Split)(axis)(x)
     elif indices_or_sections > length_along_dim:
         res = _get_cache_prim(P.Split)(axis, length_along_dim)(x)
-        indices_or_sections_n = [length_along_dim]
+        indices_or_sections_n = [length_along_dim, length_along_dim + 1]
         res2 = _tensor_split_sub_tensors(x, indices_or_sections_n, axis)
         for _ in np.arange(length_along_dim, indices_or_sections):
             res += tuple(res2)[1:]
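The extra sentinel index added above covers the case where the requested section count exceeds the dimension length, which yields trailing empty chunks; NumPy's `array_split` shows the intended semantics (an analogy, not the MindSpore code path):

    import numpy as np

    # Splitting 3 elements into 5 sections pads the result with empty chunks.
    chunks = np.array_split(np.arange(3), 5)
    print([c.tolist() for c in chunks])  # [[0], [1], [2], [], []]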
@@ -5071,7 +5067,7 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None):  # pylin
     >>> # case 4: Use "where" to include only specific elements in computing the maximum.
     >>> where = mindspore.tensor([[0, 0, 1, 0],
     ...                           [0, 0, 1, 1],
-    ...                           [1, 1, 1, 0]], dtype=mindspore.bool)
+    ...                           [1, 1, 1, 0]], dtype=mindspore.bool_)
     >>> mindspore.ops.max(input, axis=1, keepdims=True, initial=0, where=where)
     (Tensor(shape=[3, 1], dtype=Int64, value=
     [[4],
@@ -5211,7 +5207,7 @@ def min(input, axis=None, keepdims=False, *, initial=None, where=None):  # pylin
     >>> # case 4: Use "where" to include only specific elements in computing the minimum.
     >>> where = mindspore.tensor([[1, 0, 1, 0],
     ...                           [0, 0, 1, 1],
-    ...                           [1, 1, 1, 0]], dtype=mindspore.bool)
+    ...                           [1, 1, 1, 0]], dtype=mindspore.bool_)
     >>> mindspore.ops.min(input, axis=1, keepdims=True, initial=0, where=where)
     (Tensor(shape=[3, 1], dtype=Int64, value=
     [[ 0],
@@ -124,7 +124,6 @@ def tensordump(file_name, tensor, mode='out'):
         --master_port=11450 --log_dir=msrun_log --join=True --cluster_time_out=300 tensordump_example.py
 
         >>> import os
-        >>> import time
         >>> import numpy as np
         >>> import mindspore
         >>> from mindspore import nn, context
@@ -151,16 +150,15 @@ def tensordump(file_name, tensor, mode='out'):
         >>> strategy1 = ((1, 2), (2, 1))
         >>> strategy2 = ((1, 2), (2, 1))
         >>> with no_init_parameters():
-        ...     net = Net(strategy1, strategy2)
+        >>> net = Net(strategy1, strategy2)
         >>> x = mindspore.tensor(0.1 * mindspore.ops.randn(64, 64), mindspore.float32)
         >>> y = mindspore.tensor(0.1 * mindspore.ops.randn(64, 64), mindspore.float32)
         >>> b = mindspore.tensor(0.1 * mindspore.ops.randn(64, 64), mindspore.float32)
-        >>> parallel_net = AutoParallel(net, parallel_mode="semi_auto")
+        >>> parallel_net = Autoparallel(net, parallel_mode="semi_auto")
         >>> parallel_net.dataset_strategy(config="full_batch")
         >>> out = parallel_net(x, y, b)
         >>> print(f"out shape is: {out.shape}")
         >>> # out shape is (64, 64)
-        >>> time.sleep(0.5)  # npy file is generated asynchronously, spend an interval time then load it.
         >>> matmul1_output_slice = np.load(f'rank_{rank_id}_mul1_mul2_float32_0.npy')  # load matmul1's output slice
         >>> print(f"matmul1_output_slice is loaded, shape is: {matmul1_output_slice.shape}")
         >>> # matmul1_output_slice is loaded, shape is: (64, 64)
@@ -654,7 +654,9 @@ def _check_jvp_input_v_len(inputs_len, v_len):
 
 def jvp(fn, inputs, v, has_aux=False):
     """
-    Compute the jacobian-vector-product of the given network.
+    Compute the jacobian-vector-product of the given network. The calculation procedure of JVP can be found in
+    `forward-mode differentiation
+    <https://www.mindspore.cn/docs/en/master/design/programming_paradigm.html#forward-mode-ad>`_.
 
     Args:
         fn (Union[Function, Cell]): The function or net that takes Tensor inputs and returns single Tensor or tuple of
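For reference, `mindspore.ops.jvp` packs the tangents the same way as the inputs; a minimal usage sketch under that reading of the signature (values illustrative):

    import mindspore
    from mindspore import ops, Tensor

    def fn(x, y):
        return x ** 3 + y

    x = Tensor([[1., 2.], [3., 4.]], mindspore.float32)
    y = Tensor([[1., 2.], [3., 4.]], mindspore.float32)
    v = Tensor([[1., 1.], [1., 1.]], mindspore.float32)
    # One forward pass returns both the network output and the JVP.
    net_out, jvp_out = ops.jvp(fn, (x, y), (v, v))
    print(jvp_out)  # elementwise 3*x**2 + 1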
@@ -867,7 +869,9 @@ _vjp_grad_op_with_weight = _Grad(get_all=True, get_by_list=True, sens_param=True
 
 def vjp(fn, *inputs, weights=None, has_aux=False):
     """
-    Compute the vector-jacobian-product of the given network.
+    Compute the vector-jacobian-product of the given network. `vjp` matches
+    `reverse-mode differentiation
+    <https://www.mindspore.cn/docs/en/master/design/programming_paradigm.html#reverse-mode-ad>`_.
 
     Args:
         fn (Union[Function, Cell]): The function or net that takes Tensor inputs and returns single Tensor or tuple of
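Likewise, `mindspore.ops.vjp` returns the forward output plus a closure that maps a cotangent vector to gradients of the inputs; a minimal sketch (values illustrative):

    import mindspore
    from mindspore import ops, Tensor

    def fn(x, y):
        return x ** 3 + y

    x = Tensor([[1., 2.], [3., 4.]], mindspore.float32)
    y = Tensor([[1., 2.], [3., 4.]], mindspore.float32)
    net_out, vjp_fn = ops.vjp(fn, x, y)
    v = Tensor([[1., 1.], [1., 1.]], mindspore.float32)
    grads = vjp_fn(v)   # tuple of gradients w.r.t. x and y
    print(grads[0])     # elementwise 3*x**2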
@@ -1066,7 +1070,9 @@ _vmap = _Vmap()
 
 def jacfwd(fn, grad_position=0, has_aux=False):
     """
-    Compute Jacobian via forward mode.
+    Compute Jacobian via forward mode, corresponding to
+    `forward-mode differentiation
+    <https://www.mindspore.cn/docs/en/master/design/programming_paradigm.html#forward-mode-ad>`_.
     When number of outputs is much greater than that of inputs, it's better to calculate Jacobian via forward mode than
     reverse mode to get better performance.
 
@@ -1235,7 +1241,9 @@ _grad = _Grad(get_by_position=True, has_aux=False, sens_param=True)
 
 def jacrev(fn, grad_position=0, has_aux=False):
     """
-    Compute Jacobian via reverse mode.
+    Compute Jacobian via reverse mode, corresponding to
+    `reverse-mode differentiation
+    <https://www.mindspore.cn/docs/en/master/design/programming_paradigm.html#reverse-mode-ad>`_.
     When number of inputs is much greater than that of outputs, it's better to calculate Jacobian via reverse mode than
     forward mode to get better performance.
 
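A small usage sketch contrasting the two Jacobian helpers touched above (illustrative; both return the same Jacobian and differ only in how it is computed):

    import mindspore
    from mindspore import ops, Tensor

    def fn(x):
        return x ** 2

    x = Tensor([1., 2., 3.], mindspore.float32)
    # Forward mode pays per input column: good when outputs >> inputs.
    jac_fwd = ops.jacfwd(fn)(x)
    # Reverse mode pays per output row: good when inputs >> outputs.
    jac_rev = ops.jacrev(fn)(x)
    print(jac_fwd)  # 3x3 matrix with 2*x on the diagonal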
@@ -1218,7 +1218,7 @@ def logical_not(input):
 
     Examples:
         >>> import mindspore
-        >>> x = mindspore.tensor([True, False, True], mindspore.bool)
+        >>> x = mindspore.tensor([True, False, True], mindspore.bool_)
        >>> output = mindspore.ops.logical_not(x)
         >>> print(output)
         [False True False]
@@ -1250,23 +1250,23 @@ def logical_or(input, other):
 
     Examples:
         >>> import mindspore
-        >>> x = mindspore.tensor([True, False, True], mindspore.bool)
-        >>> y = mindspore.tensor([True, True, False], mindspore.bool)
+        >>> x = mindspore.tensor([True, False, True], mindspore.bool_)
+        >>> y = mindspore.tensor([True, True, False], mindspore.bool_)
         >>> output = mindspore.ops.logical_or(x, y)
         >>> print(output)
         [ True True True]
-        >>> x = mindspore.tensor(1, mindspore.bool)
-        >>> y = mindspore.tensor(0, mindspore.bool)
+        >>> x = mindspore.tensor(1, mindspore.bool_)
+        >>> y = mindspore.tensor(0, mindspore.bool_)
         >>> output = mindspore.ops.logical_or(x, y)
         >>> print(output)
         True
         >>> x = True
-        >>> y = mindspore.tensor(0, mindspore.bool)
+        >>> y = mindspore.tensor(0, mindspore.bool_)
         >>> output = mindspore.ops.logical_or(x, y)
         >>> print(output)
         True
         >>> x = True
-        >>> y = mindspore.tensor([True, False], mindspore.bool)
+        >>> y = mindspore.tensor([True, False], mindspore.bool_)
         >>> output = mindspore.ops.logical_or(x, y)
         >>> print(output)
         [True True]
@@ -1298,23 +1298,23 @@ def logical_and(input, other):
 
     Examples:
         >>> import mindspore
-        >>> x = mindspore.tensor([True, False, True], mindspore.bool)
-        >>> y = mindspore.tensor([True, True, False], mindspore.bool)
+        >>> x = mindspore.tensor([True, False, True], mindspore.bool_)
+        >>> y = mindspore.tensor([True, True, False], mindspore.bool_)
         >>> output = mindspore.ops.logical_and(x, y)
         >>> print(output)
         [ True False False]
-        >>> x = mindspore.tensor(1, mindspore.bool)
-        >>> y = mindspore.tensor(0, mindspore.bool)
+        >>> x = mindspore.tensor(1, mindspore.bool_)
+        >>> y = mindspore.tensor(0, mindspore.bool_)
         >>> output = mindspore.ops.logical_and(x, y)
         >>> print(output)
         False
         >>> x = True
-        >>> y = mindspore.tensor(0, mindspore.bool)
+        >>> y = mindspore.tensor(0, mindspore.bool_)
         >>> output = mindspore.ops.logical_and(x, y)
         >>> print(output)
         False
         >>> x = True
-        >>> y = mindspore.tensor([True, False], mindspore.bool)
+        >>> y = mindspore.tensor([True, False], mindspore.bool_)
         >>> output = mindspore.ops.logical_and(x, y)
         >>> print(output)
         [True False]
@@ -1782,10 +1782,10 @@ def pow_ext(input, exponent):
     Args:
         input (Union[Tensor, Number]): The first input is a Number or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         exponent (Union[Tensor, Number]): The second input is a Number or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
 
     Returns:
         Tensor, the shape is the same as the one after broadcasting,
@@ -5149,18 +5149,6 @@ def bernoulli_ext(input, *, generator=None):
     return bernoulli_ext_(input, seed, offset)
 
 
-def bernoulli_(input, p=0.5, *, generator=None):
-    r"""
-    bernoulli_(input, p=0.5, *, generator=None) -> Tensor
-
-    In-place version of :func:`mindspore.ops.bernoulli_ext`.
-    """
-    if generator is None:
-        generator = default_generator
-    seed, offset = generator._step(generator_step_)  # pylint: disable=protected-access
-    return ops.functional_overload.bernoulli_(input, p, seed, offset)
-
-
 def bessel_i1(x):
     r"""
     Computes the first order modified Bessel function of the first kind for each element input.
@@ -6637,7 +6625,7 @@ def amin(input, axis=None, keepdims=False, *, initial=None, where=None):
     >>> # case 4: Use "where" to include only specific elements in computing the minimum.
     >>> where = mindspore.tensor([[1, 0, 1, 0],
     ...                           [0, 0, 1, 1],
-    ...                           [1, 1, 1, 0]], dtype=mindspore.bool)
+    ...                           [1, 1, 1, 0]], dtype=mindspore.bool_)
     >>> mindspore.ops.amin(input, axis=1, keepdims=True, initial=0, where=where)
     Tensor(shape=[3, 1], dtype=Int64, value=
     [[ 0],
@@ -6719,7 +6707,7 @@ def amax(input, axis=None, keepdims=False, *, initial=None, where=None):
     >>> # case 4: Use "where" to include only specific elements in computing the maximum.
     >>> where = mindspore.tensor([[0, 0, 1, 0],
     ...                           [0, 0, 1, 1],
-    ...                           [1, 1, 1, 0]], dtype=mindspore.bool)
+    ...                           [1, 1, 1, 0]], dtype=mindspore.bool_)
     >>> mindspore.ops.amax(input, axis=1, keepdims=True, initial=0, where=where)
     Tensor(shape=[3, 1], dtype=Int64, value=
     [[4],
@@ -9172,10 +9160,10 @@ def remainder_ext(input, other):
         input (Union[Tensor, numbers.Number, bool]): The dividend is a numbers.Number or
             a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, numbers.Number, bool]): The divisor is a numbers.Number or
-            a bool or a tensor whose data type is number or bool when the dividend is a tensor.
-            When the dividend is Scalar, the divisor must be a Tensor whose data type is number or bool.
+            a bool or a tensor whose data type is number or bool\_ when the dividend is a tensor.
+            When the dividend is Scalar, the divisor must be a Tensor whose data type is number or bool\_.
 
     Returns:
         Tensor, with dtype promoted and shape broadcasted.
@@ -10189,23 +10177,23 @@ def logical_xor(input, other):
 
     Examples:
        >>> import mindspore
-        >>> x = mindspore.tensor([True, False, True], mindspore.bool)
-        >>> y = mindspore.tensor([True, True, False], mindspore.bool)
+        >>> x = mindspore.tensor([True, False, True], mindspore.bool_)
+        >>> y = mindspore.tensor([True, True, False], mindspore.bool_)
         >>> output = mindspore.ops.logical_xor(x, y)
         >>> print(output)
         [False True True]
-        >>> x = mindspore.tensor(1, mindspore.bool)
-        >>> y = mindspore.tensor(0, mindspore.bool)
+        >>> x = mindspore.tensor(1, mindspore.bool_)
+        >>> y = mindspore.tensor(0, mindspore.bool_)
         >>> output = mindspore.ops.logical_xor(x, y)
         >>> print(output)
         True
         >>> x = True
-        >>> y = mindspore.tensor(0, mindspore.bool)
+        >>> y = mindspore.tensor(0, mindspore.bool_)
         >>> output = mindspore.ops.logical_xor(x, y)
         >>> print(output)
         True
         >>> x = True
-        >>> y = mindspore.tensor([True, False], mindspore.bool)
+        >>> y = mindspore.tensor([True, False], mindspore.bool_)
         >>> output = mindspore.ops.logical_xor(x, y)
         >>> print(output)
         [False True]
@@ -10664,7 +10652,7 @@ def _canonicalize_fft_shape_and_dim(input, shape, dim):
 
 
 def as_strided(x, shape=None, strides=None):
-    n = np.dtype(mstype._dtype_to_nptype(x.dtype)).itemsize  # pylint:disable=protected-access
+    n = np.dtype(mstype.dtype_to_nptype(x.dtype)).itemsize
     strides = tuple(np.array(strides) * n)
     if x.dtype == mstype.bfloat16:
         return Tensor(np.lib.stride_tricks.as_strided(x.float().asnumpy(), shape, strides, False, True), dtype=x.dtype)
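The `itemsize` factor above converts element strides into byte strides, which is what `np.lib.stride_tricks.as_strided` expects; a small NumPy illustration:

    import numpy as np

    x = np.arange(6, dtype=np.float32)  # itemsize is 4 bytes
    # A 2x3 view: one row step advances 3 elements (12 bytes),
    # one column step advances 1 element (4 bytes).
    view = np.lib.stride_tricks.as_strided(x, shape=(2, 3), strides=(12, 4))
    print(view)  # [[0. 1. 2.] [3. 4. 5.]]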
@@ -10715,7 +10703,7 @@ def _permute_input(input, input_dim, ret_dim):
         (dim_permute_a if not is_transformed_dim[i] else dim_permute_b).append(value)
 
     # strides
-    type_size = np.dtype(mstype._dtype_to_nptype(input.dtype)).itemsize  # pylint:disable=protected-access
+    type_size = np.dtype(mstype.dtype_to_nptype(input.dtype)).itemsize
     input_strides = [int(x / type_size) for x in input.strides]
 
     def cmp(x, y):
@@ -10828,7 +10816,7 @@ def _handle_fftwithsize_output(out, input_dim, batch_dims, dim_permute, out_size
     for i in range(batch_dims, input_dim):
         out_strides[dim_permute[i]] = out.strides[1 + (i - batch_dims)]
 
-    type_size = np.dtype(mstype._dtype_to_nptype(out.dtype)).itemsize  # pylint:disable=protected-access
+    type_size = np.dtype(mstype.dtype_to_nptype(out.dtype)).itemsize
     if out.shape != out_sizes or out.strides != out_strides:
         out = as_strided(out, out_sizes, [int(i / type_size) for i in out_strides])
     return out
@@ -11206,7 +11194,7 @@ def count_nonzero(x, axis=(), keep_dims=False, dtype=mstype.int32):
             Default ``()`` , which counts all non-zero elements.
         keep_dims (bool, optional): Whether to maintain dimensions specified by `axis`.
             Default ``False`` , don't keep these dimensions.
-        dtype (Union[Number, mindspore.bool], optional): The data type returned.
+        dtype (Union[Number, mindspore.bool\_], optional): The data type returned.
             Default ``mstype.int32`` .
 
 
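For a quick sense of the `axis` semantics documented above, NumPy's `count_nonzero` behaves the same way (shown purely as an illustration):

    import numpy as np

    x = np.array([[0, 1, 0], [2, 0, 3]])
    print(np.count_nonzero(x))          # 3
    print(np.count_nonzero(x, axis=0))  # [1 1 1]
    print(np.count_nonzero(x, axis=1))  # [1 2]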
@@ -12095,11 +12083,11 @@ def mul_ext(input, other):
         input (Union[Tensor, number.Number, bool]): The first input is a number.Number or
             a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, number.Number, bool]): The second input, is a number.Number or
             a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
 
     Returns:
         Tensor, the shape is the same as the one after broadcasting,
@@ -41,7 +41,7 @@ from mindspore.ops.operations.nn_ops import TripletMarginLoss
 from mindspore.ops.operations._sequence_ops import TupleToTensor, TensorToTuple, ListToTensor
 from mindspore.common.api import _function_forbid_reuse
 from mindspore.ops.auto_generate import log_softmax, dense, prelu, celu, fast_gelu, silu, elu, sigmoid, relu6, \
-    softmax_impl, swiglu, logsigmoid_op, kl_div_op, divs_op, l1_loss_ext
+    softmax_impl, swiglu, logsigmoid_op, kl_div_op, divs_op
 from mindspore.ops.auto_generate import relu_op, inplace_relu_op
 from mindspore.ops.auto_generate import group_norm_op, rms_norm, add_rms_norm, layer_norm_ext_op, batch_norm_ext_op,\
     mse_loss_ext
@@ -49,7 +49,7 @@ from mindspore.ops.auto_generate import group_norm_op, rms_norm, add_rms_norm, l
 from mindspore.ops.auto_generate import (reflection_pad_1d_op, reflection_pad_2d_op, add_layernorm_v2_op,
                                          reflection_pad_3d_op,  # pylint: disable=W0611
                                          replication_pad_1d_op, replication_pad_2d_op, replication_pad_3d_op,
-                                         constant_pad_nd_op, func_dropout_ext_op, reverse_v2_impl, avg_pool2d_op,
+                                         constant_pad_nd_op, dropout_ext_op, reverse_v2_impl, avg_pool2d_op,
                                          upsample_nearest1d_op, upsample_nearest2d_op, upsample_nearest3d_op,
                                          upsample_linear1d_op, upsample_bilinear2d_op, upsample_bicubic2d_op,
                                          upsample_trilinear3d_impl, fill_scalar_op, floor_op, nllloss_2d_op,
@@ -101,7 +101,7 @@ from mindspore.common.generator import default_generator
 from mindspore.ops.auto_generate import hardshrink, hardsigmoid, hardswish
 from mindspore.ops.auto_generate import softshrink
 from mindspore.ops.auto_generate import soft_margin_loss
-from mindspore.ops.auto_generate import moe_token_permute, moe_token_unpermute, ring_attention_update
+from mindspore.ops.auto_generate import moe_token_permute, moe_token_unpermute
 from mindspore.ops.auto_generate import adaptive_avg_pool2d_ext_op
 from mindspore.ops.auto_generate.pyboost_inner_prim import nllloss_impl
 from mindspore.ops.auto_generate.pyboost_inner_prim import adaptive_max_pool2d_impl
@@ -1623,6 +1623,7 @@ def dropout_ext(input, p=0.5, training=True, inplace=False):
         - **output** (Tensor) - Zeroed tensor, with the same shape and data type as `input`.
 
     Raises:
+        TypeError: If `p` is not a float.
         TypeError: If `input` is not a Tensor.
 
     Supported Platforms:
@@ -1636,8 +1637,16 @@ def dropout_ext(input, p=0.5, training=True, inplace=False):
     >>> print(output.shape)
     (2, 2)
     """
+    check_bool_const(training, "training", "dropout_ext")
+    check_bool_const(inplace, "inplace", "dropout_ext")
+    if not training:
+        return input
     seed, offset = default_generator._step(generator_step_)  # pylint: disable=protected-access
-    return func_dropout_ext_op(input, p, training, inplace, seed, offset)
+    out, _ = dropout_ext_op(input, p, seed, offset)
+    if inplace:
+        input.copy_(out)
+        return input
+    return out
 
 
 def dropout1d(input, p=0.5, training=True):
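The new control flow above is ordinary inverted dropout: identity when `training` is false, otherwise mask and rescale, with an optional in-place write-back. A rough NumPy sketch of the numeric behaviour (not the MindSpore kernel):

    import numpy as np

    def dropout_sketch(x, p=0.5, training=True):
        if not training:
            return x
        mask = np.random.rand(*x.shape) >= p
        # Inverted dropout rescales survivors so the expectation is unchanged.
        return x * mask / (1.0 - p)

    out = dropout_sketch(np.ones((2, 2), dtype=np.float32), p=0.4)
    print(out.shape)  # (2, 2)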
@@ -2386,7 +2395,7 @@ def is_floating_point(input):
     >>> mindspore.ops.is_floating_point(input)
     False
     """
-    return input.dtype in mstype.float_type
+    return input.dtype in [mstype.float32, mstype.bfloat16, mstype.float16, mstype.float64]
 
 
 def _is_dim_unknown(shape):
@@ -4294,15 +4303,6 @@ def nll_loss(inputs, target, weight=None, ignore_index=-100, reduction='mean', l
         \sum_{n=1}^{N} l_{n}, & \text { if reduction }=\text { 'sum' }
         \end{array}\right.
 
-    .. warning::
-        - In GE mode, the rank of `inputs` should be 1D or 2D, the rank of `target` and `weight` should be 1D,
-          and the following restraints should be met:
-
-            - when `inputs` is 1D: target_shape[0] == 1 and weight_shape[0] == inputs_shape[0].
-            - when `inputs` is 2D: target_shape[0] == inputs_shape[0] and weight_shape[0] == inputs_shape[1].
-
-        - On GPU or CPU, `inputs` should be 2D.
-
     Args:
         inputs (Tensor): :math:`(N, C)` where `C = number of classes` or :math:`(N, C, H, W)`
             in case of 2D Loss, or :math:`(N, C, d_1, d_2, ..., d_K)`.
@@ -4846,9 +4846,8 @@ def smooth_l1_loss(input, target, beta=1.0, reduction='none'):
     >>> print(output)
     [0. 0. 0.5]
     """
-    if beta == 0.0:
-        return l1_loss_ext(input, target, reduction)
-    return ops.auto_generate.smooth_l1_loss(input, target, beta=beta, reduction=reduction)
+    _smooth_l1_loss = _get_cache_prim(P.SmoothL1Loss)(beta, reduction)
+    return _smooth_l1_loss(input, target)
 
 
 def threshold(input, thr, value):
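Both the removed Python branch and the `P.SmoothL1Loss` primitive compute the standard piecewise loss; a NumPy reference of the usual definition (matching the `[0. 0. 0.5]` doctest above with the default beta=1.0):

    import numpy as np

    def smooth_l1(pred, target, beta=1.0):
        d = np.abs(pred - target)
        # Quadratic near zero, linear in the tails.
        return np.where(d < beta, 0.5 * d ** 2 / beta, d - 0.5 * beta)

    print(smooth_l1(np.array([2., 2., 3.]), np.array([2., 2., 2.])))  # [0.  0.  0.5]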
@@ -6439,6 +6438,9 @@ def conv2d_ext(input, weight, bias=None, stride=1, padding=0, dilation=1, groups
     <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_ and
     `ConvNets <http://cs231n.github.io/convolutional-networks/>`_.
 
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
     Args:
         input (Tensor): Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`.
         weight (Tensor): Tensor of shape
@@ -7390,6 +7392,7 @@ def conv3d_ext(input, weight, bias=None, stride=1, padding=0, dilation=1, groups
 
     .. warning::
         This API does not support Atlas series products.
+        This is an experimental API that is subject to change or deletion.
 
     Args:
         input (Tensor): Tensor of shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`.
@@ -9650,7 +9653,6 @@ __all__ = [
     'lp_pool2d',
     'moe_token_permute',
     'moe_token_unpermute',
-    'ring_attention_update',
     'max_unpool1d',
     'max_unpool2d',
     'max_unpool3d',
@@ -20,7 +20,7 @@ from mindspore.common._register_for_tensor import tensor_operator_registry
 from mindspore.ops import _constants
 from mindspore.ops.function import *
 from mindspore.ops.function.array_func import chunk_ext, zero_
-from mindspore.ops.function.math_func import all, argmax_ext, float_power_ext, erfinv_, tanh_, bernoulli_ext, bernoulli_
+from mindspore.ops.function.math_func import all, argmax_ext, float_power_ext, erfinv_, tanh_, bernoulli_ext
 from mindspore.ops.function.random_func import random_, uniform_ext, uniform_, normal_, exponential_
 from mindspore.ops import operations as P
 from mindspore.ops.operations import array_ops
@@ -397,7 +397,6 @@ setattr(tensor_operator_registry, 'inplace_scatter_add', auto_generate.inplace_s
 setattr(tensor_operator_registry, 'slice_scatter', slice_scatter)
 setattr(tensor_operator_registry, 'select_scatter', select_scatter)
 setattr(tensor_operator_registry, 'bernoulli', bernoulli_ext)
-setattr(tensor_operator_registry, 'bernoulli_', bernoulli_)
 setattr(tensor_operator_registry, 'poisson', P.Poisson)
 setattr(tensor_operator_registry, 'randperm', P.Randperm)
 setattr(tensor_operator_registry, 'multinomial', multinomial)
@@ -17,7 +17,6 @@ from mindspore._c_expression import _add_instance
 from mindspore._c_expression import _addcdiv_instance
 from mindspore._c_expression import _all_gather_matmul_instance
 from mindspore._c_expression import _any_instance
-from mindspore._c_expression import _bernoulli__instance
 from mindspore._c_expression import _bitwise_not_instance
 from mindspore._c_expression import _clamp_instance
 from mindspore._c_expression import _conv3d_instance
@@ -67,10 +66,10 @@ def add(*args, **kwargs):
     Args:
         input (Union[Tensor, number.Number, bool]): `input` is a number.Number or a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, number.Number, bool]): `other` is a number.Number or a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
 
     Keyword Args:
         alpha (number.Number, optional): A scaling factor applied to `other`, default ``1``.
@@ -318,15 +317,6 @@ def any(*args, **kwargs):
     return _any_instance(*args, **kwargs)
 
 
-def bernoulli_(*args, **kwargs):
-    r"""
-    bernoulli_(input, p, seed, offset) -> Tensor
-
-    Inner function, used for Tensor.bernoulli_.
-    """
-    return _bernoulli__instance(*args, **kwargs)
-
-
 def bitwise_not(*args, **kwargs):
     r"""
     bitwise_not(input) -> Tensor
@@ -589,7 +579,7 @@ def div(*args, **kwargs):
     .. note::
         - When the two inputs have different shapes, they must be able to broadcast to a common shape.
         - The two inputs can not be bool type at the same time,
-          [True, Tensor(True), Tensor(np.array([True]))] are all considered bool type.
+          [True, Tensor(True, bool\_), Tensor(np.array([True]), bool\_)] are all considered bool type.
         - The two inputs comply with the implicit type conversion rules to make the data types
           consistent.
 
@@ -1117,10 +1107,10 @@ def greater_equal(*args, **kwargs):
 
     Args:
         input (Union[Tensor, Number]): The first input is a number
-            or a tensor whose data type is `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_ or `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_.
+            or a tensor whose data type is `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_ or `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html#mindspore.dtype>`_.
         other (Union[Tensor, Number]): Second input. When the first input is a Tensor, the second input should be a Number,
-            or a Tensor of the number or bool data type. When the first input is a Scalar,
-            the second input must be a Tensor of number or bool data type.
+            or a Tensor of the number or bool_ data type. When the first input is a Scalar,
+            the second input must be a Tensor of number or bool_ data type.
 
     Returns:
         Tensor, the shape is the same as the one after broadcasting, and the data type is bool.
@@ -1729,10 +1719,10 @@ def remainder(*args, **kwargs):
         input (Union[Tensor, numbers.Number, bool]): The dividend is a numbers.Number or
             a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, numbers.Number, bool]): The divisor is a numbers.Number or
-            a bool or a tensor whose data type is number or bool when the dividend is a tensor.
-            When the dividend is Scalar, the divisor must be a Tensor whose data type is number or bool.
+            a bool or a tensor whose data type is number or bool\_ when the dividend is a tensor.
+            When the dividend is Scalar, the divisor must be a Tensor whose data type is number or bool\_.
 
     Returns:
         Tensor, with dtype promoted and shape broadcasted.
@@ -1826,10 +1816,10 @@ def sub(*args, **kwargs):
     Args:
         input (Union[Tensor, number.Number, bool]): `input` is a number.Number or a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, number.Number, bool]): `other` is a number.Number or a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
 
     Keyword Args:
         alpha (number.Number, optional): A scaling factor applied to `other`, default ``1``.
@@ -1945,7 +1935,7 @@ def xlogy(*args, **kwargs):
         input (Union[Tensor, numbers.Number, bool]): The first input is a numbers.Number or
             a bool or a tensor whose data type is
             `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ or
-            `bool <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
         other (Union[Tensor, numbers.Number, bool]): The second input is a numbers.Number or
             a bool or a tensor whose data type is number or bool when the first input is a tensor.
             When the first input is Scalar, the second input must be a Tensor whose data type is number or bool.
@@ -1979,7 +1969,6 @@ __all__ = [
     "addcdiv",
     "all_gather_matmul",
     "any",
-    "bernoulli_",
     "bitwise_not",
     "clamp",
     "clip",