mindspore 2.2.11-cp38-none-any.whl → 2.2.14-cp38-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (121)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +2 -1
  3. mindspore/_akg/akg/topi/cpp/impl.py +1 -1
  4. mindspore/_akg/akg/tvm/_ffi/base.py +1 -1
  5. mindspore/_c_dataengine.cpython-38-aarch64-linux-gnu.so +0 -0
  6. mindspore/_c_expression.cpython-38-aarch64-linux-gnu.so +0 -0
  7. mindspore/_c_mindrecord.cpython-38-aarch64-linux-gnu.so +0 -0
  8. mindspore/_mindspore_offline_debug.cpython-38-aarch64-linux-gnu.so +0 -0
  9. mindspore/bin/cache_admin +0 -0
  10. mindspore/bin/cache_server +0 -0
  11. mindspore/common/tensor.py +0 -2
  12. mindspore/communication/management.py +3 -0
  13. mindspore/context.py +34 -4
  14. mindspore/dataset/engine/datasets.py +23 -0
  15. mindspore/dataset/engine/validators.py +1 -1
  16. mindspore/dataset/vision/py_transforms_util.py +2 -2
  17. mindspore/experimental/optim/lr_scheduler.py +5 -6
  18. mindspore/lib/libdnnl.so.2 +0 -0
  19. mindspore/lib/libmindspore.so +0 -0
  20. mindspore/lib/libmindspore_backend.so +0 -0
  21. mindspore/lib/libmindspore_common.so +0 -0
  22. mindspore/lib/libmindspore_core.so +0 -0
  23. mindspore/lib/libmindspore_glog.so.0 +0 -0
  24. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  25. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  26. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  27. mindspore/lib/libmindspore_shared_lib.so +0 -0
  28. mindspore/lib/libopencv_core.so.4.5 +0 -0
  29. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  30. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  31. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
  32. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  33. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +48 -0
  34. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  35. mindspore/lib/plugin/ascend/libakg.so +0 -0
  36. mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
  37. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  38. mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
  39. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  40. mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
  41. mindspore/mindrecord/tools/cifar100_to_mr.py +49 -57
  42. mindspore/mindrecord/tools/cifar10_to_mr.py +46 -55
  43. mindspore/mindrecord/tools/csv_to_mr.py +3 -8
  44. mindspore/mindrecord/tools/mnist_to_mr.py +4 -9
  45. mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -4
  46. mindspore/nn/layer/activation.py +1 -1
  47. mindspore/nn/layer/embedding.py +2 -2
  48. mindspore/nn/loss/loss.py +1 -1
  49. mindspore/nn/optim/ada_grad.py +2 -2
  50. mindspore/nn/optim/sgd.py +3 -2
  51. mindspore/numpy/math_ops.py +1 -1
  52. mindspore/ops/__init__.py +3 -0
  53. mindspore/ops/_grad_experimental/grad_array_ops.py +0 -31
  54. mindspore/ops/_grad_experimental/grad_comm_ops.py +4 -2
  55. mindspore/ops/_grad_experimental/grad_inner_ops.py +8 -0
  56. mindspore/ops/_grad_experimental/grad_math_ops.py +37 -17
  57. mindspore/ops/_op_impl/aicpu/__init__.py +1 -0
  58. mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +38 -0
  59. mindspore/ops/function/array_func.py +6 -5
  60. mindspore/ops/function/debug_func.py +1 -1
  61. mindspore/ops/function/linalg_func.py +21 -11
  62. mindspore/ops/function/math_func.py +3 -0
  63. mindspore/ops/function/nn_func.py +13 -11
  64. mindspore/ops/function/parameter_func.py +2 -0
  65. mindspore/ops/function/sparse_unary_func.py +2 -2
  66. mindspore/ops/function/vmap_func.py +1 -0
  67. mindspore/ops/operations/_embedding_cache_ops.py +1 -1
  68. mindspore/ops/operations/_inner_ops.py +56 -1
  69. mindspore/ops/operations/_quant_ops.py +4 -4
  70. mindspore/ops/operations/_rl_inner_ops.py +1 -1
  71. mindspore/ops/operations/array_ops.py +15 -4
  72. mindspore/ops/operations/custom_ops.py +1 -1
  73. mindspore/ops/operations/debug_ops.py +1 -1
  74. mindspore/ops/operations/image_ops.py +3 -3
  75. mindspore/ops/operations/inner_ops.py +49 -0
  76. mindspore/ops/operations/math_ops.py +62 -0
  77. mindspore/ops/operations/nn_ops.py +7 -3
  78. mindspore/ops/operations/random_ops.py +2 -0
  79. mindspore/ops/operations/sparse_ops.py +4 -4
  80. mindspore/ops/silent_check.py +162 -0
  81. mindspore/parallel/__init__.py +3 -2
  82. mindspore/parallel/_auto_parallel_context.py +82 -3
  83. mindspore/parallel/_parallel_serialization.py +34 -2
  84. mindspore/parallel/_tensor.py +3 -1
  85. mindspore/parallel/_transformer/transformer.py +8 -8
  86. mindspore/parallel/checkpoint_transform.py +191 -45
  87. mindspore/profiler/parser/ascend_cluster_generator.py +111 -0
  88. mindspore/profiler/parser/ascend_communicate_generator.py +315 -0
  89. mindspore/profiler/parser/ascend_flops_generator.py +8 -2
  90. mindspore/profiler/parser/ascend_fpbp_generator.py +8 -2
  91. mindspore/profiler/parser/ascend_hccl_generator.py +2 -2
  92. mindspore/profiler/parser/ascend_msprof_exporter.py +30 -6
  93. mindspore/profiler/parser/ascend_msprof_generator.py +16 -5
  94. mindspore/profiler/parser/ascend_op_generator.py +15 -7
  95. mindspore/profiler/parser/ascend_timeline_generator.py +5 -2
  96. mindspore/profiler/parser/base_timeline_generator.py +11 -3
  97. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +2 -1
  98. mindspore/profiler/parser/framework_parser.py +8 -2
  99. mindspore/profiler/parser/memory_usage_parser.py +8 -2
  100. mindspore/profiler/parser/minddata_analyzer.py +8 -2
  101. mindspore/profiler/parser/minddata_parser.py +1 -1
  102. mindspore/profiler/parser/msadvisor_analyzer.py +4 -2
  103. mindspore/profiler/parser/msadvisor_parser.py +9 -3
  104. mindspore/profiler/profiling.py +97 -25
  105. mindspore/rewrite/api/node.py +1 -1
  106. mindspore/rewrite/api/symbol_tree.py +2 -2
  107. mindspore/train/callback/_checkpoint.py +8 -8
  108. mindspore/train/callback/_landscape.py +2 -3
  109. mindspore/train/callback/_summary_collector.py +6 -7
  110. mindspore/train/dataset_helper.py +6 -0
  111. mindspore/train/model.py +17 -5
  112. mindspore/train/serialization.py +6 -1
  113. mindspore/train/summary/_writer_pool.py +1 -1
  114. mindspore/train/summary/summary_record.py +5 -6
  115. mindspore/version.py +1 -1
  116. {mindspore-2.2.11.dist-info → mindspore-2.2.14.dist-info}/METADATA +1 -1
  117. {mindspore-2.2.11.dist-info → mindspore-2.2.14.dist-info}/RECORD +120 -117
  118. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  119. {mindspore-2.2.11.dist-info → mindspore-2.2.14.dist-info}/WHEEL +0 -0
  120. {mindspore-2.2.11.dist-info → mindspore-2.2.14.dist-info}/entry_points.txt +0 -0
  121. {mindspore-2.2.11.dist-info → mindspore-2.2.14.dist-info}/top_level.txt +0 -0
mindspore/ops/function/linalg_func.py:
@@ -59,6 +59,8 @@ def cond(A, p=None):
 
     Args:
         A (Tensor): Tensor of shape :math:`(*, n)` or :math:`(*, m, n)` where * is zero or more batch dimensions.
+            If `p` is one of Union[1, -1, inf, -inf, 'fro', 'nuc'], the function uses
+            :class:`mindspore.ops.MatrixInverse` , therefore, :math:`(*, m, n)` has to be square and ivertible.
         p (Union[int, float, inf, -inf, 'fro', 'nuc'], optional): norm's mode. Refer to the table above for
             behavior. Default: ``None``.
 
@@ -84,8 +86,8 @@ def cond(A, p=None):
     matrix_inverse = _get_cache_prim(P.MatrixInverse)(adjoint=False)
     if p is None:
         p = 2
-    norm_a = F.norm(A, p)
-    norm_inv_a = F.norm(matrix_inverse(A), p)
+    norm_a = F.matrix_norm(A, p)
+    norm_inv_a = F.matrix_norm(matrix_inverse(A), p)
     return norm_a * norm_inv_a
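For orientation, the switch from `F.norm` to `F.matrix_norm` does not change the public `ops.cond` call, only the internal norm computation. A minimal sketch under the documented constraint (square, invertible input when `p` is 'fro'), assuming a stock CPU build of 2.2.14:

import numpy as np
import mindspore
from mindspore import Tensor, ops

a = Tensor(np.array([[1.0, 0.0], [0.0, 2.0]]), mindspore.float32)
# Default p=2: ratio of largest to smallest singular value, here 2/1.
print(ops.cond(a))         # 2.0
# p='fro' routes through MatrixInverse, so `a` must be square and invertible:
# ||a||_F * ||a^-1||_F = sqrt(5) * sqrt(1.25) = 2.5.
print(ops.cond(a, 'fro'))  # 2.5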
 
@@ -194,6 +196,8 @@ def geqrf(input):
     ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
+        >>> from mindspore import Tensor, ops
+        >>> import numpy as np
         >>> input_x = Tensor(np.array([[-2.0, -1.0], [1.0, 2.0]]).astype(np.float32))
         >>> y, tau = ops.geqrf(input_x)
         >>> print(y)
@@ -266,6 +270,16 @@ def svd(input, full_matrices=False, compute_uv=True):
         return s
 
 
+def _check_pinv_shape(x):
+    if not isinstance(x, (Tensor, Tensor_)):
+        raise TypeError("The input x must be tensor")
+    if x.shape == ():
+        raise TypeError("For pinv, the 0-D input is not supported")
+    x_shape = F.shape(x)
+    if len(x_shape) < 2:
+        raise ValueError("input x should have 2 or more dimensions, " f"but got {len(x_shape)}.")
+
+
 def pinv(x, *, atol=None, rtol=None, hermitian=False):
     r"""
     Computes the (Moore-Penrose) pseudo-inverse of a matrix.
@@ -318,19 +332,15 @@ def pinv(x, *, atol=None, rtol=None, hermitian=False):
     ``CPU``
 
     Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, ops
         >>> x = Tensor([[4., 0.], [0., 5.]], mindspore.float32)
         >>> output = ops.pinv(x)
         >>> print(output)
-        [[0.25 0. ]
-        [0. 0.2 ]]
+        [[0.25 0. ]
+        [0. 0.2 ]]
     """
-    if not isinstance(x, (Tensor, Tensor_)):
-        raise TypeError("The input x must be tensor")
-    if x.shape == ():
-        raise TypeError("For pinv, the 0-D input is not supported")
-    x_shape = F.shape(x)
-    if len(x_shape) < 2:
-        raise ValueError("input x should have 2 or more dimensions, " f"but got {len(x_shape)}.")
+    _check_pinv_shape(x)
     x_dtype = _get_cache_prim(P.DType)()(x)
     _check_input_dtype("x", x_dtype, [mstype.float32, mstype.float64], "pinv")
     _check_attr_dtype("hermitian", hermitian, [bool], "pinv")
mindspore/ops/function/math_func.py:
@@ -343,6 +343,7 @@ def add(input, other):
 
     Examples:
         >>> import numpy as np
+        >>> import mindspore
         >>> from mindspore import Tensor, ops
         >>> # case 1: x and y are both Tensor.
         >>> x = Tensor(np.array([1, 2, 3]).astype(np.float32))
@@ -12784,6 +12785,7 @@ def count_nonzero(x, axis=(), keep_dims=False, dtype=mstype.int32):
     Examples:
         >>> from mindspore import Tensor, ops
         >>> import numpy as np
+        >>> import mindspore
         >>> # case 1: each value specified.
         >>> x = Tensor(np.array([[0, 1, 0], [1, 1, 0]]).astype(np.float32))
         >>> nonzero_num = ops.count_nonzero(x=x, axis=[0, 1], keep_dims=True, dtype=mindspore.int32)
@@ -13392,6 +13394,7 @@ def batch_dot(x1, x2, axes=None):
     ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
+        >>> import mindspore
         >>> from mindspore import Tensor, ops
         >>> import numpy as np
         >>> x1 = Tensor(np.ones(shape=[2, 2, 3]), mindspore.float32)
mindspore/ops/function/nn_func.py:
@@ -2748,6 +2748,7 @@ def softshrink(x, lambd=0.5):
     ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
+        >>> import mindspore
         >>> from mindspore import Tensor
         >>> from mindspore import ops
         >>> import numpy as np
@@ -3000,11 +3001,12 @@ def dense(input, weight, bias=None):
 
     Examples:
         >>> import numpy as np
+        >>> import mindspore
         >>> from mindspore import Tensor, ops
-        >>> input = mindspore.Tensor([[-1., 1., 2.], [-3., -3., 1.]], mindspore.float32)
-        >>> weight = mindspore.Tensor([[-2., -2., -2.], [0., -1., 0.]], mindspore.float32)
-        >>> bias = mindspore.Tensor([0., 1.], mindspore.float32)
-        >>> output = mindspore.ops.dense(input, weight, bias)
+        >>> input = Tensor([[-1., 1., 2.], [-3., -3., 1.]], mindspore.float32)
+        >>> weight = Tensor([[-2., -2., -2.], [0., -1., 0.]], mindspore.float32)
+        >>> bias = Tensor([0., 1.], mindspore.float32)
+        >>> output = ops.dense(input, weight, bias)
         >>> print(output)
         [[-4. 0.]
         [10. 4.]]
@@ -3992,8 +3994,8 @@ def l1_loss(input, target, reduction='mean'):
     Examples:
         >>> from mindspore import Tensor, ops
        >>> from mindspore import dtype as mstype
-        >>> x = ms.Tensor([[1, 2, 3], [4, 5, 6]], mstype.float32)
-        >>> target = ms.Tensor([[6, 5, 4], [3, 2, 1]], mstype.float32)
+        >>> x = Tensor([[1, 2, 3], [4, 5, 6]], mstype.float32)
+        >>> target = Tensor([[6, 5, 4], [3, 2, 1]], mstype.float32)
         >>> output = ops.l1_loss(x, target, reduction="mean")
         >>> print(output)
         3.0
@@ -5342,7 +5344,7 @@ def conv1d(input, weight, bias=None, stride=1, pad_mode="valid", padding=0, dila
         >>> from mindspore import Tensor, ops
         >>> x = Tensor(np.arange(64).reshape((4, 4, 4)), mindspore.float32)
         >>> weight = Tensor(np.arange(8).reshape((2, 2, 2)), mindspore.float32)
-        >>> bias = Tensor([-0.12345, 2.7683], ms.float32)
+        >>> bias = Tensor([-0.12345, 2.7683], mindspore.float32)
         >>> output = ops.conv1d(x, weight, pad_mode='pad', padding=(1,), bias=bias, groups=2)
         >>> print(output.shape)
         (4, 2, 5)
@@ -7444,8 +7446,8 @@ def max_pool2d(x, kernel_size, stride=None, padding=0, dilation=1, return_indice
     return out
 
 
-def prompt_flash_attention(query, key, value, padding_mask, attn_mask, actual_seq_lengths,
-                           actual_seq_lengths_kv, deq_scale1, quant_scale1,
+def prompt_flash_attention(query, key, value, attn_mask, actual_seq_lengths,
+                           actual_seq_lengths_kv, pse_shift, deq_scale1, quant_scale1,
                            deq_scale2, quant_scale2, quant_offset2, num_heads, scale_value=1.0, pre_tokens=2147483547,
                            next_tokens=0, input_layout='BSH',
                            num_key_value_heads=0, sparse_mode=0):
@@ -7468,11 +7470,11 @@ def prompt_flash_attention(query, key, value, padding_mask, attn_mask, actual_se
            Input tensor of shape :math:`(B, S, H)` / `(B, N, S, D)`.
        value (Tensor) - The value tensor with data type of float16 or float32.
            Input tensor of shape :math:`(B, S, H)` / `(B, N, S, D)`.
-        padding_mask (Tensor) - The padding mask tensor with data type of float16 or float32
        attn_mask (Tensor) - The attention mask tensor with data type of float16 or float32.
            For each element, 0 indicates retention and 1 indicates discard. Input tensor of shape :math:`(B, 1, S, S)`.
        actual_seq_lengths (list[int]): Describe actual sequence length of each input with data type of int.
        actual_seq_lengths_kv (list[int]): Describe actual sequence length of each input with data type of int.
+        pse_shift (Tensor) - The position encoding tensor with data type of float16 or float32.
        dep_scale1 (Tensor)
        quant_scale1 (Tensor)
        deq_scale2 (Tensor)
@@ -7516,7 +7518,7 @@ def prompt_flash_attention(query, key, value, padding_mask, attn_mask, actual_se
 
    pfa = _get_cache_prim(NN_OPS.PromptFlashAttention)(num_heads, scale_value, pre_tokens, next_tokens, input_layout,
                                                       num_key_value_heads, sparse_mode)
-    return pfa(query, key, value, padding_mask, attn_mask, actual_seq_lengths, actual_seq_lengths_kv, deq_scale1,
+    return pfa(query, key, value, attn_mask, actual_seq_lengths, actual_seq_lengths_kv, pse_shift, deq_scale1,
               quant_scale1, deq_scale2, quant_scale2, quant_offset2)
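For callers, the visible change is the argument order: `padding_mask` is gone from the wrapper and `pse_shift` now follows `actual_seq_lengths_kv`. A hedged sketch of the new call shape; the import path comes from this file (nn_func.py), the shapes are illustrative, and the op only has an Ascend kernel, so this will not run on CPU or GPU:

import numpy as np
from mindspore import Tensor
from mindspore.ops.function.nn_func import prompt_flash_attention

B, N, S, D = 1, 4, 1024, 128
q = Tensor(np.random.randn(B, N, S, D).astype(np.float16))
k = Tensor(np.random.randn(B, N, S, D).astype(np.float16))
v = Tensor(np.random.randn(B, N, S, D).astype(np.float16))
# Positional order after 2.2.14: attn_mask, actual_seq_lengths,
# actual_seq_lengths_kv, pse_shift, then the quant/deq scales and the
# quant offset (all of which may be None).
out = prompt_flash_attention(q, k, v, None, None, None, None, None, None,
                             None, None, None, num_heads=N, input_layout='BNSD')
print(out.shape)  # (1, 4, 1024, 128)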
 
mindspore/ops/function/parameter_func.py:
@@ -99,6 +99,7 @@ def assign_sub(variable, value):
         >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
+        >>> from mindspore.common.initializer import initializer
         >>> variable = mindspore.Parameter(initializer(1, [1], mindspore.int32), name="global_step")
         >>> value = Tensor(np.ones([1]).astype(np.int32) * 100)
         >>> ops.assign_sub(variable, value)
@@ -149,6 +150,7 @@ def assign_add(variable, value):
         >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
+        >>> from mindspore.common.initializer import initializer
         >>> variable = mindspore.Parameter(initializer(1, [1], mindspore.int32), name="global_step")
         >>> value = Tensor(np.ones([1]).astype(np.int32) * 100)
         >>> ops.assign_add(variable, value)
mindspore/ops/function/sparse_unary_func.py:
@@ -28,7 +28,7 @@ def csr_cos(x: CSRTensor) -> CSRTensor:
         out_i = \cos(x_i)
 
     .. warning::
-        Currently support data types float16 and float32. If use Float64, there may be a problem of missing precision.
+        Currently support data types float16 and float32. If use float64, there may be a problem of missing precision.
 
     Args:
         x (CSRTensor): Input CSRTensor.
@@ -69,7 +69,7 @@ def coo_cos(x: COOTensor) -> COOTensor:
         out_i = \cos(x_i)
 
     .. warning::
-        If use Float64, there may be a problem of missing precision.
+        If use float64, there may be a problem of missing precision.
 
     Args:
         x (COOTensor): Input COOTensor.
mindspore/ops/function/vmap_func.py:
@@ -81,6 +81,7 @@ def vmap(fn, in_axes=0, out_axes=0):
     ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
+        >>> import numpy as np
         >>> from mindspore import Tensor
         >>> from mindspore import vmap
         >>> def test_vmap(x, y, z): # ([a],[a],[a]) -> [a]
mindspore/ops/operations/_embedding_cache_ops.py:
@@ -65,7 +65,7 @@ class SubAndFilter(PrimitiveWithCheck):
 
     Inputs:
         - **input_x** (Tensor) - Input tensor.
-        - **max_num** (Int) - The max value of element that after sub `offset`.
+        - **max_num** (int) - The max value of element that after sub `offset`.
         - **offset** (int) - Specifies the offset value of this `input_x`.
 
     Outputs:
mindspore/ops/operations/_inner_ops.py:
@@ -16,6 +16,7 @@
 """Inner operators."""
 from types import FunctionType, MethodType
 from collections.abc import Iterable
+import os
 import numpy as np
 
 from mindspore.common import Tensor
@@ -1008,7 +1009,7 @@ class Centralization(PrimitiveWithInfer):
 
     Inputs:
         - **input_x** (Tensor) - The input tensor. The data type mast be float16 or float32.
-        - **axis** (Union[Int, Tuple(Int), List(Int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
+        - **axis** (Union[int, Tuple(int), List(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
          Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).
 
     Outputs:
@@ -2858,6 +2859,60 @@ class DecoderKVCache(Primitive):
         self.add_prim_attr('side_effect_mem', True)
 
 
+class _MirrorSilentCheck(PrimitiveWithInfer):
+    """
+    The operator _MirrorSilentCheck implements accuracy-sensitive detection on the tensor input in backpropagator.
+    Call _MirrorSilentCheck in method __call__ of derived class to implement accuracy-sensitive detection.
+
+    Inputs:
+        - **input** (Tensor) : The tensor used for detection.
+          Its data type must be mindspore.float16, mindspore.float32 or mindspore.bfloat16.
+        - **pre_val** (Parameter(Tensor)) : Support parameter in accuracy-sensitive detection.
+          Please only generated by method generate_params() of ASDBase.
+        - **min_val** (Parameter(Tensor)) : Support parameter in accuracy-sensitive detection.
+          Please only generated by method generate_params() of ASDBase.
+        - **max_val** (Parameter(Tensor)) : Support parameter in accuracy-sensitive detection.
+          Please only generated by method generate_params() of ASDBase.
+        - **cnt** (Parameter(Tensor)) : Support parameter in accuracy-sensitive detection.
+          Please only generated by method generate_params() of ASDBase.
+          After each invocation of _MirrorSilentCheck, increment the value of cnt by one.
+
+    Outputs:
+        - **output** (Tensor) - Same shape, type and value as `input`.
+    """
+    @prim_attr_register
+    def __init__(self, min_steps=8):
+        upper_thresh, sigma_thresh = self.get_thresh()
+        self.min_steps = min_steps
+        self.thresh_l1 = upper_thresh[0]
+        self.coeff_l1 = sigma_thresh[0]
+        self.thresh_l2 = upper_thresh[1]
+        self.coeff_l2 = sigma_thresh[1]
+        self.add_prim_attr('side_effect_mem', True)
+
+    def parse_thresh(self, env_var_name, default_value, min_value):
+        env_var = os.environ.get(env_var_name, default=default_value)
+        thresh = [value.strip() for value in env_var.split(",")]
+        if len(thresh) != 2 or not all(value.isdigit() for value in thresh):
+            thresh = default_value.split(",")
+        thresh = [float(max(int(value), min_value)) for value in thresh]
+        if thresh[0] <= thresh[1]:
+            thresh = [float(value) for value in default_value.split(",")]
+
+        return thresh
+
+    def get_thresh(self):
+        upper_thresh = self.parse_thresh("NPU_ASD_UPPER_THRESH", "1000000,10000", 3)
+        sigma_thresh = self.parse_thresh("NPU_ASD_SIGMA_THRESH", "100000,5000", 3)
+        return upper_thresh, sigma_thresh
+
+    def infer_shape(self, x_shape, pre_shape, min_shape, max_shape, n_step, loss_scale_shape):
+        return x_shape
+
+    def infer_dtype(self, x_dtype, pre_dtype, min_dtype, max_dtype, n_dtype, loss_scale_dtype):
+        return x_dtype
+
+
 class PromptKVCache(Primitive):
     r"""
     The PromptKVCache is used for prefill the KVCache of transformer network.
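The env-var parsing in `parse_thresh` is self-contained, so its fallback rules can be exercised without mindspore. A minimal re-implementation as a free function (names and defaults taken from the diff; the test values below are illustrative):

import os

def parse_thresh(env_var_name, default_value, min_value):
    # Two comma-separated integers, clamped to min_value; a malformed pair or
    # a non-descending pair falls back to the built-in defaults.
    env_var = os.environ.get(env_var_name, default=default_value)
    thresh = [value.strip() for value in env_var.split(",")]
    if len(thresh) != 2 or not all(value.isdigit() for value in thresh):
        thresh = default_value.split(",")
    thresh = [float(max(int(value), min_value)) for value in thresh]
    if thresh[0] <= thresh[1]:
        thresh = [float(value) for value in default_value.split(",")]
    return thresh

os.environ["NPU_ASD_UPPER_THRESH"] = "500000,2000"
print(parse_thresh("NPU_ASD_UPPER_THRESH", "1000000,10000", 3))  # [500000.0, 2000.0]
os.environ["NPU_ASD_UPPER_THRESH"] = "2000,500000"  # not strictly descending
print(parse_thresh("NPU_ASD_UPPER_THRESH", "1000000,10000", 3))  # [1000000.0, 10000.0]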
mindspore/ops/operations/_quant_ops.py:
@@ -579,7 +579,7 @@ class FakeQuantWithMinMaxVars(PrimitiveWithInfer):
         range is [1, 2^num_bits-1]. Default: ``False``.
 
     Inputs:
-        - **x** (Tensor) - Float32 tensor representing the shape of the output tensor.
+        - **x** (Tensor) - float32 tensor representing the shape of the output tensor.
         - **min** (Tensor) - Value of the min range of the input data x.
         - **max** (Tensor) - Value of the max range of the input data x.
 
@@ -638,7 +638,7 @@ class FakeQuantWithMinMaxVarsGradient(PrimitiveWithInfer):
 
     Inputs:
         - **gradients** (Tensor) - The gradient above the FakeQuantWithMinMaxVars.
-        - **x** (Tensor) - Float32 tensor representing the shape of the output tensor.
+        - **x** (Tensor) - float32 tensor representing the shape of the output tensor.
         - **min** (Tensor) - Value of the min range of the input data x.
         - **max** (Tensor) - Value of the max range of the input data x.
 
@@ -702,7 +702,7 @@ class FakeQuantWithMinMaxVarsPerChannel(PrimitiveWithInfer):
         range is [1, 2^num_bits-1]. Default: ``False``.
 
     Inputs:
-        - **x** (Tensor) - Float32 tensor representing the shape of the output tensor.
+        - **x** (Tensor) - float32 tensor representing the shape of the output tensor.
         - **min** (Tensor) - Value of the min range of the input data x.
         - **max** (Tensor) - Value of the max range of the input data x.
 
@@ -754,7 +754,7 @@ class FakeQuantWithMinMaxVarsPerChannelGradient(PrimitiveWithInfer):
 
     Inputs:
         - **gradients** (Tensor) - The gradient above the FakeQuantWithMinMaxVars.
-        - **x** (Tensor) - Float32 tensor representing the shape of the output tensor.
+        - **x** (Tensor) - float32 tensor representing the shape of the output tensor.
         - **min** (Tensor) - Value of the min range of the input data x.
         - **max** (Tensor) - Value of the max range of the input data x.
mindspore/ops/operations/_rl_inner_ops.py:
@@ -341,7 +341,7 @@ class LSTMV2(Primitive):
         - **h** (Tensor) - Tensor of shape (num_directions * `num_layers`, batch_size, `hidden_size`).
         - **c** (Tensor) - Tensor of shape (num_directions * `num_layers`, batch_size, `hidden_size`).
         - **w** (Tensor) - The input tensor which states for weights.
-        - **seq_lengths** (Tensor) - The Tensor[Int32] of shape (batch_size, ),
+        - **seq_lengths** (Tensor) - The Tensor[int32] of shape (batch_size, ),
           indicates the seq_length of each batch dim.
 
     Outputs:
mindspore/ops/operations/array_ops.py:
@@ -227,7 +227,11 @@ class ExpandDims(PrimitiveWithCheck):
     def infer_value(self, input_x, axis):
         value = None
         if input_x is not None and axis is not None:
-            value = Tensor(np.expand_dims(input_x.asnumpy(), axis))
+            dtype = input_x.dtype
+            if input_x.dtype == mstype.bfloat16:
+                cpu_cast = Cast().set_device("CPU")
+                input_x = cpu_cast(input_x, mstype.float32)
+            value = Tensor(np.expand_dims(input_x.asnumpy(), axis), dtype)
         return value
 
 
@@ -375,6 +379,9 @@ class Cast(PrimitiveWithCheck):
         if isinstance(x, (int, float)):
             value = Tensor(np.array(x).astype(np_dst_type), dtype=dst_type)
         else:
+            if x.dtype == mstype.bfloat16:
+                cpu_cast = Cast().set_device("CPU")
+                x = cpu_cast(x, mstype.float32)
             value = Tensor(x.asnumpy().astype(np_dst_type), dtype=dst_type)
         return value
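Both patches work around the same limitation: NumPy has no bfloat16 dtype, so `.asnumpy()` cannot be called on a bfloat16 Tensor directly. A sketch of the detour they introduce, assuming a 2.2.14 build where `Cast().set_device("CPU")` is available as used above:

import numpy as np
from mindspore import Tensor
from mindspore.ops.operations import Cast
from mindspore.common import dtype as mstype

x = Tensor(np.ones((2, 2), np.float32), mstype.bfloat16)
# Cast to float32 on the CPU so .asnumpy() becomes legal...
cpu_cast = Cast().set_device("CPU")
x_f32 = cpu_cast(x, mstype.float32)
# ...then rebuild the result with the original bfloat16 dtype.
value = Tensor(np.expand_dims(x_f32.asnumpy(), 0), mstype.bfloat16)
print(value.shape)  # (1, 2, 2)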
 
@@ -5837,6 +5844,9 @@ class SpaceToBatchND(Primitive):
     ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
+        >>> import mindspore
+        >>> from mindspore import Tensor, ops
+        >>> import numpy as np
         >>> block_shape = [2, 2]
         >>> paddings = [[0, 0], [0, 0]]
         >>> space_to_batch_nd = ops.SpaceToBatchND(block_shape, paddings)
@@ -6279,8 +6289,8 @@ class Sort(Primitive):
     Sorts the elements of the input tensor along the given dimension in the specified order.
 
     .. warning::
-        Currently, the data types of Float16 is well supported.
-        Using Float32 might cause loss of accuracy.
+        Currently, the data types of float16, uint8, int8, int16, int32, int64 are well supported.
+        If use float32, it may cause loss of accuracy.
 
     Args:
         axis (int, optional): The dimension to sort along. Default: ``-1``, means the last dimension.
@@ -8719,7 +8729,8 @@ class TopK(Primitive):
         - GPU: float16, float32.
         - CPU: all numeric types.
 
-        - **k** (int) - The number of top elements to be computed along the last dimension, constant input is needed.
+        - **k** (Union(Tensor, int)) - The number of top elements to be computed along the last dimension.
+          If `k` is a Tensor, the supported dtype is int32 and it should be 0-D or 1-D with shape :math:`(1, )` .
 
     Outputs:
         A tuple consisting of `values` and `indexes`.
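With the relaxed contract, `k` can now come from another computation as an int32 Tensor rather than a Python constant. A minimal sketch, assuming 2.2.14:

import numpy as np
import mindspore
from mindspore import Tensor, ops

x = Tensor(np.array([5., 1., 4., 3., 2.]), mindspore.float32)
k = Tensor(2, mindspore.int32)  # 0-D int32 Tensor, newly accepted
values, indices = ops.TopK(sorted=True)(x, k)
print(values)   # [5. 4.]
print(indices)  # [0 2]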
mindspore/ops/operations/custom_ops.py:
@@ -470,7 +470,7 @@ class Custom(ops.PrimitiveWithInfer):
     op_path_in_cache = []  # Save paths for op functions created in the cached.
     custom_aot_warning = True  # Flag to enable warnings about custom aot path white list
 
-    def __init__(self, func, out_shape=None, out_dtype=None, func_type=HYBRID_TYPE, bprop=None, reg_info=None):
+    def __init__(self, func, out_shape=None, out_dtype=None, func_type="hybrid", bprop=None, reg_info=None):
         super().__init__("Custom")
 
         self.supported_targets = [ASCEND, GPU, CPU]
mindspore/ops/operations/debug_ops.py:
@@ -472,7 +472,7 @@ class Print(Primitive):
 
     Examples:
         >>> import numpy as np
-        >>> from mindspore import Tensor, nn
+        >>> from mindspore import Tensor, nn, ops
         >>> class PrintDemo(nn.Cell):
         ...     def __init__(self):
         ...         super(PrintDemo, self).__init__()
mindspore/ops/operations/image_ops.py:
@@ -388,7 +388,7 @@ class NonMaxSuppressionV3(Primitive):
          single score associated with each box (i.e., each row of the `boxes` Tensor).
          It is required that the number of scores in `scores` must be equal to the number of boxes in `boxes`.
          The supported data type is float32.
-        - **max_output_size** (Union[Tensor, Number.Int]) - A scalar integer Tensor representing the maximum
+        - **max_output_size** (Union[Tensor, Number.int]) - A scalar integer Tensor representing the maximum
          number of boxes to be selected by non max suppression. The supported data type is int32.
        - **iou_threshold** (Union[Tensor, Number.Float]) - A scalar float Tensor represents the threshold
          used for determining if the intersection over union (IOU) between boxes is too high.
@@ -459,7 +459,7 @@ class NonMaxSuppressionWithOverlaps(Primitive):
          single score associated with each box (i.e., each row of the `boxes` Tensor).
          It is required that the number of scores in `scores` must be equal to the number of boxes in `boxes`.
          The supported data type is float32.
-        - **max_output_size** (Union[Tensor, Number.Int]) - A scalar integer Tensor representing the maximum
+        - **max_output_size** (Union[Tensor, Number.int]) - A scalar integer Tensor representing the maximum
          number of boxes to be selected by non max suppression, and max_output_size must be equal to or greater
          than 0.
          Types allowed:int32.
@@ -816,7 +816,7 @@ class ResizeBicubic(Primitive):
     Examples:
         >>> import mindspore
         >>> import numpy as np
-        >>> from mindspore import Tensor, ops
+        >>> from mindspore import Tensor, ops, nn
         >>> class NetResizeBicubic(nn.Cell):
         ...     def __init__(self):
         ...         super(NetResizeBicubic, self).__init__()
mindspore/ops/operations/inner_ops.py:
@@ -642,6 +642,55 @@ class FusedAdaFactorWithGlobalNorm(FusedAdaFactor):
         return param_type
 
 
+class GenerateEodMask(Primitive):
+    r"""
+    Given the input `inputs_ids`, if found eod_token_id, the output position and attention mask matrix will be reset.
+    This means the `position_id` will start counting from 0, and the corresponding mask matrix will be filled with 0.
+
+    Args:
+        eod_token_id (int) - In the NLP scenario, this value corresponds to the id of
+            the symbol of 'EodOfDocument' in the vocabulary.
+
+    Inputs:
+        - **inputs_ids** (Tensor) - token id, a 2-D Tensor with shape :math:`(batch\_size, seq\_length)`.
+
+    Outputs:
+        - **position_id** (Tensor) - position id matrix with same shape and type as original `inputs_ids`.
+        - **attention_mask** (Tensor) - attention mask matrix with type
+          float16 and shape :math:`(batch\_size, seq\_length)`.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> op = ops.GenerateEodMask(eod_token_id=0)
+        >>> position, mask = op(Tensor([[1, 0, 3], [1, 0, 0]], dtype=mindspore.int32))
+        >>> print(position)
+        [[0 1 0] [0 0 1]]
+        >>> print(mask)
+        [[[ 1. 0. 0.]
+          [1. 1. 0.]
+          [0. 0. 1.]]
+         [[1. 0. 0.]
+          [0. 1. 0.]
+          [0. 1. 1.]]]
+
+    Raises:
+        - **TypeError** - If `eod_token_id` is not int.
+        - **TypeError** - If `inputs_ids` is not int.
+        - **ValueError** - If `inputs_ids` is not a 2-D Tensor.
+    """
+    @prim_attr_register
+    def __init__(self, n_pos, eod_token_id, n_step, n_error_mode='specific'):
+        """Initialize GenerateEodMask"""
+        validator.check_value_type("eod_token_id", eod_token_id, [int], self.name)
+        validator.check_value_type("n_pos", n_pos, [int], self.name)
+        validator.check_value_type("n_step", n_step, [list], self.name)
+        validator.check_value_type("n_error_mode", n_error_mode, [str], self.name)
+        self.init_prim_io_names(inputs=['inputs_ids'],
+                                outputs=['position_ids'])
+
+
 class ScaleGrad(PrimitiveWithInfer):
     """
     Scale the input grad according to the loss scale.
mindspore/ops/operations/math_ops.py:
@@ -123,6 +123,64 @@ class _MathBinaryOp(_BinaryOp):
         real_shape = [dim if cmp_dim > 0 else cmp_dim for dim, cmp_dim in zip(shape_value, cmp_shape)]
         return tuple(real_shape)
 
+class SilentCheck(Primitive):
+    """
+    Implement SilentCheck on `pre_val`, `min_val`, `max_val`, `result` and
+    update them inplace with given parameters.
+
+    Args:
+        c_min_steps (int): an int determines...
+
+        c_thresh_l1 (float): a float determines...
+
+        c_coeff_l1 (float): a float determines...
+
+        c_thresh_l2 (float): a float determines...
+
+        c_coeff_l2 (float): a float determines...
+
+    Inputs:
+        - **val** (Tensor) - Tensor with dtype float32.
+        - **input_grad** (Parameter) - Tensor with dtype float32.
+        - **pre_val** (Parameter) - Input Parameter with dtype float32.
+        - **min_val** (Parameter) - Input Parameter with dtype float32.
+        - **max_val** (Parameter) - Input Parameter with dtype float32.
+        - **val_counter** (Parameter) - Input Parameter with dtype int32.
+
+    Outputs:
+        Tuple of 5 Tensors, the updated parameters.
+        - **input_grad** (Tensor) - Tensor with dtype float32.
+        - **pre_val** (Tensor) - Tensor with dtype float32.
+        - **min_val** (Tensor) - Tensor with dtype float32.
+        - **max_val** (Tensor) - Tensor with dtype float32.
+        - **result** (Tensor) - Tensor with dtype int32.
+
+    Raises:
+        TypeError: If `val` is not Tensor with dtype float32.
+        TypeError: If `result` is not Tensor with dtype int32.
+        TypeError: If `pre_val`, `min_val`, `max_val`, `input_grad` are not all Parameter type with dtype float32.
+        TypeError: If `c_thresh_l1` or `c_coeff_l1` is not a float number.
+        TypeError: If `c_min_steps` is not an int number.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> from mindspore.ops.operations.math_ops import SilentCheck
+        >>> silent_check = SilentCheck()
+        xxx
+    """
+
+    @prim_attr_register
+    def __init__(self, c_min_steps, c_thresh_l1, c_coeff_l1, c_thresh_l2, c_coeff_l2):
+        """Initialize SilentCheck."""
+        validator.check_value_type("c_min_steps", c_min_steps, [int], self.name)
+        validator.check_value_type("c_thresh_l1", c_thresh_l1, [float], self.name)
+        validator.check_value_type("c_coeff_l1", c_coeff_l1, [float], self.name)
+        validator.check_value_type("c_thresh_l2", c_thresh_l2, [float], self.name)
+        validator.check_value_type("c_coeff_l2", c_coeff_l2, [float], self.name)
+        self.add_prim_attr('side_effect_mem', True)
+
 
 class _BitwiseBinaryOp(_MathBinaryOp):
     """
@@ -462,6 +520,7 @@ class AssignAdd(Primitive):
         >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor, ops, nn
+        >>> from mindspore.common.initializer import initializer
         >>> class Net(nn.Cell):
         ...     def __init__(self):
         ...         super(Net, self).__init__()
@@ -512,6 +571,7 @@ class AssignSub(Primitive):
         >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor, ops, nn
+        >>> from mindspore.common.initializer import initializer
         >>> class Net(nn.Cell):
         ...     def __init__(self):
         ...         super(Net, self).__init__()
@@ -7253,6 +7313,7 @@ class Igamma(Primitive):
 
     Examples:
         >>> import numpy as np
+        >>> import mindspore
         >>> from mindspore import Tensor, ops
         >>> a = Tensor(np.array([2.0, 4.0, 6.0, 8.0]).astype(np.float32))
         >>> x = Tensor(np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32))
@@ -7291,6 +7352,7 @@ class Igammac(Primitive):
     ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
+        >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor, ops
         >>> a = Tensor(np.array([2.0, 4.0, 6.0, 8.0]).astype(np.float32))
mindspore/ops/operations/nn_ops.py:
@@ -3777,7 +3777,7 @@ class LayerNorm(Primitive):
     - **output_x** (Tensor) - The normalized input, has the same type and shape as the `input_x`.
     - **mean** (Tensor) - The first `begin_norm_axis` dimensions of `mean` shape is the same as `input_x`,
       and the remaining dimensions are 1. Suppose the shape of the `input_x` is :math:`(x_1, x_2, \ldots, x_R)`,
-      the shape of the `mean` is :math:`(x_1, \ldots, x_{begin_params_axis}, 1, \ldots, 1)`
+      the shape of the `mean` is :math:`(x_1, \ldots, x_{begin\_params\_axis}, 1, \ldots, 1)`
       (when `begin_params_axis=0`, the shape of `mean` is :math:`(1, \ldots, 1)` ).
    - **variance** (Tensor) - Shape is the same as `mean` .
 
@@ -4917,6 +4917,7 @@ class Adam(Primitive):
         >>> import mindspore
         >>> import numpy as np
         >>> from mindspore import Tensor, nn, ops
+        >>> from mindspore import Parameter
         >>> class Net(nn.Cell):
         ...     def __init__(self):
         ...         super(Net, self).__init__()
@@ -9991,6 +9992,9 @@ class FractionalMaxPool3DWithFixedKsize(Primitive):
     ``Ascend`` ``GPU`` ``CPU``
 
     Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> from mindspore import dtype as mstype
         >>> x = Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])
         ...            .reshape([1, 1, 2, 2, 4]), mstype.float32)
         >>> random_samples = Tensor(np.array([0.7, 0.7, 0.7]).reshape([1, 1, 3]), mstype.float32)
@@ -11363,7 +11367,7 @@ class PromptFlashAttention(Primitive):
          For each element, 0 indicates retention and 1 indicates discard. Input tensor of shape :math:`(B, 1, S, S)`.
        - **actual_seq_lengths** (Tensor): Describe actual sequence length of each input with data type of int.
        - **actual_seq_lengths_kv** (Tensor): Describe actual sequence length of each input with data type of int.
-        - **padding_mask** (Tensor) - The padding mask tensor with data type of float16 or float32
+        - **pse_shift** (Tensor) - The position encoding tensor with data type of float16 or float32.
        - **dep_scale1** (Tensor)
        - **quant_scale1** (Tensor)
        - **deq_scale2** (Tensor)
@@ -11406,7 +11410,7 @@ class PromptFlashAttention(Primitive):
         validator.check_value_type('num_key_value_heads', num_key_value_heads, [int], self.name)
         validator.check_value_type('sparse_mode', sparse_mode, [int], self.name)
         self.init_prim_io_names(inputs=["query", "key", "value", "attn_mask", "actual_seq_lengths",
-                                        "actual_seq_lengths_kv", "padding_mask", "deq_scale1", "quant_scale1",
+                                        "actual_seq_lengths_kv", "pse_shift", "deq_scale1", "quant_scale1",
                                         "deq_scale2", "quant_scale2", "quant_offset2"],
                                 outputs=["attention_out"])
 
mindspore/ops/operations/random_ops.py:
@@ -1271,6 +1271,8 @@ class RandpermV2(Primitive):
     ``Ascend`` ``CPU``
 
     Examples:
+        >>> from mindspore import Tensor, ops
+        >>> from mindspore import dtype as mstype
         >>> n = Tensor([4], mstype.int64)
         >>> seed = 0
         >>> offset = 0