mindspore 2.2.11__cp37-none-any.whl → 2.2.14__cp37-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mindspore might be problematic.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +2 -1
- mindspore/_akg/akg/topi/cpp/impl.py +1 -1
- mindspore/_akg/akg/tvm/_ffi/base.py +1 -1
- mindspore/_c_dataengine.cpython-37m-aarch64-linux-gnu.so +0 -0
- mindspore/_c_expression.cpython-37m-aarch64-linux-gnu.so +0 -0
- mindspore/_c_mindrecord.cpython-37m-aarch64-linux-gnu.so +0 -0
- mindspore/_mindspore_offline_debug.cpython-37m-aarch64-linux-gnu.so +0 -0
- mindspore/bin/cache_admin +0 -0
- mindspore/bin/cache_server +0 -0
- mindspore/common/tensor.py +0 -2
- mindspore/communication/management.py +3 -0
- mindspore/context.py +34 -4
- mindspore/dataset/engine/datasets.py +23 -0
- mindspore/dataset/engine/validators.py +1 -1
- mindspore/dataset/vision/py_transforms_util.py +2 -2
- mindspore/experimental/optim/lr_scheduler.py +5 -6
- mindspore/lib/libdnnl.so.2 +0 -0
- mindspore/lib/libmindspore.so +0 -0
- mindspore/lib/libmindspore_backend.so +0 -0
- mindspore/lib/libmindspore_common.so +0 -0
- mindspore/lib/libmindspore_core.so +0 -0
- mindspore/lib/libmindspore_glog.so.0 +0 -0
- mindspore/lib/libmindspore_gpr.so.15 +0 -0
- mindspore/lib/libmindspore_grpc.so.15 +0 -0
- mindspore/lib/libmindspore_shared_lib.so +0 -0
- mindspore/lib/libopencv_core.so.4.5 +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +48 -0
- mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
- mindspore/lib/plugin/ascend/libakg.so +0 -0
- mindspore/lib/plugin/ascend/libascend_collective.so +0 -0
- mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_aicpu_kernels.so +0 -0
- mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
- mindspore/lib/plugin/libmindspore_ascend.so.1 +0 -0
- mindspore/mindrecord/tools/cifar100_to_mr.py +49 -57
- mindspore/mindrecord/tools/cifar10_to_mr.py +46 -55
- mindspore/mindrecord/tools/csv_to_mr.py +3 -8
- mindspore/mindrecord/tools/mnist_to_mr.py +4 -9
- mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -4
- mindspore/nn/layer/activation.py +1 -1
- mindspore/nn/layer/embedding.py +2 -2
- mindspore/nn/loss/loss.py +1 -1
- mindspore/nn/optim/ada_grad.py +2 -2
- mindspore/nn/optim/sgd.py +3 -2
- mindspore/numpy/math_ops.py +1 -1
- mindspore/ops/__init__.py +3 -0
- mindspore/ops/_grad_experimental/grad_array_ops.py +0 -31
- mindspore/ops/_grad_experimental/grad_comm_ops.py +4 -2
- mindspore/ops/_grad_experimental/grad_inner_ops.py +8 -0
- mindspore/ops/_grad_experimental/grad_math_ops.py +37 -17
- mindspore/ops/_op_impl/aicpu/__init__.py +1 -0
- mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +38 -0
- mindspore/ops/function/array_func.py +6 -5
- mindspore/ops/function/debug_func.py +1 -1
- mindspore/ops/function/linalg_func.py +21 -11
- mindspore/ops/function/math_func.py +3 -0
- mindspore/ops/function/nn_func.py +13 -11
- mindspore/ops/function/parameter_func.py +2 -0
- mindspore/ops/function/sparse_unary_func.py +2 -2
- mindspore/ops/function/vmap_func.py +1 -0
- mindspore/ops/operations/_embedding_cache_ops.py +1 -1
- mindspore/ops/operations/_inner_ops.py +56 -1
- mindspore/ops/operations/_quant_ops.py +4 -4
- mindspore/ops/operations/_rl_inner_ops.py +1 -1
- mindspore/ops/operations/array_ops.py +15 -4
- mindspore/ops/operations/custom_ops.py +1 -1
- mindspore/ops/operations/debug_ops.py +1 -1
- mindspore/ops/operations/image_ops.py +3 -3
- mindspore/ops/operations/inner_ops.py +49 -0
- mindspore/ops/operations/math_ops.py +62 -0
- mindspore/ops/operations/nn_ops.py +7 -3
- mindspore/ops/operations/random_ops.py +2 -0
- mindspore/ops/operations/sparse_ops.py +4 -4
- mindspore/ops/silent_check.py +162 -0
- mindspore/parallel/__init__.py +3 -2
- mindspore/parallel/_auto_parallel_context.py +82 -3
- mindspore/parallel/_parallel_serialization.py +34 -2
- mindspore/parallel/_tensor.py +3 -1
- mindspore/parallel/_transformer/transformer.py +8 -8
- mindspore/parallel/checkpoint_transform.py +191 -45
- mindspore/profiler/parser/ascend_cluster_generator.py +111 -0
- mindspore/profiler/parser/ascend_communicate_generator.py +315 -0
- mindspore/profiler/parser/ascend_flops_generator.py +8 -2
- mindspore/profiler/parser/ascend_fpbp_generator.py +8 -2
- mindspore/profiler/parser/ascend_hccl_generator.py +2 -2
- mindspore/profiler/parser/ascend_msprof_exporter.py +30 -6
- mindspore/profiler/parser/ascend_msprof_generator.py +16 -5
- mindspore/profiler/parser/ascend_op_generator.py +15 -7
- mindspore/profiler/parser/ascend_timeline_generator.py +5 -2
- mindspore/profiler/parser/base_timeline_generator.py +11 -3
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +2 -1
- mindspore/profiler/parser/framework_parser.py +8 -2
- mindspore/profiler/parser/memory_usage_parser.py +8 -2
- mindspore/profiler/parser/minddata_analyzer.py +8 -2
- mindspore/profiler/parser/minddata_parser.py +1 -1
- mindspore/profiler/parser/msadvisor_analyzer.py +4 -2
- mindspore/profiler/parser/msadvisor_parser.py +9 -3
- mindspore/profiler/profiling.py +97 -25
- mindspore/rewrite/api/node.py +1 -1
- mindspore/rewrite/api/symbol_tree.py +2 -2
- mindspore/train/callback/_checkpoint.py +8 -8
- mindspore/train/callback/_landscape.py +2 -3
- mindspore/train/callback/_summary_collector.py +6 -7
- mindspore/train/dataset_helper.py +6 -0
- mindspore/train/model.py +17 -5
- mindspore/train/serialization.py +6 -1
- mindspore/train/summary/_writer_pool.py +1 -1
- mindspore/train/summary/summary_record.py +5 -6
- mindspore/version.py +1 -1
- {mindspore-2.2.11.dist-info → mindspore-2.2.14.dist-info}/METADATA +1 -1
- {mindspore-2.2.11.dist-info → mindspore-2.2.14.dist-info}/RECORD +117 -114
- mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
- {mindspore-2.2.11.dist-info → mindspore-2.2.14.dist-info}/WHEEL +0 -0
- {mindspore-2.2.11.dist-info → mindspore-2.2.14.dist-info}/entry_points.txt +0 -0
- {mindspore-2.2.11.dist-info → mindspore-2.2.14.dist-info}/top_level.txt +0 -0
@@ -99,6 +99,7 @@ def assign_sub(variable, value):
 >>> import mindspore
 >>> import numpy as np
 >>> from mindspore import Tensor, ops
+>>> from mindspore.common.initializer import initializer
 >>> variable = mindspore.Parameter(initializer(1, [1], mindspore.int32), name="global_step")
 >>> value = Tensor(np.ones([1]).astype(np.int32) * 100)
 >>> ops.assign_sub(variable, value)
@@ -149,6 +150,7 @@ def assign_add(variable, value):
 >>> import mindspore
 >>> import numpy as np
 >>> from mindspore import Tensor, ops
+>>> from mindspore.common.initializer import initializer
 >>> variable = mindspore.Parameter(initializer(1, [1], mindspore.int32), name="global_step")
 >>> value = Tensor(np.ones([1]).astype(np.int32) * 100)
 >>> ops.assign_add(variable, value)
@@ -28,7 +28,7 @@ def csr_cos(x: CSRTensor) -> CSRTensor:
 out_i = \cos(x_i)

 .. warning::
-Currently support data types float16 and float32. If use
+Currently support data types float16 and float32. If use float64, there may be a problem of missing precision.

 Args:
 x (CSRTensor): Input CSRTensor.
@@ -69,7 +69,7 @@ def coo_cos(x: COOTensor) -> COOTensor:
 out_i = \cos(x_i)

 .. warning::
-If use
+If use float64, there may be a problem of missing precision.

 Args:
 x (COOTensor): Input COOTensor.
@@ -65,7 +65,7 @@ class SubAndFilter(PrimitiveWithCheck):

 Inputs:
 - **input_x** (Tensor) - Input tensor.
-- **max_num** (
+- **max_num** (int) - The max value of element that after sub `offset`.
 - **offset** (int) - Specifies the offset value of this `input_x`.

 Outputs:
@@ -16,6 +16,7 @@
 """Inner operators."""
 from types import FunctionType, MethodType
 from collections.abc import Iterable
+import os
 import numpy as np

 from mindspore.common import Tensor
@@ -1008,7 +1009,7 @@ class Centralization(PrimitiveWithInfer):

 Inputs:
 - **input_x** (Tensor) - The input tensor. The data type mast be float16 or float32.
-- **axis** (Union[
+- **axis** (Union[int, Tuple(int), List(int)]) - The dimensions to reduce. Default: (), reduce all dimensions.
 Only constant value is allowed. Must be in the range [-rank(input_x), rank(input_x)).

 Outputs:
@@ -2858,6 +2859,60 @@ class DecoderKVCache(Primitive):
         self.add_prim_attr('side_effect_mem', True)


+class _MirrorSilentCheck(PrimitiveWithInfer):
+    """
+    The operator _MirrorSilentCheck implements accuracy-sensitive detection on the tensor input in backpropagator.
+    Call _MirrorSilentCheck in method __call__ of derived class to implement accuracy-sensitive detection.
+
+    Inputs:
+        - **input** (Tensor) : The tensor used for detection.
+          Its data type must be mindspore.float16, mindspore.float32 or mindspore.bfloat16.
+        - **pre_val** (Parameter(Tensor)) : Support parameter in accuracy-sensitive detection.
+          Please only generated by method generate_params() of ASDBase.
+        - **min_val** (Parameter(Tensor)) : Support parameter in accuracy-sensitive detection.
+          Please only generated by method generate_params() of ASDBase.
+        - **max_val** (Parameter(Tensor)) : Support parameter in accuracy-sensitive detection.
+          Please only generated by method generate_params() of ASDBase.
+        - **cnt** (Parameter(Tensor)) : Support parameter in accuracy-sensitive detection.
+          Please only generated by method generate_params() of ASDBase.
+          After each invocation of _MirrorSilentCheck, increment the value of cnt by one.
+
+    Outputs:
+        - **output** (Tensor) - Same shape, type and value as `input`.
+    """
+    @prim_attr_register
+    def __init__(self, min_steps=8):
+        upper_thresh, sigma_thresh = self.get_thresh()
+        self.min_steps = min_steps
+        self.thresh_l1 = upper_thresh[0]
+        self.coeff_l1 = sigma_thresh[0]
+        self.thresh_l2 = upper_thresh[1]
+        self.coeff_l2 = sigma_thresh[1]
+        self.add_prim_attr('side_effect_mem', True)
+
+    def parse_thresh(self, env_var_name, default_value, min_value):
+        env_var = os.environ.get(env_var_name, default=default_value)
+        thresh = [value.strip() for value in env_var.split(",")]
+        if len(thresh) != 2 or not all(value.isdigit() for value in thresh):
+            thresh = default_value.split(",")
+        thresh = [float(max(int(value), min_value)) for value in thresh]
+        if thresh[0] <= thresh[1]:
+            thresh = [float(value) for value in default_value.split(",")]
+
+        return thresh
+
+    def get_thresh(self):
+        upper_thresh = self.parse_thresh("NPU_ASD_UPPER_THRESH", "1000000,10000", 3)
+        sigma_thresh = self.parse_thresh("NPU_ASD_SIGMA_THRESH", "100000,5000", 3)
+        return upper_thresh, sigma_thresh
+
+    def infer_shape(self, x_shape, pre_shape, min_shape, max_shape, n_step, loss_scale_shape):
+        return x_shape
+
+    def infer_dtype(self, x_dtype, pre_dtype, min_dtype, max_dtype, n_dtype, loss_scale_dtype):
+        return x_dtype
+
+
 class PromptKVCache(Primitive):
     r"""
     The PromptKVCache is used for prefill the KVCache of transformer network.
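Note: the detection thresholds in the hunk above are read from environment variables. The following standalone sketch (not part of the diff; the function name is illustrative) mirrors the parsing rule of the added parse_thresh method and can be used to check what a given NPU_ASD_UPPER_THRESH or NPU_ASD_SIGMA_THRESH setting resolves to:

    import os

    def parse_asd_thresh(env_var_name, default_value, min_value):
        # Same rule as parse_thresh in the hunk above: expect two comma-separated
        # integers "high,low"; fall back to the default when the format is wrong,
        # clamp each value to min_value, and reject settings where high <= low.
        raw = os.environ.get(env_var_name, default=default_value)
        parts = [v.strip() for v in raw.split(",")]
        if len(parts) != 2 or not all(v.isdigit() for v in parts):
            parts = default_value.split(",")
        thresh = [float(max(int(v), min_value)) for v in parts]
        if thresh[0] <= thresh[1]:
            thresh = [float(v) for v in default_value.split(",")]
        return thresh

    # With the variable unset this returns the defaults, e.g. [1000000.0, 10000.0].
    print(parse_asd_thresh("NPU_ASD_UPPER_THRESH", "1000000,10000", 3))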
@@ -579,7 +579,7 @@ class FakeQuantWithMinMaxVars(PrimitiveWithInfer):
 range is [1, 2^num_bits-1]. Default: ``False``.

 Inputs:
-- **x** (Tensor) -
+- **x** (Tensor) - float32 tensor representing the shape of the output tensor.
 - **min** (Tensor) - Value of the min range of the input data x.
 - **max** (Tensor) - Value of the max range of the input data x.

@@ -638,7 +638,7 @@ class FakeQuantWithMinMaxVarsGradient(PrimitiveWithInfer):

 Inputs:
 - **gradients** (Tensor) - The gradient above the FakeQuantWithMinMaxVars.
-- **x** (Tensor) -
+- **x** (Tensor) - float32 tensor representing the shape of the output tensor.
 - **min** (Tensor) - Value of the min range of the input data x.
 - **max** (Tensor) - Value of the max range of the input data x.

@@ -702,7 +702,7 @@ class FakeQuantWithMinMaxVarsPerChannel(PrimitiveWithInfer):
 range is [1, 2^num_bits-1]. Default: ``False``.

 Inputs:
-- **x** (Tensor) -
+- **x** (Tensor) - float32 tensor representing the shape of the output tensor.
 - **min** (Tensor) - Value of the min range of the input data x.
 - **max** (Tensor) - Value of the max range of the input data x.

@@ -754,7 +754,7 @@ class FakeQuantWithMinMaxVarsPerChannelGradient(PrimitiveWithInfer):

 Inputs:
 - **gradients** (Tensor) - The gradient above the FakeQuantWithMinMaxVars.
-- **x** (Tensor) -
+- **x** (Tensor) - float32 tensor representing the shape of the output tensor.
 - **min** (Tensor) - Value of the min range of the input data x.
 - **max** (Tensor) - Value of the max range of the input data x.

@@ -341,7 +341,7 @@ class LSTMV2(Primitive):
 - **h** (Tensor) - Tensor of shape (num_directions * `num_layers`, batch_size, `hidden_size`).
 - **c** (Tensor) - Tensor of shape (num_directions * `num_layers`, batch_size, `hidden_size`).
 - **w** (Tensor) - The input tensor which states for weights.
-- **seq_lengths** (Tensor) - The Tensor[
+- **seq_lengths** (Tensor) - The Tensor[int32] of shape (batch_size, ),
 indicates the seq_length of each batch dim.

 Outputs:
@@ -227,7 +227,11 @@ class ExpandDims(PrimitiveWithCheck):
     def infer_value(self, input_x, axis):
         value = None
         if input_x is not None and axis is not None:
-
+            dtype = input_x.dtype
+            if input_x.dtype == mstype.bfloat16:
+                cpu_cast = Cast().set_device("CPU")
+                input_x = cpu_cast(input_x, mstype.float32)
+            value = Tensor(np.expand_dims(input_x.asnumpy(), axis), dtype)
         return value


@@ -375,6 +379,9 @@ class Cast(PrimitiveWithCheck):
         if isinstance(x, (int, float)):
             value = Tensor(np.array(x).astype(np_dst_type), dtype=dst_type)
         else:
+            if x.dtype == mstype.bfloat16:
+                cpu_cast = Cast().set_device("CPU")
+                x = cpu_cast(x, mstype.float32)
             value = Tensor(x.asnumpy().astype(np_dst_type), dtype=dst_type)
         return value

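Note: the ExpandDims and Cast infer_value changes above follow the same pattern: a bfloat16 tensor is first cast to float32 on the CPU before asnumpy() is called, since NumPy has no bfloat16 dtype. A hedged sketch of the same pattern outside the primitive code (assuming your build supports constructing bfloat16 tensors):

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor(np.ones((2, 3), np.float32), dtype=ms.bfloat16)
    # Cast bfloat16 to float32 on CPU before converting to NumPy,
    # mirroring the branch added in infer_value above.
    cpu_cast = ops.Cast().set_device("CPU")
    x_np = cpu_cast(x, ms.float32).asnumpy()
    expanded = Tensor(np.expand_dims(x_np, 0), ms.bfloat16)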
@@ -5837,6 +5844,9 @@ class SpaceToBatchND(Primitive):
 ``Ascend`` ``GPU`` ``CPU``

 Examples:
+>>> import mindspore
+>>> from mindspore import Tensor, ops
+>>> import numpy as np
 >>> block_shape = [2, 2]
 >>> paddings = [[0, 0], [0, 0]]
 >>> space_to_batch_nd = ops.SpaceToBatchND(block_shape, paddings)
@@ -6279,8 +6289,8 @@ class Sort(Primitive):
 Sorts the elements of the input tensor along the given dimension in the specified order.

 .. warning::
-Currently, the data types of
-
+Currently, the data types of float16, uint8, int8, int16, int32, int64 are well supported.
+If use float32, it may cause loss of accuracy.

 Args:
 axis (int, optional): The dimension to sort along. Default: ``-1``, means the last dimension.
@@ -8719,7 +8729,8 @@ class TopK(Primitive):
 - GPU: float16, float32.
 - CPU: all numeric types.

-- **k** (int) - The number of top elements to be computed along the last dimension
+- **k** (Union(Tensor, int)) - The number of top elements to be computed along the last dimension.
+If `k` is a Tensor, the supported dtype is int32 and it should be 0-D or 1-D with shape :math:`(1, )` .

 Outputs:
 A tuple consisting of `values` and `indexes`.
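Note: per the TopK docstring change above, `k` may now also be passed as an int32 Tensor (0-D, or 1-D with shape (1,)). A hedged example of that call form (whether the Tensor form is accepted end to end depends on the backend in 2.2.14):

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, ops

    x = Tensor(np.array([5.0, 1.0, 3.0, 4.0, 2.0]), ms.float32)
    k = Tensor(3, ms.int32)  # 0-D int32 Tensor, as described in the updated docstring
    values, indices = ops.TopK(sorted=True)(x, k)
    print(values)  # expected [5. 4. 3.]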
@@ -470,7 +470,7 @@ class Custom(ops.PrimitiveWithInfer):
     op_path_in_cache = [] # Save paths for op functions created in the cached.
     custom_aot_warning = True # Flag to enable warnings about custom aot path white list

-    def __init__(self, func, out_shape=None, out_dtype=None, func_type=
+    def __init__(self, func, out_shape=None, out_dtype=None, func_type="hybrid", bprop=None, reg_info=None):
         super().__init__("Custom")

         self.supported_targets = [ASCEND, GPU, CPU]
@@ -472,7 +472,7 @@ class Print(Primitive):

 Examples:
 >>> import numpy as np
->>> from mindspore import Tensor, nn
+>>> from mindspore import Tensor, nn, ops
 >>> class PrintDemo(nn.Cell):
 ... def __init__(self):
 ... super(PrintDemo, self).__init__()
@@ -388,7 +388,7 @@ class NonMaxSuppressionV3(Primitive):
 single score associated with each box (i.e., each row of the `boxes` Tensor).
 It is required that the number of scores in `scores` must be equal to the number of boxes in `boxes`.
 The supported data type is float32.
-- **max_output_size** (Union[Tensor, Number.
+- **max_output_size** (Union[Tensor, Number.int]) - A scalar integer Tensor representing the maximum
 number of boxes to be selected by non max suppression. The supported data type is int32.
 - **iou_threshold** (Union[Tensor, Number.Float]) - A scalar float Tensor represents the threshold
 used for determining if the intersection over union (IOU) between boxes is too high.
@@ -459,7 +459,7 @@ class NonMaxSuppressionWithOverlaps(Primitive):
 single score associated with each box (i.e., each row of the `boxes` Tensor).
 It is required that the number of scores in `scores` must be equal to the number of boxes in `boxes`.
 The supported data type is float32.
-- **max_output_size** (Union[Tensor, Number.
+- **max_output_size** (Union[Tensor, Number.int]) - A scalar integer Tensor representing the maximum
 number of boxes to be selected by non max suppression, and max_output_size must be equal to or greater
 than 0.
 Types allowed:int32.
@@ -816,7 +816,7 @@ class ResizeBicubic(Primitive):
 Examples:
 >>> import mindspore
 >>> import numpy as np
->>> from mindspore import Tensor, ops
+>>> from mindspore import Tensor, ops, nn
 >>> class NetResizeBicubic(nn.Cell):
 ... def __init__(self):
 ... super(NetResizeBicubic, self).__init__()
@@ -642,6 +642,55 @@ class FusedAdaFactorWithGlobalNorm(FusedAdaFactor):
         return param_type


+class GenerateEodMask(Primitive):
+    r"""
+    Given the input `inputs_ids`, if found eod_token_id, the output position and attention mask matrix will be reset.
+    This means the `position_id` will start counting from 0, and the corresponding mask matrix will be filled with 0.
+
+    Args:
+        eod_token_id (int) - In the NLP scenario, this value corresponds to the id of
+            the symbol of 'EodOfDocument' in the vocabulary.
+
+    Inputs:
+        - **inputs_ids** (Tensor) - token id, a 2-D Tensor with shape :math:`(batch\_size, seq\_length)`.
+
+    Outputs:
+        - **position_id** (Tensor) - position id matrix with same shape and type as original `inputs_ids`.
+        - **attention_mask** (Tensor) - attention mask matrix with type
+          float16 and shape :math:`(batch\_size, seq\_length)`.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> op = ops.GenerateEodMask(eod_token_id=0)
+        >>> position, mask = op(Tensor([[1, 0, 3], [1, 0, 0]], dtype=mindspore.int32))
+        >>> print(position)
+        [[0 1 0] [0 0 1]]
+        >>> print(mask)
+        [[[ 1. 0. 0.]
+        [1. 1. 0.]
+        [0. 0. 1.]]
+        [[1. 0. 0.]
+        [0. 1. 0.]
+        [0. 1. 1.]]]
+
+    Raises:
+        - **TypeError** - If `eod_token_id` is not int.
+        - **TypeError** - If `inputs_ids` is not int.
+        - **ValueError** - If `inputs_ids` is not a 2-D Tensor.
+    """
+    @prim_attr_register
+    def __init__(self, n_pos, eod_token_id, n_step, n_error_mode='specific'):
+        """Initialize GenerateEodMask"""
+        validator.check_value_type("eod_token_id", eod_token_id, [int], self.name)
+        validator.check_value_type("n_pos", n_pos, [int], self.name)
+        validator.check_value_type("n_step", n_step, [list], self.name)
+        validator.check_value_type("n_error_mode", n_error_mode, [str], self.name)
+        self.init_prim_io_names(inputs=['inputs_ids'],
+                                outputs=['position_ids'])
+
+
 class ScaleGrad(PrimitiveWithInfer):
     """
     Scale the input grad according to the loss scale.
@@ -123,6 +123,64 @@ class _MathBinaryOp(_BinaryOp):
         real_shape = [dim if cmp_dim > 0 else cmp_dim for dim, cmp_dim in zip(shape_value, cmp_shape)]
         return tuple(real_shape)

+class SilentCheck(Primitive):
+    """
+    Implement SilentCheck on `pre_val`, `min_val`, `max_val`, `result` and
+    update them inplace with given parameters.
+
+    Args:
+        c_min_steps (int): an int determines...
+
+        c_thresh_l1 (float): a float determines...
+
+        c_coeff_l1 (float): a float determines...
+
+        c_thresh_l2 (float): a float determines...
+
+        c_coeff_l2 (float): a float determines...
+
+    Inputs:
+        - **val** (Tensor) - Tensor with dtype float32.
+        - **input_grad** (Parameter) - Tensor with dtype float32.
+        - **pre_val** (Parameter) - Input Parameter with dtype float32.
+        - **min_val** (Parameter) - Input Parameter with dtype float32.
+        - **max_val** (Parameter) - Input Parameter with dtype float32.
+        - **val_counter** (Parameter) - Input Parameter with dtype int32.
+
+    Outputs:
+        Tuple of 5 Tensors, the updated parameters.
+        - **input_grad** (Tensor) - Tensor with dtype float32.
+        - **pre_val** (Tensor) - Tensor with dtype float32.
+        - **min_val** (Tensor) - Tensor with dtype float32.
+        - **max_val** (Tensor) - Tensor with dtype float32.
+        - **result** (Tensor) - Tensor with dtype int32.
+
+    Raises:
+        TypeError: If `val` is not Tensor with dtype float32.
+        TypeError: If `result` is not Tensor with dtype int32.
+        TypeError: If `pre_val`, `min_val`, `max_val`, `input_grad` are not all Parameter type with dtype float32.
+        TypeError: If `c_thresh_l1` or `c_coeff_l1` is not a float number.
+        TypeError: If `c_min_steps` is not an int number.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> from mindspore.ops.operations.math_ops import SilentCheck
+        >>> silent_check = SilentCheck()
+        xxx
+    """
+
+    @prim_attr_register
+    def __init__(self, c_min_steps, c_thresh_l1, c_coeff_l1, c_thresh_l2, c_coeff_l2):
+        """Initialize SilentCheck."""
+        validator.check_value_type("c_min_steps", c_min_steps, [int], self.name)
+        validator.check_value_type("c_thresh_l1", c_thresh_l1, [float], self.name)
+        validator.check_value_type("c_coeff_l1", c_coeff_l1, [float], self.name)
+        validator.check_value_type("c_thresh_l2", c_thresh_l2, [float], self.name)
+        validator.check_value_type("c_coeff_l2", c_coeff_l2, [float], self.name)
+        self.add_prim_attr('side_effect_mem', True)
+

 class _BitwiseBinaryOp(_MathBinaryOp):
     """
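Note: the Examples block of the new SilentCheck docstring is still a placeholder in this release ("xxx"). A minimal construction sketch, based only on the attribute types checked in __init__ above; the numeric values are illustrative, not defaults taken from the source:

    from mindspore.ops.operations.math_ops import SilentCheck

    # One int followed by four floats, matching the validator checks in __init__.
    silent_check = SilentCheck(c_min_steps=8,
                               c_thresh_l1=1000000.0,
                               c_coeff_l1=100000.0,
                               c_thresh_l2=10000.0,
                               c_coeff_l2=5000.0)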
@@ -462,6 +520,7 @@ class AssignAdd(Primitive):
 >>> import mindspore
 >>> import numpy as np
 >>> from mindspore import Tensor, ops, nn
+>>> from mindspore.common.initializer import initializer
 >>> class Net(nn.Cell):
 ... def __init__(self):
 ... super(Net, self).__init__()
@@ -512,6 +571,7 @@ class AssignSub(Primitive):
 >>> import mindspore
 >>> import numpy as np
 >>> from mindspore import Tensor, ops, nn
+>>> from mindspore.common.initializer import initializer
 >>> class Net(nn.Cell):
 ... def __init__(self):
 ... super(Net, self).__init__()
@@ -7253,6 +7313,7 @@ class Igamma(Primitive):

 Examples:
 >>> import numpy as np
+>>> import mindspore
 >>> from mindspore import Tensor, ops
 >>> a = Tensor(np.array([2.0, 4.0, 6.0, 8.0]).astype(np.float32))
 >>> x = Tensor(np.array([2.0, 3.0, 4.0, 5.0]).astype(np.float32))
@@ -7291,6 +7352,7 @@ class Igammac(Primitive):
 ``Ascend`` ``GPU`` ``CPU``

 Examples:
+>>> import mindspore
 >>> import numpy as np
 >>> from mindspore import Tensor, ops
 >>> a = Tensor(np.array([2.0, 4.0, 6.0, 8.0]).astype(np.float32))
@@ -3777,7 +3777,7 @@ class LayerNorm(Primitive):
 - **output_x** (Tensor) - The normalized input, has the same type and shape as the `input_x`.
 - **mean** (Tensor) - The first `begin_norm_axis` dimensions of `mean` shape is the same as `input_x`,
 and the remaining dimensions are 1. Suppose the shape of the `input_x` is :math:`(x_1, x_2, \ldots, x_R)`,
-the shape of the `mean` is :math:`(x_1, \ldots, x_{
+the shape of the `mean` is :math:`(x_1, \ldots, x_{begin\_params\_axis}, 1, \ldots, 1)`
 (when `begin_params_axis=0`, the shape of `mean` is :math:`(1, \ldots, 1)` ).
 - **variance** (Tensor) - Shape is the same as `mean` .

@@ -4917,6 +4917,7 @@ class Adam(Primitive):
 >>> import mindspore
 >>> import numpy as np
 >>> from mindspore import Tensor, nn, ops
+>>> from mindspore import Parameter
 >>> class Net(nn.Cell):
 ... def __init__(self):
 ... super(Net, self).__init__()
@@ -9991,6 +9992,9 @@ class FractionalMaxPool3DWithFixedKsize(Primitive):
 ``Ascend`` ``GPU`` ``CPU``

 Examples:
+>>> import numpy as np
+>>> from mindspore import Tensor, ops
+>>> from mindspore import dtype as mstype
 >>> x = Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])
 ... .reshape([1, 1, 2, 2, 4]), mstype.float32)
 >>> random_samples = Tensor(np.array([0.7, 0.7, 0.7]).reshape([1, 1, 3]), mstype.float32)
@@ -11363,7 +11367,7 @@ class PromptFlashAttention(Primitive):
 For each element, 0 indicates retention and 1 indicates discard. Input tensor of shape :math:`(B, 1, S, S)`.
 - **actual_seq_lengths** (Tensor): Describe actual sequence length of each input with data type of int.
 - **actual_seq_lengths_kv** (Tensor): Describe actual sequence length of each input with data type of int.
-- **
+- **pse_shift** (Tensor) - The position encoding tensor with data type of float16 or float32.
 - **dep_scale1** (Tensor)
 - **quant_scale1** (Tensor)
 - **deq_scale2** (Tensor)
@@ -11406,7 +11410,7 @@ class PromptFlashAttention(Primitive):
         validator.check_value_type('num_key_value_heads', num_key_value_heads, [int], self.name)
         validator.check_value_type('sparse_mode', sparse_mode, [int], self.name)
         self.init_prim_io_names(inputs=["query", "key", "value", "attn_mask", "actual_seq_lengths",
-                                        "actual_seq_lengths_kv", "
+                                        "actual_seq_lengths_kv", "pse_shift", "deq_scale1", "quant_scale1",
                                         "deq_scale2", "quant_scale2", "quant_offset2"],
                                 outputs=["attention_out"])

@@ -479,8 +479,8 @@ class SparseToDenseV2(Primitive):
 Tensor, converted from sparse tensor. The dtype is same as `values`, and the shape is `output_shape`.

 Raises:
-TypeError: If the dtype of `indices` is neither
-TypeError: If the dtype of `outputshape` is neither
+TypeError: If the dtype of `indices` is neither int32 nor int64.
+TypeError: If the dtype of `outputshape` is neither int32 nor int64.
 ValueError: If the shape of `output_shape`, shape of `indices`,
 shape of `default_value` and shape of `values` don't meet the parameter description.
 ValueError: If each Element of `output_shape` is not > 0.
@@ -2382,8 +2382,8 @@ class SparseCountSparseOutput(Primitive):
 Args:
 binary_output (bool) - If ``False`` , output the number of occurrences of each value,
 if ``True`` output 1 for orresponding values. Default: ``False`` .
-minlength(Scalar) -
-maxlength(Scalar) -
+minlength(Scalar) - int type minimum value to count, Default: ``-1`` .
+maxlength(Scalar) - int type maximum value to count, Default: ``-1`` .

 Inputs:
 - **indices** (Tensor) - Tensor representing the position of the element in the sparse