mindspore-2.3.0-cp39-cp39-win_amd64.whl → mindspore-2.4.0-cp39-cp39-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mindspore/.commit_id +1 -1
- mindspore/__init__.py +3 -1
- mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
- mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
- mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
- mindspore/_checkparam.py +50 -9
- mindspore/_extends/parse/compile_config.py +41 -0
- mindspore/_extends/parse/parser.py +9 -7
- mindspore/_extends/parse/standard_method.py +52 -14
- mindspore/_extends/pijit/pijit_func_white_list.py +350 -24
- mindspore/amp.py +24 -10
- mindspore/avcodec-59.dll +0 -0
- mindspore/avdevice-59.dll +0 -0
- mindspore/avfilter-8.dll +0 -0
- mindspore/avformat-59.dll +0 -0
- mindspore/avutil-57.dll +0 -0
- mindspore/common/__init__.py +6 -4
- mindspore/common/_pijit_context.py +190 -0
- mindspore/common/_register_for_tensor.py +2 -1
- mindspore/common/_tensor_overload.py +139 -0
- mindspore/common/api.py +102 -87
- mindspore/common/dump.py +5 -6
- mindspore/common/generator.py +1 -7
- mindspore/common/hook_handle.py +14 -26
- mindspore/common/mindir_util.py +2 -2
- mindspore/common/parameter.py +46 -13
- mindspore/common/recompute.py +39 -9
- mindspore/common/sparse_tensor.py +7 -3
- mindspore/common/tensor.py +209 -29
- mindspore/communication/__init__.py +1 -1
- mindspore/communication/_comm_helper.py +38 -3
- mindspore/communication/comm_func.py +310 -55
- mindspore/communication/management.py +14 -14
- mindspore/context.py +123 -22
- mindspore/dataset/__init__.py +1 -1
- mindspore/dataset/audio/__init__.py +1 -1
- mindspore/dataset/core/config.py +7 -0
- mindspore/dataset/core/validator_helpers.py +7 -0
- mindspore/dataset/engine/cache_client.py +1 -1
- mindspore/dataset/engine/datasets.py +72 -44
- mindspore/dataset/engine/datasets_audio.py +7 -7
- mindspore/dataset/engine/datasets_standard_format.py +53 -3
- mindspore/dataset/engine/datasets_text.py +20 -20
- mindspore/dataset/engine/datasets_user_defined.py +174 -104
- mindspore/dataset/engine/datasets_vision.py +33 -33
- mindspore/dataset/engine/iterators.py +29 -0
- mindspore/dataset/engine/obs/util.py +7 -0
- mindspore/dataset/engine/queue.py +114 -60
- mindspore/dataset/engine/serializer_deserializer.py +2 -2
- mindspore/dataset/engine/validators.py +34 -14
- mindspore/dataset/text/__init__.py +1 -4
- mindspore/dataset/transforms/__init__.py +0 -3
- mindspore/dataset/utils/line_reader.py +2 -0
- mindspore/dataset/vision/__init__.py +1 -4
- mindspore/dataset/vision/utils.py +1 -1
- mindspore/dataset/vision/validators.py +2 -1
- mindspore/dnnl.dll +0 -0
- mindspore/{nn/extend → experimental/es}/__init__.py +4 -11
- mindspore/experimental/es/embedding_service.py +883 -0
- mindspore/{nn/layer → experimental/es}/embedding_service_layer.py +218 -30
- mindspore/experimental/llm_boost/__init__.py +21 -0
- mindspore/{nn/extend/layer → experimental/llm_boost/atb}/__init__.py +4 -8
- mindspore/experimental/llm_boost/atb/boost_base.py +211 -0
- mindspore/experimental/llm_boost/atb/llama_boost.py +115 -0
- mindspore/experimental/llm_boost/atb/qwen_boost.py +101 -0
- mindspore/experimental/llm_boost/register.py +129 -0
- mindspore/experimental/llm_boost/utils.py +31 -0
- mindspore/experimental/optim/adamw.py +85 -0
- mindspore/experimental/optim/optimizer.py +3 -0
- mindspore/hal/__init__.py +3 -3
- mindspore/hal/contiguous_tensors_handle.py +175 -0
- mindspore/hal/stream.py +18 -0
- mindspore/include/api/model_group.h +13 -1
- mindspore/include/api/types.h +10 -10
- mindspore/include/dataset/config.h +2 -2
- mindspore/include/dataset/constants.h +2 -2
- mindspore/include/dataset/execute.h +2 -2
- mindspore/include/dataset/vision.h +4 -0
- mindspore/jpeg62.dll +0 -0
- mindspore/log.py +1 -1
- mindspore/mindrecord/filewriter.py +68 -51
- mindspore/mindspore_backend.dll +0 -0
- mindspore/mindspore_common.dll +0 -0
- mindspore/mindspore_core.dll +0 -0
- mindspore/mindspore_glog.dll +0 -0
- mindspore/mindspore_np_dtype.dll +0 -0
- mindspore/mindspore_ops.dll +0 -0
- mindspore/mint/__init__.py +495 -46
- mindspore/mint/distributed/__init__.py +31 -0
- mindspore/mint/distributed/distributed.py +254 -0
- mindspore/mint/nn/__init__.py +266 -21
- mindspore/mint/nn/functional.py +125 -19
- mindspore/mint/nn/layer/__init__.py +39 -0
- mindspore/mint/nn/layer/activation.py +133 -0
- mindspore/mint/nn/layer/normalization.py +477 -0
- mindspore/mint/nn/layer/pooling.py +110 -0
- mindspore/mint/optim/adamw.py +28 -7
- mindspore/mint/special/__init__.py +63 -0
- mindspore/multiprocessing/__init__.py +2 -1
- mindspore/nn/__init__.py +0 -1
- mindspore/nn/cell.py +275 -93
- mindspore/nn/layer/activation.py +211 -44
- mindspore/nn/layer/basic.py +113 -3
- mindspore/nn/layer/embedding.py +120 -2
- mindspore/nn/layer/normalization.py +101 -5
- mindspore/nn/layer/padding.py +34 -48
- mindspore/nn/layer/pooling.py +161 -7
- mindspore/nn/layer/transformer.py +3 -3
- mindspore/nn/loss/__init__.py +2 -2
- mindspore/nn/loss/loss.py +84 -6
- mindspore/nn/optim/__init__.py +2 -1
- mindspore/nn/optim/adadelta.py +1 -1
- mindspore/nn/optim/adam.py +1 -1
- mindspore/nn/optim/lamb.py +1 -1
- mindspore/nn/optim/tft_wrapper.py +127 -0
- mindspore/nn/wrap/cell_wrapper.py +12 -23
- mindspore/nn/wrap/grad_reducer.py +5 -5
- mindspore/nn/wrap/loss_scale.py +17 -3
- mindspore/numpy/__init__.py +1 -1
- mindspore/numpy/array_creations.py +65 -68
- mindspore/numpy/array_ops.py +64 -60
- mindspore/numpy/fft.py +610 -75
- mindspore/numpy/logic_ops.py +11 -10
- mindspore/numpy/math_ops.py +85 -84
- mindspore/numpy/utils_const.py +4 -4
- mindspore/opencv_core452.dll +0 -0
- mindspore/opencv_imgcodecs452.dll +0 -0
- mindspore/opencv_imgproc452.dll +0 -0
- mindspore/ops/__init__.py +6 -4
- mindspore/ops/_grad_experimental/grad_comm_ops.py +47 -3
- mindspore/ops/_grad_experimental/grad_math_ops.py +0 -22
- mindspore/ops/_vmap/vmap_array_ops.py +2 -4
- mindspore/ops/_vmap/vmap_math_ops.py +17 -1
- mindspore/ops/_vmap/vmap_nn_ops.py +43 -2
- mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +85 -7
- mindspore/ops/auto_generate/gen_arg_dtype_cast.py +2 -0
- mindspore/ops/auto_generate/gen_extend_func.py +734 -13
- mindspore/ops/auto_generate/gen_ops_def.py +2420 -381
- mindspore/ops/auto_generate/gen_ops_prim.py +5196 -1659
- mindspore/ops/auto_generate/pyboost_inner_prim.py +176 -56
- mindspore/ops/composite/base.py +85 -48
- mindspore/ops/composite/multitype_ops/_compile_utils.py +1 -0
- mindspore/ops/composite/multitype_ops/not_in_impl.py +2 -2
- mindspore/ops/function/__init__.py +22 -0
- mindspore/ops/function/array_func.py +490 -153
- mindspore/ops/function/debug_func.py +113 -1
- mindspore/ops/function/fft_func.py +15 -2
- mindspore/ops/function/grad/grad_func.py +3 -2
- mindspore/ops/function/math_func.py +558 -207
- mindspore/ops/function/nn_func.py +817 -383
- mindspore/ops/function/other_func.py +3 -2
- mindspore/ops/function/random_func.py +184 -8
- mindspore/ops/function/reshard_func.py +13 -11
- mindspore/ops/function/sparse_unary_func.py +1 -1
- mindspore/ops/function/vmap_func.py +3 -2
- mindspore/ops/functional.py +24 -14
- mindspore/ops/op_info_register.py +3 -3
- mindspore/ops/operations/__init__.py +6 -1
- mindspore/ops/operations/_grad_ops.py +2 -76
- mindspore/ops/operations/_infer_ops.py +1 -1
- mindspore/ops/operations/_inner_ops.py +71 -94
- mindspore/ops/operations/array_ops.py +12 -146
- mindspore/ops/operations/comm_ops.py +42 -53
- mindspore/ops/operations/custom_ops.py +83 -19
- mindspore/ops/operations/debug_ops.py +42 -10
- mindspore/ops/operations/manually_defined/_inner.py +12 -0
- mindspore/ops/operations/manually_defined/ops_def.py +265 -10
- mindspore/ops/operations/math_ops.py +12 -223
- mindspore/ops/operations/nn_ops.py +20 -114
- mindspore/ops/operations/other_ops.py +7 -4
- mindspore/ops/operations/random_ops.py +46 -1
- mindspore/ops/primitive.py +18 -6
- mindspore/ops_generate/arg_dtype_cast.py +2 -0
- mindspore/ops_generate/gen_aclnn_implement.py +11 -11
- mindspore/ops_generate/gen_constants.py +36 -0
- mindspore/ops_generate/gen_ops.py +67 -52
- mindspore/ops_generate/gen_ops_inner_prim.py +1 -1
- mindspore/ops_generate/gen_pyboost_func.py +131 -47
- mindspore/ops_generate/op_proto.py +10 -3
- mindspore/ops_generate/pyboost_utils.py +14 -1
- mindspore/ops_generate/template.py +43 -21
- mindspore/parallel/__init__.py +3 -1
- mindspore/parallel/_auto_parallel_context.py +28 -8
- mindspore/parallel/_cell_wrapper.py +83 -0
- mindspore/parallel/_parallel_serialization.py +47 -19
- mindspore/parallel/_tensor.py +81 -11
- mindspore/parallel/_utils.py +13 -1
- mindspore/parallel/algo_parameter_config.py +5 -5
- mindspore/parallel/checkpoint_transform.py +46 -39
- mindspore/parallel/cluster/process_entity/__init__.py +1 -1
- mindspore/parallel/cluster/process_entity/_api.py +31 -23
- mindspore/parallel/cluster/process_entity/_utils.py +2 -27
- mindspore/parallel/parameter_broadcast.py +3 -4
- mindspore/parallel/shard.py +162 -31
- mindspore/parallel/transform_safetensors.py +993 -0
- mindspore/profiler/__init__.py +2 -1
- mindspore/profiler/common/constant.py +29 -0
- mindspore/profiler/common/registry.py +47 -0
- mindspore/profiler/common/util.py +28 -0
- mindspore/profiler/dynamic_profiler.py +694 -0
- mindspore/profiler/envprofiling.py +17 -19
- mindspore/profiler/parser/ascend_analysis/constant.py +18 -0
- mindspore/profiler/parser/ascend_analysis/file_manager.py +25 -4
- mindspore/profiler/parser/ascend_analysis/function_event.py +43 -19
- mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +31 -26
- mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +56 -10
- mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +55 -8
- mindspore/profiler/parser/ascend_analysis/path_manager.py +313 -0
- mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +27 -20
- mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +9 -2
- mindspore/profiler/parser/ascend_msprof_exporter.py +5 -4
- mindspore/profiler/parser/ascend_timeline_generator.py +27 -25
- mindspore/profiler/parser/base_timeline_generator.py +19 -25
- mindspore/profiler/parser/cpu_gpu_timeline_generator.py +25 -12
- mindspore/profiler/parser/framework_parser.py +1 -391
- mindspore/profiler/parser/gpu_analysis/__init__.py +14 -0
- mindspore/profiler/parser/gpu_analysis/function_event.py +44 -0
- mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +89 -0
- mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +72 -0
- mindspore/profiler/parser/memory_usage_parser.py +0 -154
- mindspore/profiler/parser/profiler_info.py +78 -6
- mindspore/profiler/profiler.py +153 -0
- mindspore/profiler/profiling.py +280 -412
- mindspore/rewrite/__init__.py +1 -2
- mindspore/rewrite/common/namespace.py +4 -4
- mindspore/rewrite/symbol_tree/symbol_tree.py +3 -3
- mindspore/run_check/_check_version.py +36 -103
- mindspore/safeguard/rewrite_obfuscation.py +591 -247
- mindspore/swresample-4.dll +0 -0
- mindspore/swscale-6.dll +0 -0
- mindspore/tinyxml2.dll +0 -0
- mindspore/train/__init__.py +4 -3
- mindspore/train/_utils.py +28 -2
- mindspore/train/amp.py +171 -53
- mindspore/train/callback/__init__.py +2 -2
- mindspore/train/callback/_callback.py +4 -4
- mindspore/train/callback/_checkpoint.py +85 -22
- mindspore/train/callback/_cluster_monitor.py +1 -1
- mindspore/train/callback/_flops_collector.py +1 -0
- mindspore/train/callback/_loss_monitor.py +3 -3
- mindspore/train/callback/_on_request_exit.py +134 -31
- mindspore/train/callback/_summary_collector.py +5 -5
- mindspore/train/callback/_tft_register.py +352 -0
- mindspore/train/dataset_helper.py +7 -3
- mindspore/train/metrics/metric.py +3 -3
- mindspore/train/metrics/roc.py +4 -4
- mindspore/train/mind_ir_pb2.py +44 -39
- mindspore/train/model.py +134 -58
- mindspore/train/serialization.py +336 -112
- mindspore/turbojpeg.dll +0 -0
- mindspore/utils/__init__.py +21 -0
- mindspore/utils/utils.py +60 -0
- mindspore/version.py +1 -1
- {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/METADATA +6 -2
- {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/RECORD +258 -252
- mindspore/include/c_api/ms/abstract.h +0 -67
- mindspore/include/c_api/ms/attribute.h +0 -197
- mindspore/include/c_api/ms/base/handle_types.h +0 -43
- mindspore/include/c_api/ms/base/macros.h +0 -32
- mindspore/include/c_api/ms/base/status.h +0 -33
- mindspore/include/c_api/ms/base/types.h +0 -283
- mindspore/include/c_api/ms/context.h +0 -102
- mindspore/include/c_api/ms/graph.h +0 -160
- mindspore/include/c_api/ms/node.h +0 -606
- mindspore/include/c_api/ms/tensor.h +0 -161
- mindspore/include/c_api/ms/value.h +0 -84
- mindspore/mindspore_shared_lib.dll +0 -0
- mindspore/nn/extend/basic.py +0 -140
- mindspore/nn/extend/embedding.py +0 -143
- mindspore/nn/extend/layer/normalization.py +0 -109
- mindspore/nn/extend/pooling.py +0 -117
- mindspore/nn/layer/embedding_service.py +0 -531
- mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +0 -93
- mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +0 -66
- mindspore/ops/extend/__init__.py +0 -53
- mindspore/ops/extend/array_func.py +0 -218
- mindspore/ops/extend/math_func.py +0 -76
- mindspore/ops/extend/nn_func.py +0 -308
- mindspore/ops/silent_check.py +0 -162
- mindspore/profiler/parser/msadvisor_analyzer.py +0 -82
- mindspore/profiler/parser/msadvisor_parser.py +0 -240
- mindspore/train/callback/_mindio_ttp.py +0 -443
- {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/WHEEL +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/entry_points.txt +0 -0
- {mindspore-2.3.0.dist-info → mindspore-2.4.0.dist-info}/top_level.txt +0 -0
mindspore/ops/function/array_func.py

@@ -32,14 +32,13 @@ from mindspore.ops.operations._inner_ops import DynamicBroadcastTo
 from mindspore.ops.operations._sequence_ops import TupleToTensor
 from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
 from mindspore.ops.operations._sequence_ops import TensorToList
-from mindspore.ops.auto_generate import OnesLikeExt, ZerosLikeExt, FillScalar, FillTensor, Arange, Chunk, UniqueDim, \
-    Unique2, SortExt, NonZero, NonZeroExt
+from mindspore.ops.auto_generate import OnesLikeExt, ZerosLikeExt, FillScalar, FillTensor, Arange, Chunk, UniqueDim, \
+    Unique2, SortExt, NonZero, NonZeroExt, Scatter, ScatterValue
 from mindspore.ops.auto_generate.gen_ops_prim import SplitTensor
 from mindspore.ops.auto_generate.gen_ops_prim import SplitWithSize, RepeatInterleaveInt, RepeatInterleaveTensor
-
+from mindspore.ops.auto_generate.pyboost_inner_prim import _PyboostSearchSortedPrim
 from mindspore.ops.operations.array_ops import (
     UniqueConsecutive,
-    SearchSorted,
     MatrixDiagV3,
     MatrixDiagPartV3,
     MatrixSetDiagV3,
@@ -58,7 +57,6 @@ from mindspore.ops.operations.array_ops import (
     ArgMaxWithValue,
     ArgMinWithValue
 )
-from mindspore.ops.operations.array_ops import TensorScatterElements
 from mindspore.common import Tensor
 from mindspore.ops._primitive_cache import _get_cache_prim
 from mindspore import _checkparam as validator
@@ -66,10 +64,12 @@ from mindspore._c_expression import Tensor as Tensor_
 from mindspore.ops._utils.utils import ms_arrange
 
 from mindspore.ops.auto_generate import cat, range, scatter_nd, deepcopy, masked_fill, diagonal, expand_dims, \
-    flip, transpose, triu, unsorted_segment_sum, diag, gather, gather_d, gather_nd, reshape, \
-    broadcast_to, strided_slice, ones, zeros, max_, min_, select
-from mindspore.ops.auto_generate
+    flip, transpose, triu, unsorted_segment_sum, diag, gather, gather_d, gather_nd, reshape, masked_select, \
+    broadcast_to, strided_slice, ones, zeros, max_, min_, select, zero_
+from mindspore.ops.auto_generate import tensor_scatter_elements as tensor_scatter_elements_ext
+from mindspore.ops.auto_generate.gen_ops_prim import scatter_add_ext_op, slice_ext_op, gather_d_op
 from mindspore.ops.operations.manually_defined import tile, rank, scalar_cast
+from mindspore.ops.auto_generate.pyboost_inner_prim import _PyboostOneHotExtPrim, tril_ext_impl
 
 arg_max_with_value_ = ArgMaxWithValue()
 arg_min_with_value_ = ArgMinWithValue()
@@ -87,7 +87,6 @@ gather_nd_ = P.GatherNd()
 ger_ = P.Ger()
 index_fill_ = IndexFill()
 lstsq_ = Lstsq()
-masked_select_ = P.MaskedSelect()
 matrix_band_part_ = P.array_ops.MatrixBandPart()
 ones_ = P.Ones()
 population_count_ = P.PopulationCount()
@@ -104,6 +103,7 @@ scatter_min_ = P.ScatterMin()
 scatter_mul_ = P.ScatterMul()
 scatter_nd_ = P.ScatterNd()
 scatter_update_ = P.ScatterUpdate()
+search_sorted_ = _PyboostSearchSortedPrim()
 shape_ = P.Shape()
 split_tensor = SplitTensor()
 split_with_size = SplitWithSize()
@@ -122,18 +122,20 @@ transpose_ = P.Transpose()
 tuple_to_array_ = P.TupleToArray()
 tuple_to_tensor_ = TupleToTensor()
 unique_ = P.Unique()
-unique_with_pad_ = P.UniqueWithPad()
 unsorted_segment_max_ = P.UnsortedSegmentMax()
 unsorted_segment_min_ = P.UnsortedSegmentMin()
 unsorted_segment_prod_ = P.UnsortedSegmentProd()
 unsorted_segment_sum_ = P.UnsortedSegmentSum()
 ones_like_ = P.OnesLike()
+one_hot_ext_impl = _PyboostOneHotExtPrim()
 zeros_like_ = P.ZerosLike()
 ones_like_ext_ = OnesLikeExt()
 zeros_like_ext_ = ZerosLikeExt()
 fill_scalar_ = FillScalar()
 fill_tensor_ = FillTensor()
 sort_ext_ = SortExt()
+scatter_ = Scatter()
+scatter_value_ = ScatterValue()
 arange_ = Arange()
 chunk_ = Chunk()
 repeat_interleave_int_ = RepeatInterleaveInt()
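The module-level assignments above (for example `search_sorted_`, `scatter_`, `scatter_value_`) follow an instantiate-once pattern: the primitive object is built a single time at import and reused by every call site. A minimal pure-Python sketch of that pattern, using a hypothetical Primitive class rather than MindSpore's real one:

class Primitive:
    def __init__(self, name):
        self.name = name          # imagine costly kernel registration here
    def __call__(self, *args):
        return (self.name, args)  # stand-in for launching the kernel

scatter_sketch = Primitive("Scatter")     # module level: built once at import

def scatter_sketch_fn(input_, axis, index, src):
    return scatter_sketch(input_, axis, index, src)  # reused on every call

print(scatter_sketch_fn([0, 0], 0, [1], [5])[0])     # Scatter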
@@ -199,7 +201,8 @@ def _get_max_type(start, end, step):
 
     type_map = {'Float64': '3', 'Float32': '2', "<class 'float'>": '2', 'Int64': '1', "<class 'int'>": '1',
                 'Int32': '0'}
-    type_map_reverse = {'3': mstype.float64, '2': mstype.float32, '1': mstype.int64, '0': mstype.int32}
+    type_map_reverse = {'3': mstype.float64,
+                        '2': mstype.float32, '1': mstype.int64, '0': mstype.int32}
     type_level = [type_map.get(i) for i in arg_type_map]
     max_level = builtins.max(type_level)
     return type_map_reverse.get(max_level)
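The type_map/type_map_reverse pair above implements a small type-promotion lattice: every dtype name maps to a rank and the highest rank wins. A standalone re-statement in pure Python (the example inputs are assumed for illustration):

type_map = {'Float64': '3', 'Float32': '2', "<class 'float'>": '2',
            'Int64': '1', "<class 'int'>": '1', 'Int32': '0'}
type_map_reverse = {'3': 'float64', '2': 'float32', '1': 'int64', '0': 'int32'}
arg_type_map = ['Int32', "<class 'float'>", 'Int64']  # example inputs (assumed)
max_level = max(type_map[t] for t in arg_type_map)
print(type_map_reverse[max_level])                    # float32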
@@ -329,7 +332,7 @@ def arange_ext(start=0, end=None, step=1, *, dtype=None):
         [7 5 3]
         >>> print(output.dtype)
         Int64
-        >>> output = ops.arange_ext(12, 2, -1, dtype=ms.bfloat16)
+        >>> output = ops.arange_ext(12, 2, -1, dtype=ms.bfloat16)
         >>> print(output)
         [12. 11. 10. 9. 8. 7. 6. 5. 4. 3.]
         >>> print(output.dtype)
@@ -347,9 +350,9 @@ def concat(tensors, axis=0):
     Tutorial Examples:
         - `Tensor - Tensor Operation <https://mindspore.cn/tutorials/en/master/beginner/tensor.html#tensor-operation>`_
         - `Vision Transformer Image Classification - Building ViT as a whole
-          <https://mindspore.cn/tutorials/
+          <https://mindspore.cn/tutorials/en/master/cv/vit.html#building-vit-as-a-whole>`_
         - `Sentiment Classification Implemented by RNN - Dense
-          <https://mindspore.cn/tutorials/
+          <https://mindspore.cn/tutorials/en/master/nlp/sentiment_analysis.html#dense>`_
     """
     return cat(tensors, axis)
 
@@ -451,20 +454,25 @@ def hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None):
     [0.08 0.39785218 0.91214782 0.91214782 0.39785218 0.08]
     """
     if not isinstance(window_length, int):
-        raise TypeError(f"For array function 'hamming_window', 'window_length' must be int, but got"
+        raise TypeError(f"For array function 'hamming_window', 'window_length' must be int, but got"
                         f" {type(window_length)}.")
     if window_length < 0:
-        raise ValueError(f"For array function 'hamming_window', 'window_length' must be non negative number.")
+        raise ValueError(
+            f"For array function 'hamming_window', 'window_length' must be non negative number.")
     if not isinstance(periodic, bool):
-        raise TypeError(f"For array function 'hamming_window', 'periodic' must be bool, but got {type(periodic)}.")
+        raise TypeError(
+            f"For array function 'hamming_window', 'periodic' must be bool, but got {type(periodic)}.")
     if not isinstance(alpha, float):
-        raise TypeError(f"For array function 'hamming_window', 'alpha' must be float, but got {type(alpha)}.")
+        raise TypeError(
+            f"For array function 'hamming_window', 'alpha' must be float, but got {type(alpha)}.")
     if not isinstance(beta, float):
-        raise TypeError(f"For array function 'hamming_window', 'beta' must be float, but got {type(beta)}.")
+        raise TypeError(
+            f"For array function 'hamming_window', 'beta' must be float, but got {type(beta)}.")
     if window_length <= 1:
         return Tensor(np.ones(window_length))
     if dtype is not None and dtype not in mstype.float_type:
-        raise TypeError(f"For array function 'hamming_window', 'dtype' must be floating point dtypes, but got {dtype}.")
+        raise TypeError(
+            f"For array function 'hamming_window', 'dtype' must be floating point dtypes, but got {dtype}.")
 
     dtype = mstype.float32 if dtype is None else dtype
     op = _get_cache_prim(P.HammingWindow)(periodic, alpha, beta, dtype)
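The [0.08 0.39785218 ...] docstring output above can be checked numerically, assuming the usual symmetric Hamming definition w[n] = alpha - beta*cos(2*pi*n/(N-1)) with alpha=0.54 and beta=0.46:

import numpy as np
n = np.arange(6)
w = 0.54 - 0.46 * np.cos(2 * np.pi * n / 5)
print(np.round(w, 8))  # [0.08 0.39785218 0.91214782 0.91214782 0.39785218 0.08]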
@@ -641,7 +649,8 @@ def _check_axis_type(axis, type_int=True, type_tuple=True, type_list=True, ops_name="ops"):
     if (type_tuple and isinstance(axis, tuple)) or (type_list and isinstance(axis, list)):
         for ax in axis:
             if not isinstance(ax, int):
-                raise TypeError(f"For {ops_name}, each axis must be integer, but got {type(ax)} in {axis}.")
+                raise TypeError(
+                    f"For {ops_name}, each axis must be integer, but got {type(ax)} in {axis}.")
         return True
 
     type_str = ""
@@ -651,7 +660,8 @@ def _check_axis_type(axis, type_int=True, type_tuple=True, type_list=True, ops_name="ops"):
         type_str += "tuple, "
     if type_list:
         type_str += "list, "
-    raise TypeError(f"For {ops_name}, the axis should be {type_str}, but got {type(axis)}.")
+    raise TypeError(
+        f"For {ops_name}, the axis should be {type_str}, but got {type(axis)}.")
 
 
 def one_hot(indices, depth, on_value=1, off_value=0, axis=-1):
@@ -720,8 +730,8 @@ def fill(type, shape, value):  # pylint: disable=redefined-outer-name
 
     Args:
         type (mindspore.dtype): The specified type of output tensor. The data type only supports
-            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore
+            `bool_ <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ and
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_ .
         shape (Union(Tensor, tuple[int])): The specified shape of output tensor.
         value (Union(Tensor, number.Number, bool)): Value to fill the returned tensor.
 
@@ -786,11 +796,13 @@ def full(size, fill_value, *, dtype=None):  # pylint: disable=redefined-outer-name
         [0. 0. 0.]]
     """
     if not isinstance(size, (list, tuple)):
-        raise TypeError(f"For 'ops.full', 'size' must be a tuple or list of ints, but got {type(size)}.")
+        raise TypeError(
+            f"For 'ops.full', 'size' must be a tuple or list of ints, but got {type(size)}.")
     if dtype is None:
         dtype = mstype.int64
     if dtype not in mstype.all_types:
-        raise TypeError(f"For 'ops.full', 'dtype' must be mindspore.type, but got {dtype}.")
+        raise TypeError(
+            f"For 'ops.full', 'dtype' must be mindspore.type, but got {dtype}.")
     if isinstance(size, list):
         size = tuple(size)
     return ops.fill(dtype, size, fill_value)
@@ -802,7 +814,8 @@ def full_ext(size, fill_value, *, dtype=None):  # pylint: disable=redefined-outer-name
 
     Args:
         size (Union(tuple[int], list[int])): The specified shape of output tensor.
-        fill_value (number.Number): Value to fill the returned tensor.
+        fill_value (Union(number.Number, Tensor)): Value to fill the returned tensor. It can be a Scalar number, a 0-D
+            Tensor, or a 1-D Tensor with only one element.
 
     Keyword Args:
         dtype (mindspore.dtype): The specified type of output tensor. `bool_` and `number` are supported, for details,
@@ -820,18 +833,16 @@ def full_ext(size, fill_value, *, dtype=None):  # pylint: disable=redefined-outer-name
 
     Examples:
         >>> from mindspore import ops
-        >>> output = ops.
+        >>> output = ops.full_ext((2, 2), 1)
         >>> print(output)
         [[1. 1.]
          [1. 1.]]
-        >>> output = ops.
+        >>> output = ops.full_ext((3, 3), 0)
         >>> print(output)
         [[0. 0. 0.]
          [0. 0. 0.]
          [0. 0. 0.]]
     """
-    if isinstance(fill_value, Tensor):
-        return fill_tensor_(size, fill_value, dtype)
     return fill_scalar_(size, fill_value, dtype)
 
 
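For intuition, a NumPy analogue of the repaired full_ext example above (not MindSpore code; np.full has the same fill semantics):

import numpy as np
print(np.full((2, 2), 1, dtype=np.float32))
# [[1. 1.]
#  [1. 1.]]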
@@ -872,7 +883,8 @@ def full_like(input, fill_value, *, dtype=None):
          [0. 0. 0.]]
     """
     if not isinstance(input, Tensor):
-        raise TypeError(f"For ops.full_like, the argument 'x' must be tensor, but got {type(input)}")
+        raise TypeError(
+            f"For ops.full_like, the argument 'x' must be tensor, but got {type(input)}")
     if dtype is None:
         dtype = input.dtype
     return full(input.shape, fill_value, dtype=dtype)
@@ -914,19 +926,24 @@ def chunk(input, chunks, axis=0):
     Tensor(shape=[3], dtype=Float32, value= [ 6.00000000e+00, 7.00000000e+00, 8.00000000e+00]))
     """
     if not isinstance(input, Tensor):
-        raise TypeError(f'For ops.chunk parameter `input` must be Tensor, but got {type(input)}')
+        raise TypeError(
+            f'For ops.chunk parameter `input` must be Tensor, but got {type(input)}')
     _check_axis_type(axis, True, False, False, "ops.chunk")
     arr_axis = _canonicalize_axis(axis, input.ndim)
 
     if not isinstance(chunks, int):
-        raise TypeError(f"For ops.chunk type of argument `chunks` should be integer, but got {type(chunks)}")
+        raise TypeError(
+            f"For ops.chunk type of argument `chunks` should be integer, but got {type(chunks)}")
     if chunks <= 0:
-        raise ValueError(f"For ops.chunk parameter 'chunks' must be greater than 0, but got {chunks}")
+        raise ValueError(
+            f"For ops.chunk parameter 'chunks' must be greater than 0, but got {chunks}")
 
     arr_shape = input.shape
     length_along_dim = arr_shape[arr_axis]
 
-    if chunks > length_along_dim:
+    if length_along_dim == 0:
+        res = _get_cache_prim(P.Split)(arr_axis)(input)
+    elif chunks > length_along_dim:
         res = _get_cache_prim(P.Split)(arr_axis, length_along_dim)(input)
     elif length_along_dim % chunks == 0:
         res = _get_cache_prim(P.Split)(arr_axis, chunks)(input)
@@ -939,9 +956,11 @@ def chunk(input, chunks, axis=0):
         size1 = _tuple_setitem(arr_shape, arr_axis, length1)
         start2 = _tuple_setitem(start1, arr_axis, length1)
         size2 = _tuple_setitem(arr_shape, arr_axis, length2)
-        res = _get_cache_prim(P.Split)(arr_axis, true_chunks)(tensor_slice(input, start1, size1))
+        res = _get_cache_prim(P.Split)(arr_axis, true_chunks)(
+            tensor_slice(input, start1, size1))
         if length2:
-            res += _get_cache_prim(P.Split)(arr_axis, 1)(tensor_slice(input, start2, size2))
+            res += _get_cache_prim(P.Split)(arr_axis,
+                                            1)(tensor_slice(input, start2, size2))
     return res
 
 
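A pure-Python sketch of the uneven-split arithmetic the chunk hunk above implements: the block size is rounded up, so fewer than `chunks` pieces can come back (the helper below is illustrative only, assuming torch-style ceil semantics):

def chunk_sizes(length, chunks):
    block = -(-length // chunks)          # ceil(length / chunks)
    full, rest = divmod(length, block)
    return [block] * full + ([rest] if rest else [])

print(chunk_sizes(9, 4))    # [3, 3, 3] -> only 3 chunks come back
print(chunk_sizes(10, 3))   # [4, 4, 2]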
@@ -952,6 +971,9 @@ def chunk_ext(input, chunks, dim=0):
     Note:
         This function may return less than the specified number of chunks!
 
+    .. warning::
+        This is an experimental API that is subject to change or deletion.
+
     Args:
         input (Tensor): A Tensor to be cut.
         chunks (int): Number of sub-tensors to cut.
@@ -1260,11 +1282,14 @@ def unique_ext(input, sorted=True, return_inverse=False, return_counts=False, dim=None):
         [0 1 2 1]
     """
     if not F.isconstant(return_inverse) or not F.isconstant(return_counts):
-        raise ValueError(f"For 'unique_ext', 'return_inverse' and 'return_counts' cannot be mutable")
+        raise ValueError(
+            f"For 'unique_ext', 'return_inverse' and 'return_counts' cannot be mutable")
     if dim is None:
-        y, inverse, counts = unique2_(input, sorted, return_inverse, return_counts)
+        y, inverse, counts = unique2_(
+            input, sorted, return_inverse, return_counts)
     else:
-        validator.check_value_type("return_counts", return_counts, [bool], "unique_ext")
+        validator.check_value_type(
+            "return_counts", return_counts, [bool], "unique_ext")
         y, inverse, counts = unique_dim_(input, sorted, return_inverse, dim)
     if return_inverse and return_counts:
         return y, inverse, counts
@@ -1285,6 +1310,9 @@ def unique_with_pad(x, pad_num):
     the UniqueWithPad operator will fill the `y` Tensor with the `pad_num` specified by the user
     to make it have the same shape as the Tensor `idx`.
 
+    .. warning::
+        :func:`mindspore.ops.unique_with_pad` is deprecated from version 2.4 and will be removed in a future version.
+
     Args:
         x (Tensor): The tensor need to be unique. Must be 1-D vector with types: int32, int64.
         pad_num (int): Pad num. The data type is an int.
@@ -1297,10 +1325,10 @@ def unique_with_pad(x, pad_num):
 
     Raises:
         TypeError: If dtype of `x` is neither int32 nor int64.
-        ValueError: If
+        ValueError: If `x` is not a 1-D Tensor.
 
     Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
+        Deprecated
 
     Examples:
         >>> import mindspore
@@ -1319,7 +1347,7 @@ def unique_with_pad(x, pad_num):
         >>> print(idx)
         [0 1 1 2 3 3]
     """
-    return unique_with_pad_(x, pad_num)
+    return _get_cache_prim(P.UniqueWithPad)()(x, pad_num)
 
 
 def unique_consecutive(input, return_idx=False, return_counts=False, axis=None):
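Since unique_with_pad is deprecated in 2.4, a hedged migration sketch built only from ops shown elsewhere in this diff (ops.unique, ops.full, ops.concat); it assumes a 1-D int32/int64 input, as the old operator required:

import mindspore as ms
import numpy as np
from mindspore import Tensor, ops

x = Tensor(np.array([1, 1, 2, 2, 3, 3]), ms.int32)
y, idx = ops.unique(x)
pad = ops.full((x.shape[0] - y.shape[0],), 8, dtype=y.dtype)  # pad_num = 8
print(ops.concat((y, pad)))   # [1 2 3 8 8 8]
print(idx)                    # [0 0 1 1 2 2]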
@@ -1369,7 +1397,8 @@ def unique_consecutive(input, return_idx=False, return_counts=False, axis=None):
 
     if not isinstance(input, (Tensor, Tensor_)):
         raise TypeError("For 'unique_consecutive', 'input' must be Tensor.")
-    unique_consecutive_op = _get_cache_prim(UniqueConsecutive)(return_idx, return_counts, axis)
+    unique_consecutive_op = _get_cache_prim(
+        UniqueConsecutive)(return_idx, return_counts, axis)
     output, idx, counts = unique_consecutive_op(input)
     if return_idx and return_counts:
         return output, idx, counts
@@ -1400,7 +1429,7 @@ def searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=None, sorter=None):
             set to ``"left"`` while `right` is ``True``. Default: ``None`` .
         sorter(Tensor, optional): if provided, a tensor matching the shape of the unsorted sorted_sequence
             containing a sequence of indices that sort it in the ascending order on the innermost
-            dimension and type must be int64. Default: ``None`` .
+            dimension and type must be int64. Default: ``None`` . CPU and GPU can only use default values.
 
     Returns:
         Tensor containing the indices from the innermost dimension of `sorted_sequence` such that,
@@ -1437,8 +1466,7 @@ def searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=None, sorter=None):
                          f"got side of left while right was True.")
     if side == "right":
         right = True
-    search_sorted_
-    return search_sorted_(sorted_sequence, values, sorter)
+    return search_sorted_(sorted_sequence, values, sorter, dtype, right)
 
 
 def ger(input, vec2):
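A usage sketch for the right/side behaviour documented above; the expected outputs are computed by hand from the insertion-point definition:

import mindspore as ms
import numpy as np
from mindspore import Tensor, ops

seq = Tensor(np.array([1, 3, 5, 7, 9]), ms.int64)
vals = Tensor(np.array([3, 6, 9]), ms.int64)
print(ops.searchsorted(seq, vals))                # [1 3 4] leftmost slots
print(ops.searchsorted(seq, vals, side="right"))  # [2 3 5] rightmost slots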
@@ -1488,7 +1516,7 @@ def size(input_x):
 
     Args:
         input_x (Tensor): Input parameters, the shape of tensor is :math:`(x_1, x_2, ..., x_R)`. The data type is
-            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore
+            `number <https://www.mindspore.cn/docs/en/master/api_python/mindspore/mindspore.dtype.html>`_.
 
     Returns:
         int. A scalar representing the elements' size of `input_x`, tensor is the number of elements
@@ -1681,7 +1709,8 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
 
     def check_dim_valid(start_dim, end_dim):
         if start_dim > end_dim:
-            raise ValueError("For 'flatten', 'start_dim' cannot come after 'end_dim'.")
+            raise ValueError(
+                "For 'flatten', 'start_dim' cannot come after 'end_dim'.")
 
     def canonicalize_axis(axis, x_rank):
         ndim = x_rank if x_rank != 0 else 1
@@ -1693,7 +1722,8 @@ def flatten(input, order='C', *, start_dim=1, end_dim=-1):
         raise TypeError(f"For 'flatten', argument 'input' must be Tensor.")
     if not isinstance(start_dim, int) or not isinstance(end_dim, int) or \
             isinstance(start_dim, bool) or isinstance(end_dim, bool):
-        raise TypeError(f"For 'flatten', both 'start_dim' and 'end_dim' must be int.")
+        raise TypeError(
+            f"For 'flatten', both 'start_dim' and 'end_dim' must be int.")
     check_flatten_order_const(order)
     if order == 'F':
         x_rank = rank_(input)
@@ -3269,13 +3299,13 @@ def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none"):
     Note:
         If some values of the `indices` exceed the upper or lower bounds of the index of `input_x`, instead of raising
         an index error, the corresponding `updates` will not be updated to `input_x`.
+        The backward is supported only for the case `updates.shape == indices.shape`.
 
     Args:
         input_x (Tensor): The target tensor. The rank must be at least 1.
         indices (Tensor): The index of `input_x` to do scatter operation whose data type must be mindspore.int32 or
             mindspore.int64. Same rank as `input_x`. And accepted range is [-s, s) where s is the size along axis.
-        updates (Tensor): The tensor doing the scatter operation with `input_x
-            the same shape as `indices`.
+        updates (Tensor): The tensor doing the scatter operation with `input_x`.
         axis (int): Which axis to scatter. Accepted range is [-r, r) where r = rank(input_x). Default: ``0``.
         reduction (str): Which reduction operation to scatter, supports ``"none"`` , ``"add"`` . Default: ``"none"``.
             When `reduction` is set to ``"none"``, `updates` will be assigned to `input_x` according to `indices`.
@@ -3287,7 +3317,6 @@ def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none"):
     Raises:
         TypeError: If `indices` is neither int32 nor int64.
         ValueError: If anyone of the rank among `input_x`, `indices` and `updates` less than 1.
-        ValueError: If the shape of `updates` is not equal to the shape of `indices`.
         ValueError: If the rank of `updates` is not equal to the rank of `input_x`.
         RuntimeError: If the data type of `input_x` and `updates` conversion of Parameter
             is required when data type conversion of Parameter is not supported.
@@ -3319,8 +3348,7 @@ def tensor_scatter_elements(input_x, indices, updates, axis=0, reduction="none"):
      [ 5 5 14]
      [ 7 15 11]]
     """
-
-    return _tensor_scatter_elements(input_x, indices, updates)
+    return tensor_scatter_elements_ext(input_x, indices, updates, axis, reduction)
 
 
 def scatter(input, axis, index, src):
@@ -3328,24 +3356,26 @@ def scatter(input, axis, index, src):
     Update the value in `src` to `input` according to the specified index.
     Refer to :func:`mindspore.ops.tensor_scatter_elements` for more details.
 
+    .. note::
+        The backward is supported only for the case `src.shape == index.shape`.
+
     Args:
         input (Tensor): The target tensor. The rank of `input` must be at least 1.
         axis (int): Which axis to scatter. Accepted range is [-r, r) where r = rank(input).
-        index (Tensor): The index to do update operation whose data
-            mindspore.int64. Same rank as `input` . And accepted range is [-s, s) where s is the size along axis.
-        src (Tensor): The
-
+        index (Tensor): The index to do update operation whose data must be positive number with type of mindspore.int32
+            or mindspore.int64. Same rank as `input` . And accepted range is [-s, s) where s is the size along axis.
+        src (Tensor, float): The data doing the update operation with `input`. Can be a tensor with the same data type
+            as `input` or a float number to scatter.
 
     Returns:
-
+        The backward is supported only for the case `src.shape == index.shape` when `src` is a tensor.
 
     Raises:
         TypeError: If `index` is neither int32 nor int64.
-        ValueError: If
-        ValueError: If the shape of `src` is not equal to the shape of `index` .
+        ValueError: If rank of any of `input` , `index` and `src` less than 1.
         ValueError: If the rank of `src` is not equal to the rank of `input` .
-
-
+        TypeError: If the data type of `input` and `src` have different dtypes.
+        RuntimeError: If `index` has negative elements.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -3381,7 +3411,9 @@ def scatter(input, axis, index, src):
      [0. 0. 0. 0. 0.]
      [0. 0. 0. 0. 0.]]
     """
-
+    if isinstance(src, Tensor):
+        return scatter_(input, axis, index, src)
+    return scatter_value_(input, axis, index, src)
 
 
 def scatter_add_ext(input, dim, index, src):
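A NumPy reference sketch of the scatter semantics for axis=0 with reduction "none" (out[index[i][j]][j] = src[i][j]), mirroring the new dispatch above where a Tensor src goes to Scatter and a float goes to ScatterValue:

import numpy as np

def scatter_ref(inp, axis, index, src):
    out = inp.copy()
    assert axis == 0                       # keep the sketch minimal
    for i in range(index.shape[0]):
        for j in range(index.shape[1]):
            val = src[i, j] if isinstance(src, np.ndarray) else src
            out[index[i, j], j] = val
    return out

x = np.zeros((3, 3))
idx = np.array([[0, 1, 2]])
print(scatter_ref(x, 0, idx, np.array([[7.0, 8.0, 9.0]])))   # Tensor-src path
print(scatter_ref(x, 0, idx, 5.0))                           # float-src path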
@@ -3516,7 +3548,8 @@ def slice_scatter(input, src, axis=0, start=None, end=None, step=1):
     _check_is_tensor("input", input, "slice_scatter")
     _check_is_tensor("src", src, "slice_scatter")
     input_shape = input.shape
-    input_rank, index, axis = _get_slice_scatter_const(input_shape, axis, start, end, step)
+    input_rank, index, axis = _get_slice_scatter_const(
+        input_shape, axis, start, end, step)
 
     src_shape = src.shape
     index_shape = input_shape[:axis] + (len(index),) + input_shape[axis + 1:]
@@ -3638,7 +3671,8 @@ def space_to_batch_nd(input_x, block_size, paddings):
     [[[3.]]]
     [[[4.]]]]
     """
-    _space_to_batch_nd = _get_cache_prim(P.SpaceToBatchND)(block_size, paddings)
+    _space_to_batch_nd = _get_cache_prim(
+        P.SpaceToBatchND)(block_size, paddings)
     return _space_to_batch_nd(input_x)
 
 
@@ -4330,9 +4364,11 @@ def index_select(input, axis, index):
         [[ 8. 9. 10. 11.]]]
     """
     if not (isinstance(input, Tensor) and isinstance(index, Tensor)):
-        raise TypeError(f"For 'index_select', `input` and `index` must be all tensors.")
+        raise TypeError(
+            f"For 'index_select', `input` and `index` must be all tensors.")
     if index.ndim != 1:
-        raise ValueError(f"For 'index_select', the dimension of `index` must be 1, but got {index.ndim}")
+        raise ValueError(
+            f"For 'index_select', the dimension of `index` must be 1, but got {index.ndim}")
     axis = _check_check_axis_in_range(axis, input.ndim)
     return gather_(input, index, axis)
 
@@ -4425,9 +4461,11 @@ def is_nonzero(input):
     True
     """
     if not isinstance(input, Tensor):
-        raise TypeError(f'For is_nonzero, the input must be a Tensor, but got {type(input)}.')
+        raise TypeError(
+            f'For is_nonzero, the input must be a Tensor, but got {type(input)}.')
     if input.numel() != 1:
-        raise ValueError(f"For is_nonzero, the numel of input must be 1, but got {input.numel()}.")
+        raise ValueError(
+            f"For is_nonzero, the numel of input must be 1, but got {input.numel()}.")
     out = ops.squeeze(input)
     return bool(out)
 
@@ -4622,38 +4660,6 @@ def tuple_to_array(input_x):
     return tuple_to_tensor_(input_x, dtype)
 
 
-def masked_select(input, mask):
-    """
-    Returns a new 1-D Tensor which indexes the `x` tensor according to the boolean `mask`.
-    The shapes of the `mask` tensor and the `x` tensor don't need to match, but they must be broadcastable.
-
-    Args:
-        input (Tensor): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-        mask (Tensor[bool]): The shape of tensor is :math:`(x_1, x_2, ..., x_R)`.
-
-    Returns:
-        A 1-D Tensor, with the same type as `input`.
-
-    Raises:
-        TypeError: If `input` or `mask` is not a Tensor.
-        TypeError: If dtype of `mask` is not bool.
-
-    Supported Platforms:
-        ``Ascend`` ``GPU`` ``CPU``
-
-    Examples:
-        >>> import numpy as np
-        >>> import mindspore
-        >>> from mindspore import Tensor, ops
-        >>> x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
-        >>> mask = Tensor(np.array([1, 0, 1, 0]), mindspore.bool_)
-        >>> output = ops.masked_select(x, mask)
-        >>> print(output)
-        [1 3]
-    """
-    return masked_select_(input, mask)
-
-
 def diagflat(input, offset=0):
     r"""
     Create a 2-D Tensor which diagonal is the flattened `input` .
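The masked_select wrapper is deleted here, but the name is not gone: the import hunk at the top of this file now pulls masked_select from mindspore.ops.auto_generate, so the call pattern from the removed docstring still applies:

import mindspore
import numpy as np
from mindspore import Tensor, ops

x = Tensor(np.array([1, 2, 3, 4]), mindspore.int64)
mask = Tensor(np.array([1, 0, 1, 0]), mindspore.bool_)
print(ops.masked_select(x, mask))   # [1 3]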
@@ -4687,9 +4693,11 @@ def diagflat(input, offset=0):
          [0. 0. 0.]]
     """
     if not isinstance(input, Tensor):
-        raise TypeError(f"For diagflat, the input x must be tensor, but got {type(input)}")
+        raise TypeError(
+            f"For diagflat, the input x must be tensor, but got {type(input)}")
     if not isinstance(offset, int):
-        raise TypeError(f"For diagflat, the offset must be int, but got {type(offset)}")
+        raise TypeError(
+            f"For diagflat, the offset must be int, but got {type(offset)}")
     offset_abs = abs(offset)
     if input.size == 0:
         return zeros((offset_abs, offset_abs), input.dtype)
@@ -4759,7 +4767,9 @@ def _split_int(x, split_size_or_sections, axis):
     """
     arr_shape = x.shape
     length_along_dim = arr_shape[axis]
-    if split_size_or_sections > length_along_dim:
+    if length_along_dim == 0:
+        res = _get_cache_prim(P.Split)(axis)(x)
+    elif split_size_or_sections > length_along_dim:
         res = _get_cache_prim(P.Split)(axis, 1)(x)
     elif length_along_dim % split_size_or_sections == 0:
         sections = length_along_dim // split_size_or_sections
@@ -4773,7 +4783,7 @@ def _split_int(x, split_size_or_sections, axis):
         start2 = _tuple_setitem(start1, axis, length1)
         size2 = _tuple_setitem(arr_shape, axis, length2)
         res = _get_cache_prim(P.Split)(axis, num_sections)(tensor_slice(x, start1, size1)) + \
-            _get_cache_prim(P.Split)(axis, 1)(tensor_slice(x, start2, size2))
+            _get_cache_prim(P.Split)(axis, 1)(tensor_slice(x, start2, size2))
     return res
 
 
@@ -4798,6 +4808,7 @@ def _split_sub_tensors(x, split_size_or_sections, axis):
         sub_tensors.append(sliced_tensor)
     return sub_tensors
 
+
 def split(tensor, split_size_or_sections, axis=0):
     """
     Splits the Tensor into chunks along the given axis.
@@ -4839,7 +4850,8 @@ def split(tensor, split_size_or_sections, axis=0):
     if not isinstance(tensor, Tensor):
         raise TypeError(f'expect `tensor` is a Tensor, but got {type(tensor)}')
     if type(axis) is not int:
-        raise TypeError(f"Type of Argument `axis` should be integer but got {type(axis)}")
+        raise TypeError(
+            f"Type of Argument `axis` should be integer but got {type(axis)}")
     arr_axis = _canonicalize_axis(axis, tensor.ndim)
 
     if type(split_size_or_sections) is int:
@@ -4851,7 +4863,8 @@ def split(tensor, split_size_or_sections, axis=0):
     elif isinstance(split_size_or_sections, (list, tuple)):
         for item in split_size_or_sections:
             if type(item) is not int:
-                raise TypeError(f"Each element in 'split_size_or_sections' should be integer, but got {type(item)}.")
+                raise TypeError(
+                    f"Each element in 'split_size_or_sections' should be integer, but got {type(item)}.")
             if item < 0:
                 raise TypeError(f"Each element in 'split_size_or_sections' should be non-negative, "
                                 f"but got {split_size_or_sections}.")
@@ -4861,10 +4874,11 @@ def split(tensor, split_size_or_sections, axis=0):
                          f"but got {sum(split_size_or_sections)}.")
         res = _split_sub_tensors(tensor, split_size_or_sections, arr_axis)
     else:
-        raise TypeError(f"Type of Argument `split_size_or_sections` should be integer, tuple(int) or list(int), "
+        raise TypeError(f"Type of Argument `split_size_or_sections` should be integer, tuple(int) or list(int), "
                         f"but got {type(split_size_or_sections)}")
     return tuple(res)
 
+
 def split_ext(tensor, split_size_or_sections, axis=0):
     """
     Splits the Tensor into chunks along the given axis.
@@ -4908,14 +4922,14 @@ def split_ext(tensor, split_size_or_sections, axis=0):
     elif isinstance(split_size_or_sections, (list, tuple)):
         res = split_with_size(tensor, split_size_or_sections, axis)
     else:
-        raise TypeError(f"Type of Argument `split_size_or_sections` should be integer, tuple(int) or list(int), "
+        raise TypeError(f"Type of Argument `split_size_or_sections` should be integer, tuple(int) or list(int), "
                         f"but got {type(split_size_or_sections)}")
     return res
 
 
 def tril(input, diagonal=0):  # pylint: disable=redefined-outer-name
     """
-    Returns the lower triangle part of
+    Returns the lower triangle part of `input` (elements that contain the diagonal and below),
     and set the other elements to zeros.
 
     Args:
@@ -4925,13 +4939,13 @@ def tril(input, diagonal=0):  # pylint: disable=redefined-outer-name
             indicating the main diagonal.
 
     Returns:
-        Tensor, the same shape and data type as the input
+        Tensor, the same shape and data type as the `input`.
 
     Raises:
-        TypeError: If `
+        TypeError: If `input` is not a Tensor.
         TypeError: If `diagonal` is not an int.
-        TypeError: If the type of `
-        ValueError: If the rank of `
+        TypeError: If the type of `input` is neither number nor bool.
+        ValueError: If the rank of `input` is less than 2.
 
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -4970,10 +4984,70 @@ def tril(input, diagonal=0):  # pylint: disable=redefined-outer-name
      [10 11 0 0]
      [14 15 16 0]]
     """
-    tril_ = Tril(diagonal)
+    tril_ = _get_cache_prim(Tril)(diagonal)
     return tril_(input)
 
 
+def tril_ext(input, diagonal=0):
+    """
+    Returns the lower triangle part of `input` (elements that contain the diagonal and below),
+    and set the other elements to zeros.
+
+    Args:
+        input (Tensor): A Tensor with shape :math:`(x_1, x_2, ..., x_R)`. The rank must be at least 2.
+            Supporting all number types including bool.
+        diagonal (int, optional): An optional attribute indicates the diagonal to consider, default: 0,
+            indicating the main diagonal.
+
+    Returns:
+        Tensor, the same shape and data type as the `input`.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+        TypeError: If `diagonal` is not an int.
+        TypeError: If the type of `input` is neither number nor bool.
+        ValueError: If the rank of `input` is less than 2.
+
+    Supported Platforms:
+        ``Ascend``
+
+    Examples:
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> x = Tensor(np.array([[ 1, 2, 3, 4],
+        ...                      [ 5, 6, 7, 8],
+        ...                      [10, 11, 12, 13],
+        ...                      [14, 15, 16, 17]]))
+        >>> result = ops.function.array_func.tril_ext(x)
+        >>> print(result)
+        [[ 1 0 0 0]
+         [ 5 6 0 0]
+         [10 11 12 0]
+         [14 15 16 17]]
+        >>> x = Tensor(np.array([[ 1, 2, 3, 4],
+        ...                      [ 5, 6, 7, 8],
+        ...                      [10, 11, 12, 13],
+        ...                      [14, 15, 16, 17]]))
+        >>> result = ops.function.array_func.tril_ext(x, diagonal=1)
+        >>> print(result)
+        [[ 1 2 0 0]
+         [ 5 6 7 0]
+         [10 11 12 13]
+         [14 15 16 17]]
+        >>> x = Tensor(np.array([[ 1, 2, 3, 4],
+        ...                      [ 5, 6, 7, 8],
+        ...                      [10, 11, 12, 13],
+        ...                      [14, 15, 16, 17]]))
+        >>> result = ops.function.array_func.tril_ext(x, diagonal=-1)
+        >>> print(result)
+        [[ 0 0 0 0]
+         [ 5 0 0 0]
+         [10 11 0 0]
+         [14 15 16 0]]
+    """
+    return tril_ext_impl(input, diagonal)
+
+
 @_primexpr
 def _canonicalize_axis(axis, ndim):
     """
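NumPy cross-check for the tril/tril_ext examples above (np.tril applies the same diagonal mask):

import numpy as np
x = np.array([[ 1,  2,  3,  4],
              [ 5,  6,  7,  8],
              [10, 11, 12, 13],
              [14, 15, 16, 17]])
print(np.tril(x, k=-1))   # matches the diagonal=-1 example above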
@@ -4992,7 +5066,8 @@ def _canonicalize_axis(axis, ndim):
         if not isinstance(ax, int):
             raise TypeError(f'axis should be integers, not {type(ax)}')
         if not -ndim <= ax < ndim:
-            raise ValueError(f'axis {ax} is out of bounds for array of dimension {ndim}')
+            raise ValueError(
+                f'axis {ax} is out of bounds for array of dimension {ndim}')
 
     def canonicalizer(ax):
         return ax + ndim if ax < 0 else ax
@@ -5072,7 +5147,9 @@ def _tensor_split_sub_int(x, indices_or_sections, axis):
     """
     arr_shape = x.shape
     length_along_dim = arr_shape[axis]
-    if indices_or_sections > length_along_dim:
+    if length_along_dim == 0:
+        res = _get_cache_prim(P.Split)(axis)(x)
+    elif indices_or_sections > length_along_dim:
         res = _get_cache_prim(P.Split)(axis, length_along_dim)(x)
         indices_or_sections_n = [length_along_dim, length_along_dim + 1]
         res2 = _tensor_split_sub_tensors(x, indices_or_sections_n, axis)
@@ -5083,14 +5160,16 @@ def _tensor_split_sub_int(x, indices_or_sections, axis):
     else:
         num_long_tensor = length_along_dim % indices_or_sections
         num_short_tensor = indices_or_sections - num_long_tensor
-        length1 = num_long_tensor * (length_along_dim // indices_or_sections + 1)
+        length1 = num_long_tensor * \
+            (length_along_dim // indices_or_sections + 1)
         length2 = length_along_dim - length1
         start1 = _list_comprehensions(rank_(x), 0, True)
         size1 = _tuple_setitem(arr_shape, axis, length1)
         start2 = _tuple_setitem(start1, axis, length1)
         size2 = _tuple_setitem(arr_shape, axis, length2)
         res = _get_cache_prim(P.Split)(axis, num_long_tensor)(tensor_slice(x, start1, size1)) + \
-            _get_cache_prim(P.Split)(axis, num_short_tensor)(tensor_slice(x, start2, size2))
+            _get_cache_prim(P.Split)(axis, num_short_tensor)(
+                tensor_slice(x, start2, size2))
     return res
 
 
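A pure-Python sketch of the long/short block arithmetic in _tensor_split_sub_int above: the first (length % n) pieces get one extra element, matching numpy.array_split:

def tensor_split_sizes(length, n):
    num_long = length % n
    base = length // n
    return [base + 1] * num_long + [base] * (n - num_long)

print(tensor_split_sizes(10, 3))   # [4, 3, 3]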
@@ -5143,21 +5222,25 @@ def tensor_split(input, indices_or_sections, axis=0):
|
|
|
5143
5222
|
raise TypeError(f'expect `x` is a Tensor, but got {type(input)}')
|
|
5144
5223
|
|
|
5145
5224
|
if type(axis) is not int:
|
|
5146
|
-
raise TypeError(
|
|
5225
|
+
raise TypeError(
|
|
5226
|
+
f"Type of Argument `axis` should be integer but got {type(axis)}")
|
|
5147
5227
|
handle_axis = _canonicalize_axis(axis, input.ndim)
|
|
5148
5228
|
if type(indices_or_sections) is int:
|
|
5149
5229
|
if indices_or_sections > 0:
|
|
5150
|
-
res = _tensor_split_sub_int(
|
|
5230
|
+
res = _tensor_split_sub_int(
|
|
5231
|
+
input, indices_or_sections, handle_axis)
|
|
5151
5232
|
else:
|
|
5152
5233
|
raise ValueError(f"For tensor_split, the value of 'indices_or_sections' must be more than zero "
|
|
5153
5234
|
f"but got {indices_or_sections}")
|
|
5154
5235
|
elif isinstance(indices_or_sections, (list, tuple)):
|
|
5155
5236
|
for item in indices_or_sections:
|
|
5156
5237
|
if type(item) is not int:
|
|
5157
|
-
raise TypeError(
|
|
5158
|
-
|
|
5238
|
+
raise TypeError(
|
|
5239
|
+
f"Each element in 'indices_or_sections' should be integer, but got {type(item)}.")
|
|
5240
|
+
res = _tensor_split_sub_tensors(
|
|
5241
|
+
input, indices_or_sections, handle_axis)
|
|
5159
5242
|
else:
|
|
5160
|
-
raise TypeError(f"Type of Argument `indices_or_sections` should be integer, tuple(int) or list(int), "
|
|
5243
|
+
raise TypeError(f"Type of Argument `indices_or_sections` should be integer, tuple(int) or list(int), "
|
|
5161
5244
|
f"but got {type(indices_or_sections)}")
|
|
5162
5245
|
|
|
5163
5246
|
return res
|
|
@@ -5193,7 +5276,8 @@ def vsplit(input, indices_or_sections):
|
|
|
5193
5276
|
if not isinstance(input, Tensor):
|
|
5194
5277
|
raise TypeError(f'expect `x` is a Tensor, but got {type(input)}')
|
|
5195
5278
|
if input.ndim < 1:
|
|
5196
|
-
raise ValueError(
|
|
5279
|
+
raise ValueError(
|
|
5280
|
+
f'vsplit expect `x` is a Tensor with at least 1 dimension, but got {input.ndim}')
|
|
5197
5281
|
return tensor_split(input, indices_or_sections, 0)
|
|
5198
5282
|
|
|
5199
5283
|
|
|
@@ -5229,7 +5313,8 @@ def hsplit(input, indices_or_sections):
|
|
|
5229
5313
|
if not isinstance(input, Tensor):
|
|
5230
5314
|
raise TypeError(f'expect `x` is a Tensor, but got {type(input)}')
|
|
5231
5315
|
if input.ndim < 2:
|
|
5232
|
-
raise ValueError(
|
|
5316
|
+
raise ValueError(
|
|
5317
|
+
f'hsplit expect `x` is a Tensor with at least 2 dimension, but got {input.ndim}')
|
|
5233
5318
|
|
|
5234
5319
|
return tensor_split(input, indices_or_sections, 1)
|
|
5235
5320
|
|
|
@@ -5262,7 +5347,8 @@ def dsplit(input, indices_or_sections):
     if not isinstance(input, Tensor):
         raise TypeError(f'expect `x` is a Tensor, but got {type(input)}')
     if input.ndim < 3:
-        raise ValueError(
+        raise ValueError(
+            f'dsplit expect `x` is a Tensor with at least 3 dimension, but got {input.ndim}')
 
     return tensor_split(input, indices_or_sections, 2)
 
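The three wrappers above differ only in the rank check and the axis they forward to `tensor_split` (0, 1 and 2 respectively). A quick sketch of the resulting splits, assuming a rank-3 input:

>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import ops
>>> x = ms.Tensor(np.zeros((4, 6, 2), np.float32))
>>> [t.shape for t in ops.vsplit(x, 2)]   # splits axis 0
[(2, 6, 2), (2, 6, 2)]
>>> [t.shape for t in ops.hsplit(x, 3)]   # splits axis 1
[(4, 2, 2), (4, 2, 2), (4, 2, 2)]
>>> [t.shape for t in ops.dsplit(x, 2)]   # splits axis 2
[(4, 6, 1), (4, 6, 1)]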
@@ -5354,7 +5440,8 @@ def max(input, axis=None, keepdims=False, *, initial=None, where=None):  # pylint: disable=redefined-outer-name
     if axis is None:
         return (max_(input), Tensor(0, dtype=mstype.int64))
     if initial is not None and not isinstance(initial, numbers.Number):
-        raise TypeError(
+        raise TypeError(
+            f"For 'max', 'initial' must be a scalar, but got {type(initial)}")
     if axis is not None and not isinstance(axis, int):
         raise TypeError(f"For 'max', 'axis' must be int, but got {type(axis)}")
     input = _init_and_select_elem(input, initial, where, ops.maximum)
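As the hunk above shows, `ops.max` returns a `(values, index)` pair, with the index fixed to 0 when `axis` is None. A small usage sketch (printed formatting approximate):

>>> import mindspore as ms
>>> from mindspore import ops
>>> x = ms.Tensor([[1.0, 9.0, 3.0], [7.0, 5.0, 6.0]])
>>> values, index = ops.max(x, axis=1)
>>> print(values)
[9. 7.]
>>> print(index)
[1 0]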
@@ -5408,7 +5495,6 @@ def argmax(input, dim=None, keepdim=False):
     return out
 
 
-
 def min(input, axis=None, keepdims=False, *, initial=None, where=None):  # pylint: disable=redefined-outer-name
     """
     Calculates the minimum value along with the given axis for the input tensor. It returns the minimum values and
@@ -5471,7 +5557,8 @@ def min(input, axis=None, keepdims=False, *, initial=None, where=None):  # pylint: disable=redefined-outer-name
     if axis is None:
         return (min_(input), Tensor(0, dtype=mstype.int64))
     if initial is not None and not isinstance(initial, numbers.Number):
-        raise TypeError(
+        raise TypeError(
+            f"For 'min', 'initial' must be a scalar, but got {type(initial)}")
     if axis is not None and not isinstance(axis, int):
         raise TypeError(f"For 'min', 'axis' must be int, but got {type(axis)}")
     input = _init_and_select_elem(input, initial, where, ops.minimum)
@@ -5584,7 +5671,8 @@ def narrow(input, axis, start, length):
     validator.check_value_type("input", input, Tensor, "narrow")
     validator.check_axis_in_range(axis, input.ndim)
     validator.check_int_range(start, 0, input.shape[axis], validator.INC_LEFT)
-    validator.check_int_range(
+    validator.check_int_range(
+        length, 1, input.shape[axis] - start, validator.INC_BOTH)
 
     begins = [0] * input.ndim
     begins[axis] = start
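The re-wrapped check above encodes `narrow`'s contract: `start` must lie in `[0, shape[axis])` and `length` in `[1, shape[axis] - start]`. For instance:

>>> import mindspore as ms
>>> from mindspore import ops
>>> x = ms.Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> print(ops.narrow(x, 0, 1, 2))   # 2 rows starting at row 1
[[4 5 6]
 [7 8 9]]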
@@ -5631,7 +5719,7 @@ def narrow_ext(input, dim, start, length):
         [ 8 9]]
     """
     validator.check_value_type("input", input, Tensor, "narrow")
-    return slice_ext_op(input, dim, start, start+length, 1)
+    return slice_ext_op(input, dim, start, start + length, 1)
 
 
 def topk(input, k, dim=None, largest=True, sorted=True):
@@ -5825,7 +5913,8 @@ def _check_unfold_params(param, param_name, param_size):
     """Check the parameters of unfold op."""
     validator.check_value_type(param_name, param, [int, tuple, list], 'unfold')
     param = (param, param) if isinstance(param, int) else param
-    validator.check(param_name + " size", len(param), "",
+    validator.check(param_name + " size", len(param), "",
+                    param_size, validator.IN, 'unfold')
     if param_name == "padding":
         validator.check_non_negative_int_sequence(param, param_name, 'unfold')
     else:
@@ -5928,7 +6017,8 @@ def _check_diagonal_axes(dim1, dim2, x_ndim):
 def _check_is_tensor(param_name, input, cls_name):
     """Returns True if input is Tensor."""
     if not isinstance(input, Tensor):
-        raise TypeError(
+        raise TypeError(
+            f"For {cls_name}, {param_name} must be a Tensor, but got {type(input)}.")
 
 
 @_primexpr
@@ -6241,19 +6331,22 @@ def column_stack(tensors):
         [1 2]]
     """
     if not isinstance(tensors, (list, tuple)):
-        raise TypeError(
+        raise TypeError(
+            f"For column_stack, the input must be list or tuple of tensors, but got {type(tensors)}.")
 
     trans_x = ()
     for tensor in tensors:
         if not isinstance(tensor, Tensor):
-            raise TypeError(
+            raise TypeError(
+                f"For column_stack, the input element must be tensor, but got {type(tensor)}.")
         if tensor.ndim < 1:
             tensor = expand_dims(tensor, 0)
         if tensor.ndim == 1:
             tensor = expand_dims(tensor, 1)
         trans_x += (tensor,)
     if not trans_x:
-        raise ValueError(
+        raise ValueError(
+            f"For column_stack, the input must have at least 1 tensor, but got 0.")
     _concat = _get_cache_prim(P.Concat)(1)
     return _concat(trans_x)
 
@@ -6289,17 +6382,20 @@ def hstack(tensors):
     [1. 1. 1. 2. 2. 2.]
     """
     if not isinstance(tensors, (list, tuple)):
-        raise TypeError(
+        raise TypeError(
+            f"For hstack, the input must be list or tuple, but got {type(tensors)}.")
 
     tuple_of_tensor = ()
     for tensor in tensors:
         if not isinstance(tensor, Tensor):
-            raise TypeError(
+            raise TypeError(
+                f"For hstack, the input element must be tensor, but got {type(tensor)}.")
         if tensor.ndim < 1:
             tensor = expand_dims(tensor, 0)
         tuple_of_tensor += (tensor,)
     if not tuple_of_tensor:
-        raise ValueError(
+        raise ValueError(
+            "For hstack, the input must have at least 1 tensor, but got 0.")
     if tuple_of_tensor[0].ndim <= 1:
         _concat = _get_cache_prim(P.Concat)(0)
         return _concat(tuple_of_tensor)
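Both hunks above only re-wrap the type and emptiness checks; the stacking behavior is unchanged. A quick contrast sketch of the two functions on 1-D inputs:

>>> import mindspore as ms
>>> from mindspore import ops
>>> a, b = ms.Tensor([1, 2]), ms.Tensor([3, 4])
>>> print(ops.column_stack((a, b)))   # 1-D inputs become columns
[[1 3]
 [2 4]]
>>> print(ops.hstack((a, b)))         # 1-D inputs concatenate along axis 0
[1 2 3 4]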
@@ -6328,7 +6424,8 @@ def _get_moved_perm(ndim, source, destination):
     Helper function for movedim, returns permutation after moving axis
     from source to destination.
     """
-    dest_sorted_idx = [i for i, _ in sorted(
+    dest_sorted_idx = [i for i, _ in sorted(
+        enumerate(destination), key=operator.itemgetter(1))]
     axis_orig = [i for i in builtins.range(0, ndim) if i not in source]
 
     k = 0
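The comprehension above sorts `destination` so moved axes are inserted lowest destination first, which is the same strategy `numpy.moveaxis` uses. A pure-Python reference of the idea (`moved_perm_ref` is a hypothetical helper, not from the diff):

>>> def moved_perm_ref(ndim, source, destination):
...     # axes that are not being moved, in their original order
...     perm = [i for i in range(ndim) if i not in source]
...     # insert each moved axis at its destination, lowest destination first
...     for dest, src in sorted(zip(destination, source)):
...         perm.insert(dest, src)
...     return tuple(perm)
...
>>> moved_perm_ref(3, (0,), (2,))   # move axis 0 to position 2
(1, 2, 0)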
@@ -6455,7 +6552,8 @@ def swapaxes(input, axis0, axis1):
     (4, 3, 2)
     '''
     if not isinstance(input, Tensor):
-        raise TypeError(
+        raise TypeError(
+            f'For ops.swapaxes, parameter `input` must be Tensor, but got {type(input)}')
 
     axis0, axis1 = _check_swapaxes_axis((axis0, axis1), input.ndim)
     if axis0 == axis1:
@@ -6466,10 +6564,10 @@ def swapaxes(input, axis0, axis1):
     perm = ops.make_range(0, input.ndim)
     if axis1 + 1 < input.ndim:
         new_perm = perm[0:axis0] + perm[axis1:axis1 + 1] + \
-
+            perm[axis0 + 1:axis1] + perm[axis0:axis0 + 1] + perm[axis1 + 1:]
     else:
         new_perm = perm[0:axis0] + perm[axis1:axis1 + 1] + \
-
+            perm[axis0 + 1:axis1] + perm[axis0:axis0 + 1]
 
     return transpose_(input, new_perm)
 
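The slice arithmetic above stitches `perm` back together with `axis0` and `axis1` exchanged; the `axis1 + 1 < input.ndim` branch only decides whether a tail slice exists. In plain Python the resulting tuple is simply a two-element swap (hypothetical sketch, not the graph-mode code):

>>> def swapped_perm(ndim, axis0, axis1):
...     perm = list(range(ndim))
...     perm[axis0], perm[axis1] = perm[axis1], perm[axis0]
...     return tuple(perm)
...
>>> swapped_perm(3, 0, 2)   # matches the (4, 3, 2) example in the docstring
(2, 1, 0)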
@@ -6515,13 +6613,15 @@ def _check_is_int(arg_value, arg_name, op_name):
 
 @_primexpr
 def _check_positive_int(arg_value, arg_name, op_name):
-    arg_value = validator.check_int_range(
+    arg_value = validator.check_int_range(
+        arg_value, 0, 2147483647, validator.INC_RIGHT, arg_name, op_name)
     return arg_value
 
 
 @constexpr
 def _check_axis_range(arg_value, limit, arg_name, op_name):
-    arg_value = validator.check_int_range(
+    arg_value = validator.check_int_range(
+        arg_value, -limit, limit, validator.INC_LEFT, arg_name, op_name)
     return arg_value
 
 
@@ -6539,6 +6639,14 @@ def _cal_reshape(x_shape, rep, axis):
     return tuple(x_reshape)
 
 
+@_primexpr
+def _check_rank_range(x_rank, limit, arg_name, op_name):
+    if x_rank > limit:
+        raise ValueError(
+            f"For {op_name}, the rank of {arg_name} should be less than or equal to {limit}, but got {x_rank}.")
+    return x_rank
+
+
 def repeat_interleave(input, repeats, axis=None):
     """
     Repeat elements of a tensor along an axis, like `numpy.repeat`.
@@ -6583,6 +6691,9 @@ def repeat_interleave_ext(input, repeats, dim=None, output_size=None):
     r"""
     Repeat elements of a tensor along an axis, like `numpy.repeat`.
 
+    .. warning::
+        Only support on Atlas A2 training series.
+
     Args:
         input (Tensor): The tensor to repeat values for. Must be of type: float16,
             float32, int8, uint8, int16, int32, or int64.
@@ -6621,9 +6732,13 @@ def repeat_elements(x, rep, axis=0):
     """
     Repeat elements of a tensor along an axis, like `numpy.repeat` .
 
+    Note:
+        It is recommended to use :func:`mindspore.mint.repeat_interleave`, the dimension of input `x` can support
+        a maximum of 8, and get better performance.
+
     Args:
-        x (Tensor): The tensor to repeat values for. Must be of type: float16,
-
+        x (Tensor): The tensor to repeat values for. Must be of type: float16, float32, int8, uint8, int16, int32,
+            or int64. The rank of `x` must be less than or equal to 7.
         rep (int): The number of times to repeat, must be positive.
         axis (int): The axis along which to repeat. Default: 0.
 
@@ -6632,6 +6747,9 @@ def repeat_elements(x, rep, axis=0):
         :math:`(s1, s2, ..., sn)` and axis is i, the output will have shape :math:`(s1, s2, ..., si * rep, ..., sn)`.
         The output type will be the same as the type of `x`.
 
+    Raises:
+        ValueError: If the rank of `x` is greater than 7.
+
     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
 
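The new Note steers users toward `mindspore.mint.repeat_interleave`, which accepts inputs up to rank 8, while `repeat_elements` is now capped at rank 7. A side-by-side sketch, assuming the mint API is available on the target device:

>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import ops, mint
>>> x = ms.Tensor(np.array([[0, 1, 2], [3, 4, 5]]), ms.int32)
>>> ops.repeat_elements(x, rep=2, axis=0).shape
(4, 3)
>>> mint.repeat_interleave(x, 2, dim=0).shape
(4, 3)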
@@ -6658,6 +6776,7 @@ def repeat_elements(x, rep, axis=0):
     rep = _check_positive_int(rep, "rep", "repeat_elements")
     axis = _check_is_int(axis, "axis", "repeat_elements")
     x_rank = rank_(x)
+    x_rank = _check_rank_range(x_rank, 7, "x", "repeat_elements")
     axis = _check_axis_range(axis, x_rank, "axis", "repeat_elements")
     axis = axis + x.ndim if axis < 0 else axis
     expand_axis = axis + 1
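With `_check_rank_range` wired in, an input of rank 8 or higher now fails fast instead of erroring deep inside the reshape pipeline. Roughly:

>>> import numpy as np
>>> import mindspore as ms
>>> from mindspore import ops
>>> x8 = ms.Tensor(np.zeros((1,) * 8, np.float32))   # rank-8 input
>>> ops.repeat_elements(x8, 2, 0)
Traceback (most recent call last):
    ...
ValueError: For repeat_elements, the rank of x should be less than or equal to 7, but got 8.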
@@ -6722,7 +6841,8 @@ def sequence_mask(lengths, maxlen=None):
         [[ True True False False ]
          [ True True True True ]]]
     """
-    const_utils.check_type_valid(
+    const_utils.check_type_valid(
+        ops.dtype(lengths), [mstype.int64, mstype.int32], 'lengths')
 
     if maxlen is None:
         flatten_data = reshape_(lengths, (-1,))
@@ -6733,7 +6853,8 @@ def sequence_mask(lengths, maxlen=None):
     maxlen = _check_positive_int(maxlen, "maxlen", "sequence_mask")
     maxlen = scalar_to_tensor_(maxlen, mstype.int32)
 
-    range_vector = range_(scalar_to_tensor_(0, mstype.int32),
+    range_vector = range_(scalar_to_tensor_(0, mstype.int32),
+                          maxlen, scalar_to_tensor_(1, mstype.int32))
     mask = expand_dims(lengths, -1)
     result = range_vector < mask
     return result
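The re-wrapped lines above build a `[0, maxlen)` range vector and broadcast-compare it against the expanded lengths; that single comparison is the whole mask. For instance:

>>> import mindspore as ms
>>> from mindspore import ops
>>> print(ops.sequence_mask(ms.Tensor([1, 3]), 4))
[[ True False False False]
 [ True  True  True False]]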
@@ -6747,6 +6868,221 @@ def top_k(input_x, k, sorted=True):
     return top_k_(input_x, k)
 
 
+def gather_ext(input, dim, index):
+    r"""
+    Gather data from a tensor by indices.
+
+    .. math::
+        output[(i_0, i_1, ..., i_{dim}, i_{dim+1}, ..., i_n)] =
+        input[(i_0, i_1, ..., index[(i_0, i_1, ..., i_{dim}, i_{dim+1}, ..., i_n)], i_{dim+1}, ..., i_n)]
+
+    .. warning::
+        On Ascend, the behavior is unpredictable in the following cases:
+
+        - the value of `index` is not in the range `[-input.shape[dim], input.shape[dim])` in forward;
+        - the value of `index` is not in the range `[0, input.shape[dim])` in backward.
+
+    Args:
+        input (Tensor): The target tensor to gather values.
+        dim (int): the axis to index along, must be in range `[-input.rank, input.rank)`.
+        index (Tensor): The index tensor, with int32 or int64 data type. A valid `index` should be:
+
+            - `index.rank == input.rank`;
+            - for `axis != dim`, `index.shape[axis] <= input.shape[axis]`;
+            - the value of `index` is in range `[-input.shape[dim], input.shape[dim])`.
+
+    Returns:
+        Tensor, has the same type as `input` and the same shape as `index`.
+
+    Raises:
+        ValueError: If the shape of `index` is illegal.
+        ValueError: If `dim` is not in `[-input.rank, input.rank)`.
+        ValueError: If the value of `index` is out of the valid range.
+        TypeError: If the type of `index` is illegal.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> from mindspore.ops.function.array_func import gather_ext
+        >>> input = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
+        >>> index = Tensor(np.array([[0, 0], [1, 1]]), mindspore.int32)
+        >>> output = gather_ext(input, 1, index)
+        >>> print(output)
+        [[-0.1 -0.1]
+         [0.5 0.5]]
+    """
+    return gather_d_op(input, dim, index)
+
+
+def max_ext(input, dim=None, keepdim=False):
+    """
+    Calculates the maximum value along with the given dimension for the input tensor.
+
+    Args:
+        input (Tensor): The input tensor, can be any dimension. Complex tensor is not supported for now.
+        dim (int, optional): The dimension to reduce. Default: ``None`` .
+        keepdim (bool, optional): Whether to reduce dimension, if true, the output will keep same dimension
+            with the input, the output will reduce dimension if false. Default: ``False`` .
+
+    Returns:
+        Tensor if `dim` is the default value ``None`` , the maximum value of input tensor, with the shape :math:`()` ,
+        and same dtype as `input`.
+
+        tuple (Tensor) if `dim` is not the default value ``None`` , tuple of 2 tensors, containing the maximum
+        value of the input tensor along the given dimension `dim` and the corresponding index.
+
+        - **values (Tensor)** - The maximum value of input tensor along the given dimension `dim`, with same dtype as
+          `input`. If `keepdim` is ``True`` , the shape of output tensors is :math:`(input_1, input_2, ...,
+          input_{axis-1}, 1, input_{axis+1}, ..., input_N)` . Otherwise, the shape is :math:`(input_1, input_2, ...,
+          input_{axis-1}, input_{axis+1}, ..., input_N)` .
+        - **index (Tensor)** - The index for the maximum value of the input tensor along the given dimension `dim`, with
+          the same shape as `values`.
+
+    Raises:
+        ValueError: If `dim` is the default value ``None`` and `keepdim` is not ``False`` .
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> from mindspore.ops.function.array_func import max_ext
+        >>> y = Tensor(np.array([[0.0, 0.3, 0.4, 0.5, 0.1],
+        ...                      [3.2, 0.4, 0.1, 2.9, 4.0]]), mindspore.float32)
+        >>> output, index = max_ext(y, 0, True)
+        >>> print(output, index)
+        [[3.2 0.4 0.4 2.9 4. ]] [[1 1 0 1 1]]
+    """
+    if dim is None:
+        if keepdim is not False:
+            raise ValueError(
+                f"For 'max', the `keepdim` must be False when the `dim` is None, but got {keepdim}")
+        return max_(input)
+    argmax_with_value_op = _get_cache_prim(ArgMaxWithValue)(dim, keepdim)
+    indices, values = argmax_with_value_op(input)
+    return values, indices
+
+
+def min_ext(input, dim=None, keepdim=False):
+    """
+    Calculates the minimum value along with the given dimension for the input tensor.
+
+    Args:
+        input (Tensor): The input tensor, can be any dimension. Complex tensor is not supported for now.
+        dim (int, optional): The dimension to reduce. Default: ``None`` .
+        keepdim (bool, optional): Whether to reduce dimension, if true, the output will keep same dimension
+            with the input, the output will reduce dimension if false. Default: ``False`` .
+
+    Returns:
+        Tensor if `dim` is the default value ``None`` , the minimum value of input tensor, with the shape :math:`()` ,
+        and same dtype as `input`.
+
+        tuple (Tensor) if `dim` is not the default value ``None`` , tuple of 2 tensors, containing the minimum value
+        of the input tensor along the given dimension `dim` and the corresponding index.
+
+        - **values (Tensor)** - The minimum value of input tensor along the given dimension `dim`, with same dtype as
+          `input`. If `keepdim` is ``True`` , the shape of output tensors is :math:`(input_1, input_2, ...,
+          input_{axis-1}, 1, input_{axis+1}, ..., input_N)` . Otherwise, the shape is :math:`(input_1, input_2, ...,
+          input_{axis-1}, input_{axis+1}, ..., input_N)` .
+        - **index (Tensor)** - The index for the minimum value of the input tensor along the given dimension `dim`,
+          with the same shape as `values`.
+
+    Raises:
+        ValueError: If `dim` is the default value ``None`` and `keepdim` is not ``False`` .
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import Tensor, ops
+        >>> from mindspore.ops.function.array_func import min_ext
+        >>> x = Tensor(np.array([0.0, 0.4, 0.6, 0.7, 0.1]), mindspore.float32)
+        >>> output, index = min_ext(x, 0, keepdim=True)
+        >>> print(output, index)
+        [0.0] [0]
+    """
+    if dim is None:
+        if keepdim is not False:
+            raise ValueError(
+                f"For 'min', the `keepdim` must be False when the `dim` is None, but got {keepdim}")
+        return min_(input)
+    argmin_with_value_op = _get_cache_prim(ArgMinWithValue)(dim, keepdim)
+    indices, values = argmin_with_value_op(input)
+    return values, indices
+
+
+def one_hot_ext(tensor, num_classes):
+    r"""
+    Computes a one-hot tensor.
+
+    The locations represented by tensor in `tensor` take value `1`, while all
+    other locations take value `0`.
+
+    Args:
+        tensor (Tensor): A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
+            Data type must be int32 or int64.
+        num_classes (int): A scalar defining the depth of the one-hot dimension.
+
+    Returns:
+        Tensor, one-hot tensor.
+
+    Raises:
+        TypeError: If `num_classes` is not an int.
+        TypeError: If dtype of `tensor` is not int32 or int64.
+        ValueError: If `num_classes` is less than 0.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> from mindspore import ops
+        >>> from mindspore import Tensor
+        >>> from mindspore.ops.function.array_func import one_hot_ext
+        >>> tensor = Tensor(np.array([0, 1, 2]), mindspore.int32)
+        >>> num_classes = 3
+        >>> output = one_hot_ext(tensor, num_classes)
+        >>> print(output)
+        [[1. 0. 0.]
+         [0. 1. 0.]
+         [0. 0. 1.]]
+    """
+    on_value = Tensor(1, dtype=tensor.dtype)
+    off_value = Tensor(0, dtype=tensor.dtype)
+    return one_hot_ext_impl(tensor, num_classes, on_value, off_value, -1)
+
+
+def from_numpy(array):
+    r"""
+    Convert numpy array to Tensor.
+    If the data is not C contiguous, the data will be copied to C contiguous to construct the tensor.
+    Otherwise, the tensor will be constructed using this numpy array without copy.
+
+    Args:
+        array (numpy.array): The input array.
+
+    Returns:
+        Tensor, has the same data type as input array.
+
+    Examples:
+        >>> import numpy as np
+        >>> import mindspore as ms
+        >>> x = np.array([1, 2])
+        >>> output = ms.from_numpy(x)
+        >>> print(output)
+        [1 2]
+    """
+    return Tensor.from_numpy(array)
+
+
 __all__ = [
     'unique',
     'unique_with_pad',
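For readers mapping the new `gather_ext` onto familiar tools: its indexing rule is the same as `numpy.take_along_axis` (and `torch.gather`). A NumPy reference sketch reproducing the docstring example:

>>> import numpy as np
>>> a = np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]], np.float32)
>>> idx = np.array([[0, 0], [1, 1]])
>>> np.take_along_axis(a, idx, axis=1)   # same result as gather_ext(input, 1, index)
array([[-0.1, -0.1],
       [ 0.5,  0.5]], dtype=float32)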
@@ -6763,6 +7099,7 @@ __all__ = [
     'ones_like',
     'zeros',
     'zeros_like',
+    'zero_',
     'shape',
     'shape_',
     'reverse',