mindspore 2.3.0rc1-cp39-cp39-manylinux1_x86_64.whl → 2.3.0rc2-cp39-cp39-manylinux1_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic; see the registry page for details.

Files changed (226)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +1 -1
  3. mindspore/_akg/akg/utils/tbe_codegen_utils.py +13 -3
  4. mindspore/_c_dataengine.cpython-39-x86_64-linux-gnu.so +0 -0
  5. mindspore/_c_expression.cpython-39-x86_64-linux-gnu.so +0 -0
  6. mindspore/_checkparam.py +20 -0
  7. mindspore/_extends/parse/parser.py +1 -1
  8. mindspore/_extends/parse/standard_method.py +6 -5
  9. mindspore/_mindspore_offline_debug.cpython-39-x86_64-linux-gnu.so +0 -0
  10. mindspore/amp.py +5 -5
  11. mindspore/bin/cache_admin +0 -0
  12. mindspore/bin/cache_server +0 -0
  13. mindspore/boost/boost_cell_wrapper.py +1 -1
  14. mindspore/boost/group_loss_scale_manager.py +1 -1
  15. mindspore/common/__init__.py +4 -2
  16. mindspore/common/_register_for_recompute.py +48 -0
  17. mindspore/common/_stub_tensor.py +1 -0
  18. mindspore/common/api.py +56 -4
  19. mindspore/common/dtype.py +5 -3
  20. mindspore/common/dump.py +2 -2
  21. mindspore/common/hook_handle.py +51 -4
  22. mindspore/common/initializer.py +1 -1
  23. mindspore/common/jit_config.py +17 -6
  24. mindspore/common/parameter.py +7 -2
  25. mindspore/common/recompute.py +247 -0
  26. mindspore/common/sparse_tensor.py +2 -2
  27. mindspore/common/symbol.py +1 -1
  28. mindspore/common/tensor.py +74 -36
  29. mindspore/communication/__init__.py +3 -3
  30. mindspore/communication/management.py +30 -30
  31. mindspore/context.py +28 -15
  32. mindspore/dataset/__init__.py +5 -5
  33. mindspore/dataset/audio/__init__.py +2 -2
  34. mindspore/dataset/audio/transforms.py +51 -51
  35. mindspore/dataset/callback/ds_callback.py +2 -2
  36. mindspore/dataset/engine/cache_client.py +1 -1
  37. mindspore/dataset/engine/datasets.py +3 -3
  38. mindspore/dataset/engine/datasets_audio.py +14 -14
  39. mindspore/dataset/engine/datasets_standard_format.py +3 -3
  40. mindspore/dataset/engine/datasets_text.py +38 -38
  41. mindspore/dataset/engine/datasets_user_defined.py +3 -3
  42. mindspore/dataset/engine/datasets_vision.py +68 -68
  43. mindspore/dataset/text/__init__.py +3 -3
  44. mindspore/dataset/text/transforms.py +26 -26
  45. mindspore/dataset/transforms/__init__.py +1 -1
  46. mindspore/dataset/vision/__init__.py +3 -3
  47. mindspore/dataset/vision/transforms.py +92 -92
  48. mindspore/dataset/vision/utils.py +1 -1
  49. mindspore/experimental/optim/adadelta.py +2 -2
  50. mindspore/experimental/optim/adagrad.py +2 -2
  51. mindspore/experimental/optim/adam.py +2 -2
  52. mindspore/experimental/optim/adamax.py +2 -2
  53. mindspore/experimental/optim/adamw.py +2 -2
  54. mindspore/experimental/optim/asgd.py +2 -2
  55. mindspore/experimental/optim/lr_scheduler.py +24 -20
  56. mindspore/experimental/optim/nadam.py +2 -2
  57. mindspore/experimental/optim/optimizer.py +1 -1
  58. mindspore/experimental/optim/radam.py +2 -2
  59. mindspore/experimental/optim/rmsprop.py +2 -2
  60. mindspore/experimental/optim/rprop.py +2 -2
  61. mindspore/experimental/optim/sgd.py +2 -2
  62. mindspore/hal/stream.py +2 -0
  63. mindspore/include/mindapi/base/types.h +5 -0
  64. mindspore/lib/libdnnl.so.2 +0 -0
  65. mindspore/lib/libmindspore.so +0 -0
  66. mindspore/lib/libmindspore_backend.so +0 -0
  67. mindspore/lib/libmindspore_common.so +0 -0
  68. mindspore/lib/libmindspore_core.so +0 -0
  69. mindspore/lib/libmindspore_glog.so.0 +0 -0
  70. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  71. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  72. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  73. mindspore/lib/libmindspore_shared_lib.so +0 -0
  74. mindspore/lib/libopencv_core.so.4.5 +0 -0
  75. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  76. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  77. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  78. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6 -6
  79. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  80. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  81. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  82. mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
  83. mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
  84. mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
  85. mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
  86. mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
  87. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  88. mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
  89. mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
  90. mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
  91. mindspore/log.py +2 -2
  92. mindspore/mint/__init__.py +457 -0
  93. mindspore/mint/nn/__init__.py +430 -0
  94. mindspore/mint/nn/functional.py +424 -0
  95. mindspore/mint/optim/__init__.py +24 -0
  96. mindspore/mint/optim/adamw.py +186 -0
  97. mindspore/multiprocessing/__init__.py +4 -0
  98. mindspore/nn/__init__.py +3 -0
  99. mindspore/nn/cell.py +51 -47
  100. mindspore/nn/extend/__init__.py +29 -0
  101. mindspore/nn/extend/basic.py +140 -0
  102. mindspore/nn/extend/embedding.py +143 -0
  103. mindspore/nn/extend/layer/__init__.py +27 -0
  104. mindspore/nn/extend/layer/normalization.py +107 -0
  105. mindspore/nn/extend/pooling.py +117 -0
  106. mindspore/nn/generator.py +297 -0
  107. mindspore/nn/layer/basic.py +109 -1
  108. mindspore/nn/layer/container.py +2 -2
  109. mindspore/nn/layer/conv.py +6 -6
  110. mindspore/nn/layer/embedding.py +1 -1
  111. mindspore/nn/layer/normalization.py +21 -43
  112. mindspore/nn/layer/padding.py +4 -0
  113. mindspore/nn/optim/ada_grad.py +2 -2
  114. mindspore/nn/optim/adadelta.py +1 -1
  115. mindspore/nn/optim/adafactor.py +1 -1
  116. mindspore/nn/optim/adam.py +7 -7
  117. mindspore/nn/optim/adamax.py +2 -2
  118. mindspore/nn/optim/adasum.py +2 -2
  119. mindspore/nn/optim/asgd.py +2 -2
  120. mindspore/nn/optim/ftrl.py +1 -1
  121. mindspore/nn/optim/lamb.py +3 -3
  122. mindspore/nn/optim/lars.py +1 -1
  123. mindspore/nn/optim/lazyadam.py +2 -2
  124. mindspore/nn/optim/momentum.py +2 -2
  125. mindspore/nn/optim/optimizer.py +2 -2
  126. mindspore/nn/optim/proximal_ada_grad.py +2 -2
  127. mindspore/nn/optim/rmsprop.py +2 -2
  128. mindspore/nn/optim/rprop.py +2 -2
  129. mindspore/nn/optim/sgd.py +2 -2
  130. mindspore/nn/optim/thor.py +2 -2
  131. mindspore/nn/wrap/cell_wrapper.py +9 -9
  132. mindspore/nn/wrap/grad_reducer.py +5 -5
  133. mindspore/ops/_grad_experimental/grad_comm_ops.py +4 -2
  134. mindspore/ops/_vmap/vmap_grad_nn_ops.py +41 -2
  135. mindspore/ops/_vmap/vmap_math_ops.py +27 -8
  136. mindspore/ops/_vmap/vmap_nn_ops.py +66 -8
  137. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +73 -1
  138. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +12 -3
  139. mindspore/ops/auto_generate/gen_arg_handler.py +24 -0
  140. mindspore/ops/auto_generate/gen_extend_func.py +274 -0
  141. mindspore/ops/auto_generate/gen_ops_def.py +889 -22
  142. mindspore/ops/auto_generate/gen_ops_prim.py +3541 -253
  143. mindspore/ops/auto_generate/pyboost_inner_prim.py +282 -0
  144. mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -1
  145. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +9 -0
  146. mindspore/ops/extend/__init__.py +9 -1
  147. mindspore/ops/extend/array_func.py +134 -27
  148. mindspore/ops/extend/math_func.py +3 -3
  149. mindspore/ops/extend/nn_func.py +363 -2
  150. mindspore/ops/function/__init__.py +19 -2
  151. mindspore/ops/function/array_func.py +463 -439
  152. mindspore/ops/function/clip_func.py +7 -18
  153. mindspore/ops/function/grad/grad_func.py +5 -5
  154. mindspore/ops/function/linalg_func.py +4 -4
  155. mindspore/ops/function/math_func.py +260 -243
  156. mindspore/ops/function/nn_func.py +825 -62
  157. mindspore/ops/function/random_func.py +73 -4
  158. mindspore/ops/function/sparse_unary_func.py +1 -1
  159. mindspore/ops/function/vmap_func.py +1 -1
  160. mindspore/ops/functional.py +2 -2
  161. mindspore/ops/op_info_register.py +1 -31
  162. mindspore/ops/operations/__init__.py +2 -3
  163. mindspore/ops/operations/_grad_ops.py +2 -107
  164. mindspore/ops/operations/_inner_ops.py +5 -5
  165. mindspore/ops/operations/_sequence_ops.py +2 -2
  166. mindspore/ops/operations/array_ops.py +11 -233
  167. mindspore/ops/operations/comm_ops.py +32 -32
  168. mindspore/ops/operations/custom_ops.py +7 -89
  169. mindspore/ops/operations/manually_defined/ops_def.py +329 -4
  170. mindspore/ops/operations/math_ops.py +13 -163
  171. mindspore/ops/operations/nn_ops.py +9 -316
  172. mindspore/ops/operations/random_ops.py +1 -1
  173. mindspore/ops/operations/sparse_ops.py +3 -3
  174. mindspore/ops/primitive.py +2 -2
  175. mindspore/ops_generate/arg_dtype_cast.py +12 -3
  176. mindspore/ops_generate/arg_handler.py +24 -0
  177. mindspore/ops_generate/gen_ops_inner_prim.py +2 -0
  178. mindspore/ops_generate/gen_pyboost_func.py +13 -6
  179. mindspore/ops_generate/pyboost_utils.py +2 -17
  180. mindspore/parallel/__init__.py +3 -2
  181. mindspore/parallel/_auto_parallel_context.py +106 -1
  182. mindspore/parallel/_parallel_serialization.py +34 -2
  183. mindspore/parallel/_utils.py +16 -0
  184. mindspore/parallel/algo_parameter_config.py +4 -4
  185. mindspore/parallel/checkpoint_transform.py +249 -77
  186. mindspore/parallel/cluster/process_entity/_api.py +1 -1
  187. mindspore/parallel/parameter_broadcast.py +1 -1
  188. mindspore/parallel/shard.py +1 -1
  189. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +1 -0
  190. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +17 -5
  191. mindspore/profiler/parser/ascend_msprof_exporter.py +3 -3
  192. mindspore/profiler/parser/ascend_msprof_generator.py +10 -3
  193. mindspore/profiler/parser/ascend_op_generator.py +26 -9
  194. mindspore/profiler/parser/ascend_timeline_generator.py +7 -4
  195. mindspore/profiler/parser/profiler_info.py +11 -1
  196. mindspore/profiler/profiling.py +13 -5
  197. mindspore/rewrite/api/node.py +12 -12
  198. mindspore/rewrite/api/symbol_tree.py +11 -11
  199. mindspore/run_check/_check_version.py +1 -1
  200. mindspore/safeguard/rewrite_obfuscation.py +2 -2
  201. mindspore/train/amp.py +4 -4
  202. mindspore/train/anf_ir_pb2.py +8 -2
  203. mindspore/train/callback/_backup_and_restore.py +2 -2
  204. mindspore/train/callback/_callback.py +4 -4
  205. mindspore/train/callback/_checkpoint.py +2 -2
  206. mindspore/train/callback/_early_stop.py +2 -2
  207. mindspore/train/callback/_landscape.py +4 -4
  208. mindspore/train/callback/_loss_monitor.py +2 -2
  209. mindspore/train/callback/_on_request_exit.py +2 -2
  210. mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
  211. mindspore/train/callback/_summary_collector.py +2 -2
  212. mindspore/train/callback/_time_monitor.py +2 -2
  213. mindspore/train/dataset_helper.py +8 -3
  214. mindspore/train/loss_scale_manager.py +2 -2
  215. mindspore/train/metrics/metric.py +3 -3
  216. mindspore/train/mind_ir_pb2.py +22 -17
  217. mindspore/train/model.py +15 -15
  218. mindspore/train/serialization.py +18 -18
  219. mindspore/train/summary/summary_record.py +7 -7
  220. mindspore/train/train_thor/convert_utils.py +3 -3
  221. mindspore/version.py +1 -1
  222. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +1 -1
  223. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +226 -212
  224. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
  225. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
  226. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
mindspore/ops/auto_generate/pyboost_inner_prim.py +282 -0

@@ -17,10 +17,36 @@ from mindspore.common._stub_tensor import _convert_stub
 from mindspore.ops.auto_generate.gen_arg_handler import *
 from mindspore._c_expression import ArgMaxWithValuePrim_
 from mindspore._c_expression import ArgMinWithValuePrim_
+from mindspore._c_expression import BatchMatMulPrim_
+from mindspore._c_expression import BatchNormGradExtPrim_
 from mindspore._c_expression import BroadcastToPrim_
 from mindspore._c_expression import ConcatPrim_
+from mindspore._c_expression import ConvolutionGradPrim_
+from mindspore._c_expression import ConvolutionPrim_
+from mindspore._c_expression import FFNExtPrim_
+from mindspore._c_expression import FlashAttentionScoreGradPrim_
+from mindspore._c_expression import FlashAttentionScorePrim_
+from mindspore._c_expression import GridSampler2DGradPrim_
+from mindspore._c_expression import GridSampler2DPrim_
+from mindspore._c_expression import GridSampler3DGradPrim_
+from mindspore._c_expression import GridSampler3DPrim_
+from mindspore._c_expression import MatMulPrim_
+from mindspore._c_expression import MaxPoolGradWithIndicesPrim_
+from mindspore._c_expression import MaxPoolGradWithMaskPrim_
+from mindspore._c_expression import MaxPoolWithIndicesPrim_
+from mindspore._c_expression import MaxPoolWithMaskPrim_
+from mindspore._c_expression import OneHotExtPrim_
+from mindspore._c_expression import QuantBatchMatmulPrim_
+from mindspore._c_expression import ReduceAllPrim_
 from mindspore._c_expression import ReduceAnyPrim_
+from mindspore._c_expression import ReverseV2Prim_
 from mindspore._c_expression import SoftmaxPrim_
+from mindspore._c_expression import StackExtPrim_
+from mindspore._c_expression import TrilPrim_
+from mindspore._c_expression import TriuPrim_
+from mindspore._c_expression import UpsampleTrilinear3DGradPrim_
+from mindspore._c_expression import UpsampleTrilinear3DPrim_
+from mindspore._c_expression import WeightQuantBatchMatmulPrim_


 class _PyboostArgMaxWithValuePrim(ArgMaxWithValuePrim_):
@@ -41,6 +67,24 @@ class _PyboostArgMinWithValuePrim(ArgMinWithValuePrim_):
 argmin_with_value_impl = _PyboostArgMinWithValuePrim()


+class _PyboostBatchMatMulPrim(BatchMatMulPrim_):
+    def __call__(self, x, y, transpose_a, transpose_b):
+
+        return _convert_stub(super().__call__(x, y, transpose_a, transpose_b))
+
+
+batch_mat_mul_impl = _PyboostBatchMatMulPrim()
+
+
+class _PyboostBatchNormGradExtPrim(BatchNormGradExtPrim_):
+    def __call__(self, dout, input, weight, running_mean, running_var, saved_mean, saved_rstd, training, eps):
+
+        return _convert_stub(super().__call__(dout, input, weight, running_mean, running_var, saved_mean, saved_rstd, training, eps))
+
+
+batch_norm_grad_ext_impl = _PyboostBatchNormGradExtPrim()
+
+
 class _PyboostBroadcastToPrim(BroadcastToPrim_):
     def __call__(self, input, shape):

@@ -59,6 +103,181 @@ class _PyboostConcatPrim(ConcatPrim_):
 concat_impl = _PyboostConcatPrim()


+class _PyboostConvolutionGradPrim(ConvolutionGradPrim_):
+    def __call__(self, dout, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, output_mask):
+        converted_stride = to_strides(stride)
+        converted_padding = to_2d_paddings(padding)
+        converted_dilation = to_dilations(dilation)
+        converted_output_padding = to_output_padding(output_padding)
+        return _convert_stub(super().__call__(dout, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, output_mask))
+
+
+convolution_grad_impl = _PyboostConvolutionGradPrim()
+
+
+class _PyboostConvolutionPrim(ConvolutionPrim_):
+    def __call__(self, input, weight, bias, stride, padding, dilation, transposed, output_padding, groups):
+        converted_stride = to_strides(stride)
+        converted_padding = to_2d_paddings(padding)
+        converted_dilation = to_dilations(dilation)
+        converted_output_padding = to_output_padding(output_padding)
+        return _convert_stub(super().__call__(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups))
+
+
+convolution_impl = _PyboostConvolutionPrim()
+
+
+class _PyboostFFNExtPrim(FFNExtPrim_):
+    def __call__(self, x, weight1, weight2, expertTokens, bias1, bias2, scale, offset, deqScale1, deqScale2, antiquant_scale1, antiquant_scale2, antiquant_offset1, antiquant_offset2, activation, inner_precise):
+        converted_activation = str_to_enum(activation)
+        return _convert_stub(super().__call__(x, weight1, weight2, expertTokens, bias1, bias2, scale, offset, deqScale1, deqScale2, antiquant_scale1, antiquant_scale2, antiquant_offset1, antiquant_offset2, activation, inner_precise))
+
+
+ffn_ext_impl = _PyboostFFNExtPrim()
+
+
+class _PyboostFlashAttentionScoreGradPrim(FlashAttentionScoreGradPrim_):
+    def __call__(self, query, key, value, dy, pse_shift, drop_mask, padding_mask, atten_mask, softmax_max, softmax_sum, softmax_in, attention_in, prefix, actual_seq_qlen, actual_seq_kvlen, head_num, keep_prob, scale_value, pre_tokens, next_tokens, inner_precise, input_layout, sparse_mode):
+        converted_input_layout = str_to_enum(input_layout)
+        return _convert_stub(super().__call__(query, key, value, dy, pse_shift, drop_mask, padding_mask, atten_mask, softmax_max, softmax_sum, softmax_in, attention_in, prefix, actual_seq_qlen, actual_seq_kvlen, head_num, keep_prob, scale_value, pre_tokens, next_tokens, inner_precise, input_layout, sparse_mode))
+
+
+flash_attention_score_grad_impl = _PyboostFlashAttentionScoreGradPrim()
+
+
+class _PyboostFlashAttentionScorePrim(FlashAttentionScorePrim_):
+    def __call__(self, query, key, value, real_shift, drop_mask, padding_mask, attn_mask, prefix, actual_seq_qlen, actual_seq_kvlen, head_num, keep_prob, scale_value, pre_tokens, next_tokens, inner_precise, input_layout, sparse_mode):
+        converted_input_layout = str_to_enum(input_layout)
+        return _convert_stub(super().__call__(query, key, value, real_shift, drop_mask, padding_mask, attn_mask, prefix, actual_seq_qlen, actual_seq_kvlen, head_num, keep_prob, scale_value, pre_tokens, next_tokens, inner_precise, input_layout, sparse_mode))
+
+
+flash_attention_score_impl = _PyboostFlashAttentionScorePrim()
+
+
+class _PyboostGridSampler2DGradPrim(GridSampler2DGradPrim_):
+    def __call__(self, grad, input_x, grid, interpolation_mode, padding_mode, align_corners):
+        converted_interpolation_mode = str_to_enum(interpolation_mode)
+        converted_padding_mode = str_to_enum(padding_mode)
+        return _convert_stub(super().__call__(grad, input_x, grid, interpolation_mode, padding_mode, align_corners))
+
+
+grid_sampler_2d_grad_impl = _PyboostGridSampler2DGradPrim()
+
+
+class _PyboostGridSampler2DPrim(GridSampler2DPrim_):
+    def __call__(self, input_x, grid, interpolation_mode, padding_mode, align_corners):
+        converted_interpolation_mode = str_to_enum(interpolation_mode)
+        converted_padding_mode = str_to_enum(padding_mode)
+        return _convert_stub(super().__call__(input_x, grid, interpolation_mode, padding_mode, align_corners))
+
+
+grid_sampler_2d_impl = _PyboostGridSampler2DPrim()
+
+
+class _PyboostGridSampler3DGradPrim(GridSampler3DGradPrim_):
+    def __call__(self, grad, input_x, grid, interpolation_mode, padding_mode, align_corners):
+        converted_interpolation_mode = str_to_enum(interpolation_mode)
+        converted_padding_mode = str_to_enum(padding_mode)
+        return _convert_stub(super().__call__(grad, input_x, grid, interpolation_mode, padding_mode, align_corners))
+
+
+grid_sampler_3d_grad_impl = _PyboostGridSampler3DGradPrim()
+
+
+class _PyboostGridSampler3DPrim(GridSampler3DPrim_):
+    def __call__(self, input_x, grid, interpolation_mode, padding_mode, align_corners):
+        converted_interpolation_mode = str_to_enum(interpolation_mode)
+        converted_padding_mode = str_to_enum(padding_mode)
+        return _convert_stub(super().__call__(input_x, grid, interpolation_mode, padding_mode, align_corners))
+
+
+grid_sampler_3d_impl = _PyboostGridSampler3DPrim()
+
+
+class _PyboostMatMulPrim(MatMulPrim_):
+    def __call__(self, input, mat2, transpose_a, transpose_b):
+
+        return _convert_stub(super().__call__(input, mat2, transpose_a, transpose_b))
+
+
+matmul_impl = _PyboostMatMulPrim()
+
+
+class _PyboostMaxPoolGradWithIndicesPrim(MaxPoolGradWithIndicesPrim_):
+    def __call__(self, x, grad, argmax, kernel_size, strides, pads, dilation, ceil_mode, argmax_type):
+        converted_kernel_size = to_kernel_size(kernel_size)
+        converted_strides = to_strides(strides)
+        converted_pads = to_output_padding(pads)
+        converted_dilation = to_dilations(dilation)
+        return _convert_stub(super().__call__(x, grad, argmax, kernel_size, strides, pads, dilation, ceil_mode, argmax_type))
+
+
+max_pool_grad_with_indices_impl = _PyboostMaxPoolGradWithIndicesPrim()
+
+
+class _PyboostMaxPoolGradWithMaskPrim(MaxPoolGradWithMaskPrim_):
+    def __call__(self, x, grad, mask, kernel_size, strides, pads, dilation, ceil_mode, argmax_type):
+        converted_kernel_size = to_kernel_size(kernel_size)
+        converted_strides = to_strides(strides)
+        converted_pads = to_output_padding(pads)
+        converted_dilation = to_dilations(dilation)
+        return _convert_stub(super().__call__(x, grad, mask, kernel_size, strides, pads, dilation, ceil_mode, argmax_type))
+
+
+max_pool_grad_with_mask_impl = _PyboostMaxPoolGradWithMaskPrim()
+
+
+class _PyboostMaxPoolWithIndicesPrim(MaxPoolWithIndicesPrim_):
+    def __call__(self, x, kernel_size, strides, pads, dilation, ceil_mode, argmax_type):
+        converted_kernel_size = to_kernel_size(kernel_size)
+        converted_strides = to_strides(strides)
+        converted_pads = to_output_padding(pads)
+        converted_dilation = to_dilations(dilation)
+        return _convert_stub(super().__call__(x, kernel_size, strides, pads, dilation, ceil_mode, argmax_type))
+
+
+max_pool_with_indices_impl = _PyboostMaxPoolWithIndicesPrim()
+
+
+class _PyboostMaxPoolWithMaskPrim(MaxPoolWithMaskPrim_):
+    def __call__(self, x, kernel_size, strides, pads, dilation, ceil_mode, argmax_type):
+        converted_kernel_size = to_kernel_size(kernel_size)
+        converted_strides = to_strides(strides)
+        converted_pads = to_output_padding(pads)
+        converted_dilation = to_dilations(dilation)
+        return _convert_stub(super().__call__(x, kernel_size, strides, pads, dilation, ceil_mode, argmax_type))
+
+
+max_pool_with_mask_impl = _PyboostMaxPoolWithMaskPrim()
+
+
+class _PyboostOneHotExtPrim(OneHotExtPrim_):
+    def __call__(self, tensor, num_classes, on_value, off_value, axis):
+
+        return _convert_stub(super().__call__(tensor, num_classes, on_value, off_value, axis))
+
+
+one_hot_ext_impl = _PyboostOneHotExtPrim()
+
+
+class _PyboostQuantBatchMatmulPrim(QuantBatchMatmulPrim_):
+    def __call__(self, x1, x2, scale, offset, bias, transpose_x1, transpose_x2, dtype):
+
+        return _convert_stub(super().__call__(x1, x2, scale, offset, bias, transpose_x1, transpose_x2, dtype))
+
+
+quant_batch_matmul_impl = _PyboostQuantBatchMatmulPrim()
+
+
+class _PyboostReduceAllPrim(ReduceAllPrim_):
+    def __call__(self, input, axis, keep_dims):
+
+        return _convert_stub(super().__call__(input, axis, keep_dims))
+
+
+reduce_all_impl = _PyboostReduceAllPrim()
+
+
 class _PyboostReduceAnyPrim(ReduceAnyPrim_):
     def __call__(self, x, axis, keep_dims):

@@ -68,6 +287,15 @@ class _PyboostReduceAnyPrim(ReduceAnyPrim_):
 reduce_any_impl = _PyboostReduceAnyPrim()


+class _PyboostReverseV2Prim(ReverseV2Prim_):
+    def __call__(self, input, axis):
+
+        return _convert_stub(super().__call__(input, axis))
+
+
+reverse_v2_impl = _PyboostReverseV2Prim()
+
+
 class _PyboostSoftmaxPrim(SoftmaxPrim_):
     def __call__(self, input, axis):

@@ -75,3 +303,57 @@ class _PyboostSoftmaxPrim(SoftmaxPrim_):


 softmax_impl = _PyboostSoftmaxPrim()
+
+
+class _PyboostStackExtPrim(StackExtPrim_):
+    def __call__(self, tensors, dim):
+
+        return _convert_stub(super().__call__(tensors, dim))
+
+
+stack_ext_impl = _PyboostStackExtPrim()
+
+
+class _PyboostTrilPrim(TrilPrim_):
+    def __call__(self, input, diagonal):
+
+        return _convert_stub(super().__call__(input, diagonal))
+
+
+tril_impl = _PyboostTrilPrim()
+
+
+class _PyboostTriuPrim(TriuPrim_):
+    def __call__(self, input, diagonal):
+
+        return _convert_stub(super().__call__(input, diagonal))
+
+
+triu_impl = _PyboostTriuPrim()
+
+
+class _PyboostUpsampleTrilinear3DGradPrim(UpsampleTrilinear3DGradPrim_):
+    def __call__(self, dy, input_size, output_size, scales, align_corners):
+
+        return _convert_stub(super().__call__(dy, input_size, output_size, scales, align_corners))
+
+
+upsample_trilinear3d_grad_impl = _PyboostUpsampleTrilinear3DGradPrim()
+
+
+class _PyboostUpsampleTrilinear3DPrim(UpsampleTrilinear3DPrim_):
+    def __call__(self, x, output_size, scales, align_corners):
+
+        return _convert_stub(super().__call__(x, output_size, scales, align_corners))
+
+
+upsample_trilinear3d_impl = _PyboostUpsampleTrilinear3DPrim()
+
+
+class _PyboostWeightQuantBatchMatmulPrim(WeightQuantBatchMatmulPrim_):
+    def __call__(self, x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias, transpose_x, transpose_weight, antiquant_group_size):
+
+        return _convert_stub(super().__call__(x, weight, antiquant_scale, antiquant_offset, quant_scale, quant_offset, bias, transpose_x, transpose_weight, antiquant_group_size))
+
+
+weight_quant_batch_matmul_impl = _PyboostWeightQuantBatchMatmulPrim()
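All of the classes added to this file follow one generated template: subclass the pyboost primitive exported by mindspore._c_expression, optionally normalize Python-level arguments (str_to_enum, to_strides, to_2d_paddings, ...), dispatch to the backend via super().__call__, and wrap the result with _convert_stub; a module-level *_impl singleton is then instantiated. A minimal self-contained sketch of that template, using hypothetical stand-ins for the C++ base class and for _convert_stub:

class TrilPrim_:
    """Hypothetical stand-in for mindspore._c_expression.TrilPrim_ (a C++ pyboost primitive)."""
    def __call__(self, input, diagonal):
        return ("backend tril", input, diagonal)  # the real base dispatches to a device kernel

def _convert_stub(value):
    # Stand-in: the real helper wraps backend output into stub Tensors.
    return value

class _PyboostTrilPrim(TrilPrim_):
    def __call__(self, input, diagonal):
        # Generated wrappers normalize arguments here when needed (e.g. str_to_enum).
        return _convert_stub(super().__call__(input, diagonal))

tril_impl = _PyboostTrilPrim()  # module-level singleton, as in the diff above
print(tril_impl([[1, 2], [3, 4]], 0))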
mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -1

@@ -137,7 +137,8 @@ def data_update_by_ops(transfer_type, arg, data, new_index, origin_data, value=N
     elif transfer_type == ValueTransferType.kGatherND:
         if isinstance(new_index, list):
             new_index = handle_multi_dim_index_tensor(new_index, arg)
-        data = F.gather_nd(data, Tensor(new_index))
+        new_index = format_index_tensor(new_index, (None, F.shape(data)[:F.shape(new_index)[-1]]))
+        data = F.gather_nd(data, new_index)
     elif transfer_type == ValueTransferType.kTensorScatterUpdate:
         if isinstance(new_index, list):
             new_index = handle_multi_dim_index_tensor(new_index, arg)
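For context, the kGatherND branch ends in F.gather_nd, which picks elements of `data` addressed by coordinate rows of the index tensor; rc2 formats the index tensor against the shape of `data` before the call instead of re-wrapping it in Tensor(...). A rough sketch of gather_nd's semantics in plain numpy (an illustrative helper, not the mindspore implementation):

import numpy as np

def gather_nd(data, indices):
    # Each row of `indices` is a coordinate (prefix) into `data`.
    return np.stack([data[tuple(idx)] for idx in indices])

data = np.array([[1, 2], [3, 4]])
print(gather_nd(data, np.array([[0, 0], [1, 1]])))  # [1 4]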
mindspore/ops/composite/multitype_ops/_constexpr_utils.py +9 -0

@@ -459,13 +459,22 @@ def tuple_index_type_cnt(types, op_name):
 def check_value_elements(types):
     """Judges the type of all elements of the tuple."""
     tensor_number = 0
+    last_type = None
+    mix_but_no_tensor = False
     for ele in types:
         if isinstance(ele, mstype.TensorType):
             tensor_number += 1
         elif isinstance(ele, (list, tuple)):
             return MIXED

+        if last_type is None:
+            last_type = type(ele)
+        elif not isinstance(ele, last_type):
+            mix_but_no_tensor = True
+
     if tensor_number == 0:
+        if mix_but_no_tensor:
+            return MIXED
         return NO_TENSOR
     if tensor_number == len(types):
         return ALL_TENSOR
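The behavioral change: a tuple index that contains no tensors but mixes Python element types is now classified MIXED instead of NO_TENSOR. The new logic, isolated into a runnable sketch (the constants and the TensorType stand-in are simplified here):

NO_TENSOR, ALL_TENSOR, MIXED = "no_tensor", "all_tensor", "mixed"

class TensorType:  # stand-in for mstype.TensorType
    pass

def check_value_elements(types):
    """Judges the type of all elements of the tuple (rc2 logic)."""
    tensor_number = 0
    last_type = None
    mix_but_no_tensor = False
    for ele in types:
        if isinstance(ele, TensorType):
            tensor_number += 1
        elif isinstance(ele, (list, tuple)):
            return MIXED
        if last_type is None:
            last_type = type(ele)
        elif not isinstance(ele, last_type):
            mix_but_no_tensor = True
    if tensor_number == 0:
        if mix_but_no_tensor:
            return MIXED  # new in rc2; rc1 returned NO_TENSOR here
        return NO_TENSOR
    if tensor_number == len(types):
        return ALL_TENSOR
    return MIXED

print(check_value_elements((1, 2)))    # no_tensor: all elements share one type
print(check_value_elements((1, "a")))  # mixed: int and str mixed, new in rc2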
mindspore/ops/extend/__init__.py +9 -1

@@ -33,13 +33,21 @@ from . import (
     nn_func,
 )

-from .array_func import gather, max, min
+from .array_func import gather, max, min, one_hot, narrow
 from .math_func import (
     baddbmm,
+    bmm,
     add,
     sub
 )

+from .nn_func import (
+    conv2d,
+    max_pool2d,
+    leaky_relu_ext,
+    batch_norm
+)
+
 __all__ = []
 __all__.extend(array_func.__all__)
 __all__.extend(math_func.__all__)
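Taken together with the array_func and math_func hunks below, the rc2 wheel should make the following names importable; a hedged sketch based purely on the imports shown above:

# Based on the __init__.py diff above; requires the mindspore 2.3.0rc2 wheel.
from mindspore.ops.extend import gather, max, min, one_hot, narrow, bmm
from mindspore.ops.extend import conv2d, max_pool2d, leaky_relu_ext, batch_norm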
mindspore/ops/extend/array_func.py +134 -27

@@ -18,12 +18,54 @@
 Array Operators

 """
-
+from mindspore.common import Tensor
 from mindspore.ops.operations.array_ops import ArgMaxWithValue, ArgMinWithValue
 from mindspore.ops._primitive_cache import _get_cache_prim
-from mindspore.ops.auto_generate.gen_ops_prim import gather_d_op
+from mindspore.ops.auto_generate.gen_ops_prim import gather_d_op, slice_ext_op, OneHotExt
+from mindspore.ops.auto_generate.gen_ops_def import max_, min_
+from mindspore import _checkparam as validator
+

 # define Primitive global variables
+def narrow(input, dim, start, length):
+    """
+    Returns a narrowed tensor from the input tensor: along dimension `dim`,
+    the output contains the elements from `start` to `start + length` (exclusive).
+
+    Args:
+        input (Tensor): the tensor to narrow.
+        dim (int): dimension along which to narrow.
+        start (int): the starting index along `dim`.
+        length (int): the number of elements to keep along `dim`.
+
+    Returns:
+        Tensor.
+
+        - output (Tensor) - The narrowed tensor.
+
+    Raises:
+        TypeError: If `input` is not a Tensor.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> from mindspore import ops
+        >>> from mindspore import Tensor
+        >>> x = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], mindspore.int32)
+        >>> output = ops.narrow(x, 0, 0, 2)
+        >>> print(output)
+        [[ 1 2 3]
+         [ 4 5 6]]
+        >>> output = ops.narrow(x, 1, 1, 2)
+        >>> print(output)
+        [[ 2 3]
+         [ 5 6]
+         [ 8 9]]
+    """
+    validator.check_value_type("input", input, Tensor, "narrow")
+    return slice_ext_op(input, dim, start, start + length, 1)


 def gather(input, dim, index):
@@ -75,25 +117,32 @@ def gather(input, dim, index):
     return gather_d_op(input, dim, index)


-def max(input, dim, keepdim=False):
+def max(input, dim=None, keepdim=False):
     """
-    Calculates the maximum value along the given axis for the input tensor.
+    Calculates the maximum value along the given dimension for the input tensor.

     Args:
         input (Tensor): The input tensor, can be any dimension. Complex tensor is not supported for now.
-        dim (int): The dimension to reduce.
-        keepdim (bool): Whether to keep the reduced dimension; if true, the output keeps the same number of
-            dimensions as the input, otherwise the reduced dimension is removed. Default: ``False`` .
+        dim (int, optional): The dimension to reduce. Default: ``None`` .
+        keepdim (bool, optional): Whether to keep the reduced dimension; if true, the output keeps the same
+            number of dimensions as the input, otherwise the reduced dimension is removed. Default: ``False`` .

     Returns:
-        tuple (Tensor), tuple of 2 tensors, containing the maximum value of the input tensor and the corresponding
-        index.
+        Tensor if `dim` is the default value ``None`` : the maximum value of the input tensor, with the shape
+        :math:`()` and the same dtype as `input`.
+
+        tuple (Tensor) if `dim` is not the default value ``None`` : tuple of 2 tensors, containing the maximum
+        value of the input tensor along the given dimension `dim` and the corresponding index:

-        - values (Tensor) - The maximum value of the input tensor, with the same dtype as `input`. If `keepdim`
-          is true, the shape of the output tensors is :math:`(input_1, input_2, ..., input_{axis-1}, 1,
-          input_{axis+1}, ..., input_N)` . Otherwise, the shape is :math:`(input_1, input_2, ..., input_{axis-1},
-          input_{axis+1}, ..., input_N)` .
-        - index (Tensor) - The index for the maximum value of the input tensor, with the same shape as `values`.
+        - **values (Tensor)** - The maximum value of the input tensor along the given dimension `dim`, with the
+          same dtype as `input`. If `keepdim` is ``True`` , the shape of the output tensors is :math:`(input_1,
+          input_2, ..., input_{axis-1}, 1, input_{axis+1}, ..., input_N)` . Otherwise, the shape is
+          :math:`(input_1, input_2, ..., input_{axis-1}, input_{axis+1}, ..., input_N)` .
+        - **index (Tensor)** - The index of the maximum value of the input tensor along the given dimension `dim`,
+          with the same shape as `values`.
+
+    Raises:
+        ValueError: If `dim` is the default value ``None`` and `keepdim` is not ``False`` .

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -108,30 +157,41 @@ def max(input, dim, keepdim=False):
     >>> print(output, index)
     [[3.2 0.4 0.4 2.9 4. ]] [[1 1 0 1 1]]
     """
+    if dim is None:
+        if keepdim is not False:
+            raise ValueError(f"For 'max', the `keepdim` must be False when the `dim` is None, but got {keepdim}")
+        return max_(input)
     argmax_with_value_op = _get_cache_prim(ArgMaxWithValue)(dim, keepdim)
     indices, values = argmax_with_value_op(input)
     return values, indices


-def min(input, dim, keepdim=False):
+def min(input, dim=None, keepdim=False):
     """
-    Calculates the minimum value along the given axis for the input tensor.
+    Calculates the minimum value along the given dimension for the input tensor.

     Args:
         input (Tensor): The input tensor, can be any dimension. Complex tensor is not supported for now.
-        dim (int): The dimension to reduce.
-        keepdim (bool): Whether to keep the reduced dimension; if true, the output keeps the same number of
-            dimensions as the input, otherwise the reduced dimension is removed. Default: ``False`` .
+        dim (int, optional): The dimension to reduce. Default: ``None`` .
+        keepdim (bool, optional): Whether to keep the reduced dimension; if true, the output keeps the same
+            number of dimensions as the input, otherwise the reduced dimension is removed. Default: ``False`` .

     Returns:
-        tuple (Tensor), tuple of 2 tensors, containing the minimum value of the input tensor and the corresponding
-        index.
+        Tensor if `dim` is the default value ``None`` : the minimum value of the input tensor, with the shape
+        :math:`()` and the same dtype as `input`.
+
+        tuple (Tensor) if `dim` is not the default value ``None`` : tuple of 2 tensors, containing the minimum
+        value of the input tensor along the given dimension `dim` and the corresponding index:

-        - values (Tensor) - The minimum value of the input tensor, with the same dtype as `input`. If `keepdim`
-          is true, the shape of the output tensors is :math:`(input_1, input_2, ..., input_{axis-1}, 1,
-          input_{axis+1}, ..., input_N)` . Otherwise, the shape is :math:`(input_1, input_2, ..., input_{axis-1},
-          input_{axis+1}, ..., input_N)` .
-        - index (Tensor) - The index for the minimum value of the input tensor, with the same shape as `values`.
+        - **values (Tensor)** - The minimum value of the input tensor along the given dimension `dim`, with the
+          same dtype as `input`. If `keepdim` is ``True`` , the shape of the output tensors is :math:`(input_1,
+          input_2, ..., input_{axis-1}, 1, input_{axis+1}, ..., input_N)` . Otherwise, the shape is
+          :math:`(input_1, input_2, ..., input_{axis-1}, input_{axis+1}, ..., input_N)` .
+        - **index (Tensor)** - The index of the minimum value of the input tensor along the given dimension `dim`,
+          with the same shape as `values`.
+
+    Raises:
+        ValueError: If `dim` is the default value ``None`` and `keepdim` is not ``False`` .

     Supported Platforms:
         ``Ascend`` ``GPU`` ``CPU``
@@ -145,8 +205,55 @@ def min(input, dim, keepdim=False):
     >>> print(output, index)
     [0.0] [0]
     """
+    if dim is None:
+        if keepdim is not False:
+            raise ValueError(f"For 'min', the `keepdim` must be False when the `dim` is None, but got {keepdim}")
+        return min_(input)
     argmin_with_value_op = _get_cache_prim(ArgMinWithValue)(dim, keepdim)
     indices, values = argmin_with_value_op(input)
     return values, indices

-__all__ = ['gather', 'max', 'min']
+
+def one_hot(tensor, num_classes):
+    r"""
+    Computes a one-hot tensor.
+
+    The locations given by the indices in `tensor` take value `1`, while all
+    other locations take value `0`.
+
+    Args:
+        tensor (Tensor): A tensor of indices. Tensor of shape :math:`(X_0, \ldots, X_n)`.
+            Data type must be int32 or int64.
+        num_classes (Union[int, Tensor]): A scalar defining the depth of the one-hot dimension.
+
+    Returns:
+        Tensor, one-hot tensor.
+
+    Raises:
+        TypeError: If `num_classes` is not an int.
+        TypeError: If dtype of `tensor` is not int32 or int64.
+        ValueError: If `num_classes` is less than 0.
+
+    Supported Platforms:
+        ``Ascend`` ``GPU`` ``CPU``
+
+    Examples:
+        >>> import mindspore
+        >>> import numpy as np
+        >>> import mindspore.ops as ops
+        >>> from mindspore import Tensor
+        >>> tensor = Tensor(np.array([0, 1, 2]), mindspore.int32)
+        >>> num_classes = 3
+        >>> output = ops.extend.one_hot(tensor, num_classes)
+        >>> print(output)
+        [[1. 0. 0.]
+         [0. 1. 0.]
+         [0. 0. 1.]]
+    """
+    on_value = Tensor(1, dtype=tensor.dtype)
+    off_value = Tensor(0, dtype=tensor.dtype)
+    onehot = _get_cache_prim(OneHotExt)(-1)
+    return onehot(tensor, num_classes, on_value, off_value)
+
+
+__all__ = ['gather', 'max', 'min', 'one_hot']
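The net effect of these array_func changes, as a hedged usage sketch against the rc2 wheel (output shapes follow from the docstrings above):

import mindspore
import numpy as np
from mindspore import Tensor
from mindspore.ops import extend

x = Tensor(np.array([[0.0, 0.4, 0.5, 0.3], [3.2, 0.4, 0.1, 2.9]]), mindspore.float32)

# dim=None is new in rc2: returns the global maximum as a 0-d tensor.
print(extend.max(x))
# With a dim, the (values, index) pair is returned as before.
values, index = extend.max(x, dim=0, keepdim=True)

# narrow() keeps `length` elements from `start` along `dim`.
print(extend.narrow(x, 1, 1, 2))

# one_hot() takes on/off values (1 and 0) from the index dtype.
print(extend.one_hot(Tensor(np.array([0, 1, 2]), mindspore.int32), 3))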
mindspore/ops/extend/math_func.py +3 -3

@@ -1,4 +1,4 @@
-# Copyright 2020 Huawei Technologies Co., Ltd
+# Copyright 2023 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ Math Operators with better performance
 """

 from mindspore.ops import auto_generate as P
-from mindspore.ops.auto_generate.gen_ops_def import add_ext as add, sub_ext as sub
+from mindspore.ops.auto_generate.gen_ops_def import add_ext as add, sub_ext as sub, bmm_ext as bmm


 # define Primitive global variables
@@ -73,4 +73,4 @@ def baddbmm(input, batch1, batch2, beta=1, alpha=1):
     return P.baddbmm(input, batch1, batch2, beta, alpha)


-__all__ = ['baddbmm', 'add', 'sub']
+__all__ = ['baddbmm', 'add', 'sub', 'bmm']
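math_func now also re-exports bmm_ext as bmm; a quick hedged sketch of its use against the rc2 wheel:

import mindspore
import numpy as np
from mindspore import Tensor
from mindspore.ops import extend

a = Tensor(np.ones((2, 3, 4)), mindspore.float32)
b = Tensor(np.ones((2, 4, 5)), mindspore.float32)
print(extend.bmm(a, b).shape)  # batch matrix product: (2, 3, 5)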