mindspore 2.3.0rc1-cp37-cp37m-manylinux1_x86_64.whl → 2.3.0rc2-cp37-cp37m-manylinux1_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (226)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +1 -1
  3. mindspore/_akg/akg/utils/tbe_codegen_utils.py +13 -3
  4. mindspore/_c_dataengine.cpython-37m-x86_64-linux-gnu.so +0 -0
  5. mindspore/_c_expression.cpython-37m-x86_64-linux-gnu.so +0 -0
  6. mindspore/_checkparam.py +20 -0
  7. mindspore/_extends/parse/parser.py +1 -1
  8. mindspore/_extends/parse/standard_method.py +6 -5
  9. mindspore/_mindspore_offline_debug.cpython-37m-x86_64-linux-gnu.so +0 -0
  10. mindspore/amp.py +5 -5
  11. mindspore/bin/cache_admin +0 -0
  12. mindspore/bin/cache_server +0 -0
  13. mindspore/boost/boost_cell_wrapper.py +1 -1
  14. mindspore/boost/group_loss_scale_manager.py +1 -1
  15. mindspore/common/__init__.py +4 -2
  16. mindspore/common/_register_for_recompute.py +48 -0
  17. mindspore/common/_stub_tensor.py +1 -0
  18. mindspore/common/api.py +56 -4
  19. mindspore/common/dtype.py +5 -3
  20. mindspore/common/dump.py +2 -2
  21. mindspore/common/hook_handle.py +51 -4
  22. mindspore/common/initializer.py +1 -1
  23. mindspore/common/jit_config.py +17 -6
  24. mindspore/common/parameter.py +7 -2
  25. mindspore/common/recompute.py +247 -0
  26. mindspore/common/sparse_tensor.py +2 -2
  27. mindspore/common/symbol.py +1 -1
  28. mindspore/common/tensor.py +74 -36
  29. mindspore/communication/__init__.py +3 -3
  30. mindspore/communication/management.py +30 -30
  31. mindspore/context.py +28 -15
  32. mindspore/dataset/__init__.py +5 -5
  33. mindspore/dataset/audio/__init__.py +2 -2
  34. mindspore/dataset/audio/transforms.py +51 -51
  35. mindspore/dataset/callback/ds_callback.py +2 -2
  36. mindspore/dataset/engine/cache_client.py +1 -1
  37. mindspore/dataset/engine/datasets.py +3 -3
  38. mindspore/dataset/engine/datasets_audio.py +14 -14
  39. mindspore/dataset/engine/datasets_standard_format.py +3 -3
  40. mindspore/dataset/engine/datasets_text.py +38 -38
  41. mindspore/dataset/engine/datasets_user_defined.py +3 -3
  42. mindspore/dataset/engine/datasets_vision.py +68 -68
  43. mindspore/dataset/text/__init__.py +3 -3
  44. mindspore/dataset/text/transforms.py +26 -26
  45. mindspore/dataset/transforms/__init__.py +1 -1
  46. mindspore/dataset/vision/__init__.py +3 -3
  47. mindspore/dataset/vision/transforms.py +92 -92
  48. mindspore/dataset/vision/utils.py +1 -1
  49. mindspore/experimental/optim/adadelta.py +2 -2
  50. mindspore/experimental/optim/adagrad.py +2 -2
  51. mindspore/experimental/optim/adam.py +2 -2
  52. mindspore/experimental/optim/adamax.py +2 -2
  53. mindspore/experimental/optim/adamw.py +2 -2
  54. mindspore/experimental/optim/asgd.py +2 -2
  55. mindspore/experimental/optim/lr_scheduler.py +24 -20
  56. mindspore/experimental/optim/nadam.py +2 -2
  57. mindspore/experimental/optim/optimizer.py +1 -1
  58. mindspore/experimental/optim/radam.py +2 -2
  59. mindspore/experimental/optim/rmsprop.py +2 -2
  60. mindspore/experimental/optim/rprop.py +2 -2
  61. mindspore/experimental/optim/sgd.py +2 -2
  62. mindspore/hal/stream.py +2 -0
  63. mindspore/include/mindapi/base/types.h +5 -0
  64. mindspore/lib/libdnnl.so.2 +0 -0
  65. mindspore/lib/libmindspore.so +0 -0
  66. mindspore/lib/libmindspore_backend.so +0 -0
  67. mindspore/lib/libmindspore_common.so +0 -0
  68. mindspore/lib/libmindspore_core.so +0 -0
  69. mindspore/lib/libmindspore_glog.so.0 +0 -0
  70. mindspore/lib/libmindspore_gpr.so.15 +0 -0
  71. mindspore/lib/libmindspore_grpc++.so.1 +0 -0
  72. mindspore/lib/libmindspore_grpc.so.15 +0 -0
  73. mindspore/lib/libmindspore_shared_lib.so +0 -0
  74. mindspore/lib/libopencv_core.so.4.5 +0 -0
  75. mindspore/lib/libopencv_imgcodecs.so.4.5 +0 -0
  76. mindspore/lib/libopencv_imgproc.so.4.5 +0 -0
  77. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/aicpu_kernel/impl/libcust_cpu_kernels.so +0 -0
  78. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_impl/cpu/config/cust_aicpu_kernel.json +6 -6
  79. mindspore/lib/plugin/ascend/custom_aicpu_ops/op_proto/libcust_op_proto.so +0 -0
  80. mindspore/lib/plugin/ascend/libdvpp_utils.so +0 -0
  81. mindspore/lib/plugin/ascend/libmindspore_cpu_kernels.so +0 -0
  82. mindspore/lib/plugin/gpu/libcuda_ops.so.10 +0 -0
  83. mindspore/lib/plugin/gpu/libcuda_ops.so.11 +0 -0
  84. mindspore/lib/plugin/gpu10.1/libnccl.so.2 +0 -0
  85. mindspore/lib/plugin/gpu11.1/libnccl.so.2 +0 -0
  86. mindspore/lib/plugin/gpu11.6/libnccl.so.2 +0 -0
  87. mindspore/lib/plugin/libmindspore_ascend.so.2 +0 -0
  88. mindspore/lib/plugin/libmindspore_gpu.so.10.1 +0 -0
  89. mindspore/lib/plugin/libmindspore_gpu.so.11.1 +0 -0
  90. mindspore/lib/plugin/libmindspore_gpu.so.11.6 +0 -0
  91. mindspore/log.py +2 -2
  92. mindspore/mint/__init__.py +457 -0
  93. mindspore/mint/nn/__init__.py +430 -0
  94. mindspore/mint/nn/functional.py +424 -0
  95. mindspore/mint/optim/__init__.py +24 -0
  96. mindspore/mint/optim/adamw.py +186 -0
  97. mindspore/multiprocessing/__init__.py +4 -0
  98. mindspore/nn/__init__.py +3 -0
  99. mindspore/nn/cell.py +51 -47
  100. mindspore/nn/extend/__init__.py +29 -0
  101. mindspore/nn/extend/basic.py +140 -0
  102. mindspore/nn/extend/embedding.py +143 -0
  103. mindspore/nn/extend/layer/__init__.py +27 -0
  104. mindspore/nn/extend/layer/normalization.py +107 -0
  105. mindspore/nn/extend/pooling.py +117 -0
  106. mindspore/nn/generator.py +297 -0
  107. mindspore/nn/layer/basic.py +109 -1
  108. mindspore/nn/layer/container.py +2 -2
  109. mindspore/nn/layer/conv.py +6 -6
  110. mindspore/nn/layer/embedding.py +1 -1
  111. mindspore/nn/layer/normalization.py +21 -43
  112. mindspore/nn/layer/padding.py +4 -0
  113. mindspore/nn/optim/ada_grad.py +2 -2
  114. mindspore/nn/optim/adadelta.py +1 -1
  115. mindspore/nn/optim/adafactor.py +1 -1
  116. mindspore/nn/optim/adam.py +7 -7
  117. mindspore/nn/optim/adamax.py +2 -2
  118. mindspore/nn/optim/adasum.py +2 -2
  119. mindspore/nn/optim/asgd.py +2 -2
  120. mindspore/nn/optim/ftrl.py +1 -1
  121. mindspore/nn/optim/lamb.py +3 -3
  122. mindspore/nn/optim/lars.py +1 -1
  123. mindspore/nn/optim/lazyadam.py +2 -2
  124. mindspore/nn/optim/momentum.py +2 -2
  125. mindspore/nn/optim/optimizer.py +2 -2
  126. mindspore/nn/optim/proximal_ada_grad.py +2 -2
  127. mindspore/nn/optim/rmsprop.py +2 -2
  128. mindspore/nn/optim/rprop.py +2 -2
  129. mindspore/nn/optim/sgd.py +2 -2
  130. mindspore/nn/optim/thor.py +2 -2
  131. mindspore/nn/wrap/cell_wrapper.py +9 -9
  132. mindspore/nn/wrap/grad_reducer.py +5 -5
  133. mindspore/ops/_grad_experimental/grad_comm_ops.py +4 -2
  134. mindspore/ops/_vmap/vmap_grad_nn_ops.py +41 -2
  135. mindspore/ops/_vmap/vmap_math_ops.py +27 -8
  136. mindspore/ops/_vmap/vmap_nn_ops.py +66 -8
  137. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +73 -1
  138. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +12 -3
  139. mindspore/ops/auto_generate/gen_arg_handler.py +24 -0
  140. mindspore/ops/auto_generate/gen_extend_func.py +274 -0
  141. mindspore/ops/auto_generate/gen_ops_def.py +889 -22
  142. mindspore/ops/auto_generate/gen_ops_prim.py +3541 -253
  143. mindspore/ops/auto_generate/pyboost_inner_prim.py +282 -0
  144. mindspore/ops/composite/multitype_ops/_compile_utils.py +2 -1
  145. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +9 -0
  146. mindspore/ops/extend/__init__.py +9 -1
  147. mindspore/ops/extend/array_func.py +134 -27
  148. mindspore/ops/extend/math_func.py +3 -3
  149. mindspore/ops/extend/nn_func.py +363 -2
  150. mindspore/ops/function/__init__.py +19 -2
  151. mindspore/ops/function/array_func.py +463 -439
  152. mindspore/ops/function/clip_func.py +7 -18
  153. mindspore/ops/function/grad/grad_func.py +5 -5
  154. mindspore/ops/function/linalg_func.py +4 -4
  155. mindspore/ops/function/math_func.py +260 -243
  156. mindspore/ops/function/nn_func.py +825 -62
  157. mindspore/ops/function/random_func.py +73 -4
  158. mindspore/ops/function/sparse_unary_func.py +1 -1
  159. mindspore/ops/function/vmap_func.py +1 -1
  160. mindspore/ops/functional.py +2 -2
  161. mindspore/ops/op_info_register.py +1 -31
  162. mindspore/ops/operations/__init__.py +2 -3
  163. mindspore/ops/operations/_grad_ops.py +2 -107
  164. mindspore/ops/operations/_inner_ops.py +5 -5
  165. mindspore/ops/operations/_sequence_ops.py +2 -2
  166. mindspore/ops/operations/array_ops.py +11 -233
  167. mindspore/ops/operations/comm_ops.py +32 -32
  168. mindspore/ops/operations/custom_ops.py +7 -89
  169. mindspore/ops/operations/manually_defined/ops_def.py +329 -4
  170. mindspore/ops/operations/math_ops.py +13 -163
  171. mindspore/ops/operations/nn_ops.py +9 -316
  172. mindspore/ops/operations/random_ops.py +1 -1
  173. mindspore/ops/operations/sparse_ops.py +3 -3
  174. mindspore/ops/primitive.py +2 -2
  175. mindspore/ops_generate/arg_dtype_cast.py +12 -3
  176. mindspore/ops_generate/arg_handler.py +24 -0
  177. mindspore/ops_generate/gen_ops_inner_prim.py +2 -0
  178. mindspore/ops_generate/gen_pyboost_func.py +13 -6
  179. mindspore/ops_generate/pyboost_utils.py +2 -17
  180. mindspore/parallel/__init__.py +3 -2
  181. mindspore/parallel/_auto_parallel_context.py +106 -1
  182. mindspore/parallel/_parallel_serialization.py +34 -2
  183. mindspore/parallel/_utils.py +16 -0
  184. mindspore/parallel/algo_parameter_config.py +4 -4
  185. mindspore/parallel/checkpoint_transform.py +249 -77
  186. mindspore/parallel/cluster/process_entity/_api.py +1 -1
  187. mindspore/parallel/parameter_broadcast.py +1 -1
  188. mindspore/parallel/shard.py +1 -1
  189. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +1 -0
  190. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +17 -5
  191. mindspore/profiler/parser/ascend_msprof_exporter.py +3 -3
  192. mindspore/profiler/parser/ascend_msprof_generator.py +10 -3
  193. mindspore/profiler/parser/ascend_op_generator.py +26 -9
  194. mindspore/profiler/parser/ascend_timeline_generator.py +7 -4
  195. mindspore/profiler/parser/profiler_info.py +11 -1
  196. mindspore/profiler/profiling.py +13 -5
  197. mindspore/rewrite/api/node.py +12 -12
  198. mindspore/rewrite/api/symbol_tree.py +11 -11
  199. mindspore/run_check/_check_version.py +1 -1
  200. mindspore/safeguard/rewrite_obfuscation.py +2 -2
  201. mindspore/train/amp.py +4 -4
  202. mindspore/train/anf_ir_pb2.py +8 -2
  203. mindspore/train/callback/_backup_and_restore.py +2 -2
  204. mindspore/train/callback/_callback.py +4 -4
  205. mindspore/train/callback/_checkpoint.py +2 -2
  206. mindspore/train/callback/_early_stop.py +2 -2
  207. mindspore/train/callback/_landscape.py +4 -4
  208. mindspore/train/callback/_loss_monitor.py +2 -2
  209. mindspore/train/callback/_on_request_exit.py +2 -2
  210. mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
  211. mindspore/train/callback/_summary_collector.py +2 -2
  212. mindspore/train/callback/_time_monitor.py +2 -2
  213. mindspore/train/dataset_helper.py +8 -3
  214. mindspore/train/loss_scale_manager.py +2 -2
  215. mindspore/train/metrics/metric.py +3 -3
  216. mindspore/train/mind_ir_pb2.py +22 -17
  217. mindspore/train/model.py +15 -15
  218. mindspore/train/serialization.py +18 -18
  219. mindspore/train/summary/summary_record.py +7 -7
  220. mindspore/train/train_thor/convert_utils.py +3 -3
  221. mindspore/version.py +1 -1
  222. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/METADATA +1 -1
  223. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/RECORD +226 -212
  224. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/WHEEL +0 -0
  225. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/entry_points.txt +0 -0
  226. {mindspore-2.3.0rc1.dist-info → mindspore-2.3.0rc2.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,4 @@
- # Copyright 2020 Huawei Technologies Co., Ltd
+ # Copyright 2023 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -18,6 +18,367 @@
  NN Operators with better performance

  """
+ from mindspore.ops._primitive_cache import _get_cache_prim
+ from mindspore.ops.auto_generate.gen_ops_prim import Convolution, ConstantPadND, MaxPoolWithIndices, MaxPoolWithMask
+ from mindspore.ops.auto_generate import leaky_relu_ext
+ from mindspore.ops.auto_generate import BatchNormExt
+ from mindspore import ops
+ from mindspore import _checkparam as validator


- __all__ = []
+ def _check_stride_when_same_mode(stride):
+     """ stride must be 1 when pad mode is same """
+     if isinstance(stride, int):
+         if stride != 1:
+             raise ValueError(f"For conv2d, 'padding=same' is not supported for stride convolution, " \
+                              f"but got {stride}")
+     elif isinstance(stride, tuple):
+         validator.check_int(len(stride), 2, validator.EQ, "stride", 'conv2d')
+         if not all(s == 1 for s in stride):
+             raise ValueError(f"For conv2d, 'padding=same' is not supported for stride convolution, " \
+                              f"but got {stride}")
+     else:
+         raise TypeError(f"For conv2d, the parameter 'stride' must be a int/tuple, but got {type(stride)}")
+
+
+ def _get_pad_info(dilation, weight):
+     """ Get pad list by dilation and weight shape """
+     need_pad_nd = False
+     pad_l = ()
+     pad_r = ()
+     for i in range(2):
+         d = dilation[i]
+         weight_size = weight.shape[i + 2]
+         pad = d * (weight_size - 1)
+         pad_l += (int(pad / 2),)
+         pad_r += (int(pad - pad_l[i]),)
+         if pad_l[i] != pad_r[i]:
+             need_pad_nd = True
+     return need_pad_nd, pad_l, pad_r
+
+
+ def _get_pad_nd_info(pad_l, pad_r):
+     """ Get pad_nd list if input need to exec pad_nd """
+     pad_nd = ()
+     new_pad_l = ()
+     for i in range(2):
+         delta_pad = pad_r[i] - pad_l[i]
+         if delta_pad > 0:
+             pad_nd = (0, delta_pad,) + pad_nd
+             new_pad_l += (pad_l[i],)
+         else:
+             pad_nd = (delta_pad, 0,) + pad_nd
+             new_pad_l += (pad_r[i],)
+     return pad_nd, new_pad_l
+
+
+ def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
+     r"""
+     Applies a 2D convolution over an input tensor. The input tensor is typically of
+     shape :math:`(N, C_{in}, H_{in}, W_{in})`, where :math:`N` is batch size, :math:`C` is
+     channel number, :math:`H` is feature height, :math:`W` is feature width.
+
+     The output is calculated based on formula:
+
+     .. math::
+
+         \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
+         \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})
+
+     where :math:`bias` is the output channel bias, :math:`ccor` is
+     the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
+     :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
+
+     Here are the indices' meanings:
+
+     - :math:`i` corresponds to the batch number, the range is :math:`[0, N-1]`,
+       where :math:`N` is the batch size of the input.
+
+     - :math:`j` corresponds to the output channel, the range is :math:`[0, C_{out}-1]`,
+       where :math:`C_{out}` is the number of output channels, which is also equal to the number of kernels.
+
+     - :math:`k` corresponds to the input channel, the range is :math:`[0, C_{in}-1]`,
+       where :math:`C_{in}` is the number of
+       input channels, which is also equal to the number of channels in the convolutional kernels.
+
+     Therefore, in the above formula, :math:`{bias}(C_{out_j})` represents the bias of the :math:`j`-th
+     output channel, :math:`{weight}(C_{out_j}, k)` represents the slice of the :math:`j`-th convolutional
+     kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
+     channel in the :math:`i`-th batch of the input feature map.
+
+     The shape of the convolutional kernel is given by :math:`(\text{kernel_size[0]}, \text{kernel_size[1]})`,
+     where :math:`\text{kernel_size[0]}` and :math:`\text{kernel_size[1]}` are the height and width of the kernel,
+     respectively.
+     If we consider the input and output channels as well as the `group` parameter, the complete kernel shape
+     will be :math:`(C_{out}, C_{in} / \text{group}, \text{kernel_size[0]}, \text{kernel_size[1]})`,
+     where `group` is the number of groups dividing `x`'s input channel when applying group convolution.
+
+     For more details about convolution layer, please refer to `Gradient Based Learning Applied to Document Recognition
+     <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_ and
+     `ConvNets <http://cs231n.github.io/convolutional-networks/>`_.
+
+     Note:
+         On Ascend platform, only group convolution in depthwise convolution scenarios is supported.
+         That is, when `groups>1`, condition :math:`C_{in}` = :math:`C_{out}` = `groups` must be satisfied.
+
+     Args:
+         input (Tensor): Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
+         weight (Tensor): Tensor of shape
+             :math:`(N, C_{in} / \text{groups}, \text{kernel_size[0]}, \text{kernel_size[1]})`, then the size of kernel
+             is :math:`(\text{kernel_size[0]}, \text{kernel_size[1]})`.
+         bias (Tensor, optional): Bias Tensor with shape :math:`(C_{out})`.
+             When bias is ``None`` , zeros will be used. Default: ``None`` .
+         stride (Union(int, tuple[int]), optional): The distance of kernel moving, an int number that represents
+             the height and width of movement are both strides, or a tuple of two int numbers that
+             represent height and width of movement respectively. Default: ``1`` .
+         padding (Union(int, tuple[int], list[int], str), optional): Implicit paddings on both sides of the input `x`.
+             Can be a string, one integer or a tuple/list with 2 integers.
+             If `padding` is a string, the optional values are ``"same"`` , ``"valid"``.
+
+             - same: Adopts the way of completion. The height and width of the output will be equal to
+               the input `x` divided by stride. The padding will be evenly distributed between top and bottom,
+               left and right if possible. Otherwise, the last extra padding will be applied to the bottom
+               and the right side. If this mode is set, `padding` must be 0.
+
+             - valid: Adopts the way of discarding. The possible largest height and width of output will be returned
+               without padding. Extra pixels will be discarded. If this mode is set, `padding` must be 0.
+
+             If `padding` is one integer, the paddings of top, bottom, left and right are the same, equal to padding.
+             If `padding` is a tuple/list with 2 integers, the padding of top and bottom is padding[0],
+             and the padding of left and right is padding[1]. Default: ``0`` .
+         dilation (Union(int, tuple[int]), optional): Gaps between kernel elements. The data type is int or a tuple of
+             2 integers. Specifies the dilation rate to use for dilated convolution. If set to be :math:`k > 1`,
+             there will be :math:`k - 1` pixels skipped for each sampling location. Its value must
+             be greater than or equal to 1 and bounded by the height and width of the input `x`. Default: ``1`` .
+         groups (int, optional): Splits `input` into groups. Default: ``1`` .
+
+     Returns:
+         Tensor, the value that applied 2D convolution. The shape is :math:`(N, C_{out}, H_{out}, W_{out})`.
+         To see how different pad modes affect the output shape, please refer to
+         :class:`mindspore.nn.Conv2d` for more details.
+
+
+     Raises:
+         TypeError: If `stride`, `padding` or `dilation` is neither an int nor a tuple.
+         TypeError: If `groups` is not an int.
+         TypeError: If `bias` is not a Tensor.
+         ValueError: If the shape of `bias` is not :math:`(C_{out})` .
+         ValueError: If `stride` or `dilation` is less than 1.
+         ValueError: If `pad_mode` is not one of 'same', 'valid' or 'pad'.
+         ValueError: If `padding` is a tuple/list whose length is not equal to 2.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import mindspore
+         >>> import numpy as np
+         >>> from mindspore import Tensor, ops
+         >>> x = Tensor(np.ones([10, 32, 32, 32]), mindspore.float32)
+         >>> weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32)
+         >>> output = ops.extend.conv2d(x, weight)
+         >>> print(output.shape)
+         (10, 32, 30, 30)
+     """
+
+     def _convolution_same(input, weight, bias, dilation, groups):
+         """ convolution when mode is 'same' """
+         if isinstance(dilation, int):
+             dilation = (dilation,) * 2
+         validator.check_int(len(weight.shape), 4, validator.EQ, "weight.shape", 'conv2d')
+         validator.check_int(len(dilation), 2, validator.EQ, "dilation", 'conv2d')
+
+         # Calc padding info
+         need_pad_nd, pad_l, pad_r = _get_pad_info(dilation, weight)
+         if not need_pad_nd:
+             conv = _get_cache_prim(Convolution)(stride, pad_l, dilation, False, (0, 0), groups)
+             return conv(input, weight, bias)
+
+         # Calc pad nd info
+         pad_nd, pad_l = _get_pad_nd_info(pad_l, pad_r)
+         pad_nd_op = _get_cache_prim(ConstantPadND)()
+         padded_input = pad_nd_op(input, pad_nd, 0)
+         conv = _get_cache_prim(Convolution)(stride, pad_l, dilation, False, (0, 0), groups)
+         return conv(padded_input, weight, bias)
+
+     if isinstance(padding, int):
+         padding = (padding,) * 2
+
+     if isinstance(padding, (tuple, list)):
+         conv = _get_cache_prim(Convolution)(stride, padding, dilation, False, (0, 0), groups)
+         return conv(input, weight, bias)
+     if isinstance(padding, str):
+         if padding == 'valid':
+             conv = _get_cache_prim(Convolution)(stride, (0, 0), dilation, False, (0, 0), groups)
+             return conv(input, weight, bias)
+         if padding == 'same':
+             _check_stride_when_same_mode(stride)
+             return _convolution_same(input, weight, bias, dilation, groups)
+         raise ValueError(f"For conv2d, the parameter 'padding' must be 'same' or 'valid' when " \
+                          f"the type of 'padding' is string.")
+     raise TypeError(f"For conv2d, the parameter 'padding' must be a tuple/list " \
+                     f"or a string, but got {type(padding)}")
+
+
+ def max_pool2d(input, kernel_size, stride=None, padding=0, dilation=1, *, ceil_mode=False, return_indices=False):
+     r"""
+     Performs a 2D max pooling on the input Tensor.
+
+     Typically, the input is a Tensor with shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})`, outputs
+     regional maximum in the :math:`(H_{in}, W_{in})`-dimension. Given `kernel_size`
+     :math:`ks = (h_{ker}, w_{ker})` and `stride` :math:`s = (s_0, s_1)`, the operation is as follows:
+
+     .. math::
+         \text{output}(N_i, C_j, h, w) =
+         \max_{m=0, \ldots, h_{ker}-1} \max_{n=0, \ldots, w_{ker}-1}
+         \text{input}(N_i, C_j, s_0 \times h + m, s_1 \times w + n)
+
+     .. warning::
+         Only supported on Atlas training series.
+
+     Args:
+         input (Tensor): Tensor of shape :math:`(N_{in}, C_{in}, H_{in}, W_{in})` with data type of float32
+             in Ascend.
+         kernel_size (Union[int, tuple[int]]): The size of kernel used to take the maximum value and arg
+             value, is an int number that represents height and width of the kernel, or a tuple of
+             two int numbers that represent height and width respectively.
+         stride (Union[int, tuple[int], None]): The distance of kernel moving, an int number that represents
+             the height and width of movement are both stride, or a tuple of two int numbers that
+             represent height and width of movement respectively.
+             Default: ``None`` , which indicates the moving step is `kernel_size` .
+         padding (Union[int, tuple[int]]): An int number that represents the implicit padding added to both sides
+             of the height and width dimensions, or a tuple of two int numbers that represent the height and
+             width padding respectively. Default: ``0`` .
+         dilation (Union[int, tuple[int]]): Controls the stride of elements in the kernel. Default: ``1`` .
+         ceil_mode (bool): Whether to use ceil instead of floor to calculate output shape. Default: ``False`` .
+         return_indices (bool): Whether to output the indices of max value. Default: ``False`` .
+
+     Returns:
+         If `return_indices` is ``False`` , return a Tensor `output`, else return a tuple (`output`, `argmax`).
+
+         - **output** (Tensor) - Maxpooling result, with shape :math:`(N_{out}, C_{out}, H_{out}, W_{out})`.
+           It has the same data type as `input`.
+
+           .. math::
+               H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{padding[0]} - \text{dilation[0]}
+               \times (\text{kernel_size[0]} - 1) - 1}{\text{stride[0]}} + 1\right\rfloor
+
+           .. math::
+               W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{padding[1]} - \text{dilation[1]}
+               \times (\text{kernel_size[1]} - 1) - 1}{\text{stride[1]}} + 1\right\rfloor
+
+         - **argmax** (Tensor) - Index corresponding to the maximum value. In Ascend, data type is int32.
+           It will be returned only when `return_indices` is True.
+
+     Raises:
+         TypeError: If `input` is not a Tensor.
+         ValueError: If length of shape of `input` is not equal to 4.
+         TypeError: If `kernel_size` , `stride` , `padding` or `dilation` is not int or tuple.
+         ValueError: If `kernel_size`, `stride` or `dilation` is less than 1.
+         ValueError: If `dilation` is not all 1.
+         ValueError: If `padding` is less than 0.
+         ValueError: If `padding` is more than half of `kernel_size`.
+         TypeError: If `ceil_mode` is not bool.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import mindspore
+         >>> import numpy as np
+         >>> from mindspore import Tensor, ops
+         >>> input = Tensor(np.arange(20 * 16 * 50 * 32).reshape((20, 16, 50, 32)), mindspore.float32)
+         >>> output_tensor, argmax = ops.extend.max_pool2d(input, kernel_size=(3, 2), stride=(2, 1),
+         ...                                               ceil_mode=False, return_indices=True)
+         >>> print(output_tensor.shape)
+         (20, 16, 24, 31)
+         >>> print(argmax.shape)
+         (20, 16, 24, 31)
+     """
+     strides = stride if (stride is not None) else kernel_size
+     if return_indices:
+         max_pool_func_ = _get_cache_prim(MaxPoolWithIndices)(kernel_size, strides, padding, dilation, ceil_mode)
+         out, indices = max_pool_func_(input)
+     else:
+         max_pool_func_ = _get_cache_prim(MaxPoolWithMask)(kernel_size, strides, padding, dilation, ceil_mode)
+         out, indices = max_pool_func_(input)
+     if return_indices:
+         return out, indices
+     return out
+
+
+ def batch_norm(input, running_mean, running_var, weight=None, bias=None, training=False, momentum=0.1, eps=1e-5):
+     r"""
+     Batch Normalization for input data and updated parameters.
+
+     Batch Normalization is widely used in convolutional neural networks. This operation
+     applies Batch Normalization over inputs to avoid internal covariate shift as described
+     in the paper `Batch Normalization: Accelerating Deep Network Training by Reducing Internal
+     Covariate Shift <https://arxiv.org/abs/1502.03167>`_. It rescales and recenters the
+     features using a mini-batch of data and the learned parameters can be described
+     in the following formula,
+
+     .. math::
+
+         y = \frac{x - mean}{\sqrt{variance + \epsilon}} * \gamma + \beta
+
+     where :math:`\gamma` is `weight`, :math:`\beta` is `bias`, :math:`\epsilon` is `eps`, :math:`mean` is the
+     mean of :math:`x`, :math:`variance` is the variance of :math:`x`.
+
+     .. warning::
+         - For Atlas 200/300/500 inference product,
+           the result accuracy fails to reach 1‰ due to the square root instruction.
+
+     Note:
+         - If `training` is `False`, `weight`, `bias`, `running_mean` and `running_var` are Tensors.
+         - If `training` is `True`, `weight`, `bias`, `running_mean` and `running_var` are Parameters.
+
+     Args:
+         input (Tensor): Tensor of shape :math:`(N, C, *)`, with bfloat16, float16 or float32 data type.
+         running_mean (Union[Tensor, Parameter]): The shape :math:`(C,)`, has the same data type with `weight`.
+         running_var (Union[Tensor, Parameter]): The shape :math:`(C,)`, has the same data type with `weight`.
+         weight (Union[Tensor, Parameter]): The shape :math:`(C,)`, with bfloat16, float16 or float32 data type.
+         bias (Union[Tensor, Parameter]): The shape :math:`(C,)`, has the same data type with `weight`.
+         training (bool, optional): If `training` is `True`, `mean` and `variance` are computed during training.
+             If `training` is `False`, they're loaded from checkpoint during inference. Default: ``False`` .
+         momentum (float, optional): The hyper parameter to compute moving average for `running_mean` and `running_var`
+             (e.g. :math:`new\_running\_mean = (1 - momentum) * running\_mean + momentum * current\_mean`).
+             Default: ``0.1`` .
+         eps (float, optional): A small value added for numerical stability. Default: ``1e-5``.
+
+     Returns:
+         output_x (Tensor) - The same type and shape as the `input`. The shape is :math:`(N, C, *)`.
+
+     Raises:
+         TypeError: If `training` is not a bool.
+         TypeError: If dtype of `eps` or `momentum` is not float.
+         TypeError: If `input`, `weight`, `bias`, `running_mean` or `running_var` is not a Tensor.
+         TypeError: If dtype of `input` or `weight` is not bfloat16, float16 or float32.
+
+     Supported Platforms:
+         ``Ascend``
+
+     Examples:
+         >>> import mindspore
+         >>> from mindspore import Tensor, ops
+         >>> input_x = Tensor([[1.0, 2.0], [3.0, 4.0]], mindspore.float32)
+         >>> running_mean = Tensor([0.5, 1.5], mindspore.float32)
+         >>> running_var = Tensor([0.1, 0.2], mindspore.float32)
+         >>> weight = Tensor([2.0, 2.0], mindspore.float32)
+         >>> bias = Tensor([-1.0, -1.0], mindspore.float32)
+         >>> output = ops.batch_norm(input_x, running_mean, running_var, weight, bias)
+         >>> print(output)
+         [[ 2.1621194  1.2360122]
+          [14.810596  10.180061 ]]
+     """
+     if weight is None:
+         weight = ops.ones([input.shape[1]], dtype=input.dtype)
+     if bias is None:
+         bias = ops.zeros([input.shape[1]], dtype=input.dtype)
+     batch_norm_op = BatchNormExt(training=training, momentum=momentum, eps=eps)
+     output = batch_norm_op(input, weight, bias, running_mean, running_var)
+     return output[0]
+
+
+
+ __all__ = ['conv2d', 'max_pool2d', 'leaky_relu_ext', 'batch_norm']
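
A note on the hunk above (not part of the published diff): the new ops.extend.conv2d routes string padding through _check_stride_when_same_mode and _convolution_same, so padding='same' only accepts stride 1 and, when the total required padding is odd, pads asymmetrically through ConstantPadND before calling Convolution. A minimal usage sketch, assuming MindSpore 2.3.0rc2 on Ascend where this module is available; the expected shapes follow from the docstring formula rather than from running this snippet:

    import numpy as np
    import mindspore
    from mindspore import Tensor, ops

    x = Tensor(np.ones([10, 32, 32, 32]), mindspore.float32)     # (N, C_in, H, W)
    weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32)  # (C_out, C_in, kH, kW)

    # 'valid' adds no padding: a 3x3 kernel shrinks 32x32 down to 30x30.
    out_valid = ops.extend.conv2d(x, weight, padding='valid')
    # 'same' preserves the spatial size and requires stride == 1.
    out_same = ops.extend.conv2d(x, weight, padding='same')
    print(out_valid.shape)  # expected: (10, 32, 30, 30)
    print(out_same.shape)   # expected: (10, 32, 32, 32)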
@@ -1,4 +1,4 @@
- # Copyright 2022-2023 Huawei Technologies Co., Ltd
+ # Copyright 2022-2024 Huawei Technologies Co., Ltd
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -99,6 +99,7 @@ from .array_func import (
      tensor_scatter_elements,
      scatter,
      scatter_add,
+     scatter_add_ext,
      scatter_mul,
      scatter_max,
      scatter_min,
@@ -129,6 +130,7 @@ from .array_func import (
      unsorted_segment_sum,
      col2im,
      split,
+     split_ext,
      tensor_split,
      vsplit,
      hsplit,
@@ -136,7 +138,6 @@ from .array_func import (
      index_fill,
      index_select,
      max,
-     argmax,
      min,
      population_count,
      topk,
@@ -163,6 +164,10 @@ from .array_func import (
      sort,
      top_k,
      deepcopy,
+     arange_ext,
+     zeros_like_ext,
+     ones_like_ext,
+     full_ext,
  )
  from .parameter_func import (
      assign,
@@ -175,6 +180,7 @@ from .math_func import (
      addn,
      absolute,
      abs,
+     argmax,
      argmin,
      angle,
      bincount,
@@ -194,6 +200,7 @@ from .math_func import (
      le,
      lerp,
      norm,
+     norm_ext,
      vector_norm,
      matrix_norm,
      round,
@@ -261,6 +268,7 @@ from .math_func import (
      matrix_determinant,
      det,
      linspace,
+     linspace_ext,
      lu_solve,
      matrix_solve,
      maximum,
@@ -366,6 +374,7 @@ from .math_func import (
      amin,
      amax,
      mean,
+     mean_ext,
      prod,
      all,
      any,
@@ -464,6 +473,7 @@ from .nn_func import (
      dropout3d,
      dense,
      deformable_conv2d,
+     embedding,
      fast_gelu,
      flip,
      fliplr,
@@ -479,6 +489,7 @@ from .nn_func import (
      interpolate,
      upsample,
      kl_div,
+     layer_norm,
      log_softmax,
      lrn,
      mish,
@@ -499,6 +510,7 @@ from .nn_func import (
      softplus,
      pdist,
      pad,
+     pad_ext,
      prelu,
      mirror_pad,
      nll_loss,
@@ -514,6 +526,7 @@ from .nn_func import (
      conv3d_transpose,
      conv1d,
      conv2d,
+     conv_transpose2d,
      sigmoid,
      logsigmoid,
      relu,
@@ -535,6 +548,8 @@ from .nn_func import (
      msort,
      channel_shuffle,
      hardsigmoid,
+     group_norm,
+     dropout_ext,
  )
  from .linalg_func import (
      cond,
@@ -585,6 +600,7 @@ from .random_func import (
      standard_laplace,
      random_categorical,
      uniform,
+     uniform_ext,
      standard_normal,
      random_gamma,
      uniform_candidate_sampler,
@@ -592,6 +608,7 @@ from .random_func import (
      log_uniform_candidate_sampler,
      shuffle,
      choice_with_mask,
+     normal_ext,
      normal,
      laplace,
      gamma,
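
A note on the argmax lines in the hunks above (an observation, not part of the published diff): the name is removed from the .array_func import block and re-added under .math_func, so the public mindspore.ops.argmax entry point is unchanged; only its internal home module moves. A quick sanity-check sketch, assuming MindSpore 2.3.0rc2 and the 2.x functional signature with a dim keyword:

    import mindspore
    from mindspore import Tensor, ops

    x = Tensor([[0.2, 0.9, 0.1]], mindspore.float32)
    # Still importable from the same public location after the internal move.
    print(ops.argmax(x, dim=-1))  # expected: [1]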