mindspore-2.7.0-cp311-cp311-win_amd64.whl → mindspore-2.7.1-cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of mindspore might be problematic.

Files changed (290)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +4 -1
  3. mindspore/_c_dataengine.cp311-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp311-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp311-win_amd64.pyd +0 -0
  6. mindspore/_extends/parse/compile_config.py +24 -1
  7. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +6 -2
  8. mindspore/_extends/parse/resources.py +1 -1
  9. mindspore/_extends/parse/standard_method.py +8 -1
  10. mindspore/_extends/parse/trope.py +2 -1
  11. mindspore/_extends/pijit/pijit_func_white_list.py +7 -22
  12. mindspore/avcodec-59.dll +0 -0
  13. mindspore/avdevice-59.dll +0 -0
  14. mindspore/avfilter-8.dll +0 -0
  15. mindspore/avformat-59.dll +0 -0
  16. mindspore/avutil-57.dll +0 -0
  17. mindspore/boost/base.py +29 -2
  18. mindspore/common/_decorator.py +3 -2
  19. mindspore/common/_grad_function.py +3 -1
  20. mindspore/common/_tensor_cpp_method.py +1 -1
  21. mindspore/common/_tensor_docs.py +275 -64
  22. mindspore/common/_utils.py +0 -44
  23. mindspore/common/api.py +285 -35
  24. mindspore/common/dump.py +7 -108
  25. mindspore/common/dynamic_shape/auto_dynamic_shape.py +1 -3
  26. mindspore/common/hook_handle.py +60 -0
  27. mindspore/common/jit_config.py +5 -1
  28. mindspore/common/jit_trace.py +27 -12
  29. mindspore/common/lazy_inline.py +5 -3
  30. mindspore/common/parameter.py +13 -107
  31. mindspore/common/recompute.py +4 -11
  32. mindspore/common/tensor.py +16 -169
  33. mindspore/communication/_comm_helper.py +11 -1
  34. mindspore/communication/comm_func.py +138 -4
  35. mindspore/communication/management.py +85 -1
  36. mindspore/config/op_info.config +0 -15
  37. mindspore/context.py +5 -85
  38. mindspore/dataset/engine/datasets.py +8 -4
  39. mindspore/dataset/engine/datasets_vision.py +1 -1
  40. mindspore/dataset/engine/validators.py +1 -15
  41. mindspore/dnnl.dll +0 -0
  42. mindspore/{experimental/llm_boost/ascend_native → graph}/__init__.py +7 -7
  43. mindspore/graph/custom_pass.py +55 -0
  44. mindspore/include/dataset/execute.h +2 -2
  45. mindspore/jpeg62.dll +0 -0
  46. mindspore/mindrecord/__init__.py +3 -3
  47. mindspore/mindrecord/common/exceptions.py +1 -0
  48. mindspore/mindrecord/config.py +1 -1
  49. mindspore/{parallel/mpi → mindrecord/core}/__init__.py +4 -1
  50. mindspore/mindrecord/{shardheader.py → core/shardheader.py} +2 -1
  51. mindspore/mindrecord/{shardindexgenerator.py → core/shardindexgenerator.py} +1 -1
  52. mindspore/mindrecord/{shardreader.py → core/shardreader.py} +2 -1
  53. mindspore/mindrecord/{shardsegment.py → core/shardsegment.py} +2 -2
  54. mindspore/mindrecord/{shardutils.py → core/shardutils.py} +1 -1
  55. mindspore/mindrecord/{shardwriter.py → core/shardwriter.py} +1 -1
  56. mindspore/mindrecord/filereader.py +4 -4
  57. mindspore/mindrecord/filewriter.py +5 -5
  58. mindspore/mindrecord/mindpage.py +2 -2
  59. mindspore/mindrecord/tools/cifar10.py +1 -1
  60. mindspore/mindrecord/tools/cifar100.py +1 -1
  61. mindspore/mindrecord/tools/cifar100_to_mr.py +1 -1
  62. mindspore/mindrecord/tools/cifar10_to_mr.py +1 -1
  63. mindspore/mindrecord/tools/csv_to_mr.py +1 -1
  64. mindspore/mindrecord/tools/imagenet_to_mr.py +1 -1
  65. mindspore/mindrecord/tools/mnist_to_mr.py +1 -1
  66. mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -1
  67. mindspore/mindspore_backend_common.dll +0 -0
  68. mindspore/mindspore_backend_manager.dll +0 -0
  69. mindspore/mindspore_cluster.dll +0 -0
  70. mindspore/mindspore_common.dll +0 -0
  71. mindspore/mindspore_core.dll +0 -0
  72. mindspore/mindspore_cpu.dll +0 -0
  73. mindspore/mindspore_dump.dll +0 -0
  74. mindspore/mindspore_frontend.dll +0 -0
  75. mindspore/mindspore_glog.dll +0 -0
  76. mindspore/mindspore_hardware_abstract.dll +0 -0
  77. mindspore/mindspore_memory_pool.dll +0 -0
  78. mindspore/mindspore_ms_backend.dll +0 -0
  79. mindspore/mindspore_ops.dll +0 -0
  80. mindspore/{mindspore_ops_host.dll → mindspore_ops_cpu.dll} +0 -0
  81. mindspore/mindspore_profiler.dll +0 -0
  82. mindspore/mindspore_pyboost.dll +0 -0
  83. mindspore/mindspore_pynative.dll +0 -0
  84. mindspore/mindspore_runtime_pipeline.dll +0 -0
  85. mindspore/mindspore_runtime_utils.dll +0 -0
  86. mindspore/mindspore_tools.dll +0 -0
  87. mindspore/mint/__init__.py +15 -10
  88. mindspore/mint/distributed/distributed.py +182 -62
  89. mindspore/mint/nn/__init__.py +2 -16
  90. mindspore/mint/nn/functional.py +4 -110
  91. mindspore/mint/nn/layer/__init__.py +0 -2
  92. mindspore/mint/nn/layer/activation.py +0 -6
  93. mindspore/mint/nn/layer/basic.py +0 -47
  94. mindspore/mint/nn/layer/conv.py +4 -4
  95. mindspore/mint/nn/layer/normalization.py +8 -13
  96. mindspore/mint/nn/layer/pooling.py +0 -4
  97. mindspore/nn/__init__.py +1 -3
  98. mindspore/nn/cell.py +16 -66
  99. mindspore/nn/layer/basic.py +49 -1
  100. mindspore/nn/layer/container.py +16 -0
  101. mindspore/nn/layer/embedding.py +4 -169
  102. mindspore/nn/layer/normalization.py +2 -1
  103. mindspore/nn/layer/thor_layer.py +4 -85
  104. mindspore/nn/optim/ada_grad.py +0 -1
  105. mindspore/nn/optim/adafactor.py +0 -1
  106. mindspore/nn/optim/adam.py +31 -124
  107. mindspore/nn/optim/adamax.py +0 -1
  108. mindspore/nn/optim/asgd.py +0 -1
  109. mindspore/nn/optim/ftrl.py +8 -102
  110. mindspore/nn/optim/lamb.py +0 -1
  111. mindspore/nn/optim/lars.py +0 -3
  112. mindspore/nn/optim/lazyadam.py +25 -218
  113. mindspore/nn/optim/momentum.py +5 -43
  114. mindspore/nn/optim/optimizer.py +6 -55
  115. mindspore/nn/optim/proximal_ada_grad.py +0 -1
  116. mindspore/nn/optim/rmsprop.py +0 -1
  117. mindspore/nn/optim/rprop.py +0 -1
  118. mindspore/nn/optim/sgd.py +0 -1
  119. mindspore/nn/optim/tft_wrapper.py +0 -1
  120. mindspore/nn/optim/thor.py +0 -2
  121. mindspore/nn/probability/bijector/bijector.py +7 -8
  122. mindspore/nn/probability/bijector/gumbel_cdf.py +2 -2
  123. mindspore/nn/probability/bijector/power_transform.py +20 -21
  124. mindspore/nn/probability/bijector/scalar_affine.py +5 -5
  125. mindspore/nn/probability/bijector/softplus.py +13 -14
  126. mindspore/nn/wrap/grad_reducer.py +4 -74
  127. mindspore/numpy/array_creations.py +2 -2
  128. mindspore/numpy/fft.py +9 -9
  129. mindspore/{nn/reinforcement → onnx}/__init__.py +5 -8
  130. mindspore/onnx/onnx_export.py +137 -0
  131. mindspore/opencv_core4110.dll +0 -0
  132. mindspore/opencv_imgcodecs4110.dll +0 -0
  133. mindspore/{opencv_imgproc452.dll → opencv_imgproc4110.dll} +0 -0
  134. mindspore/ops/__init__.py +2 -0
  135. mindspore/ops/_grad_experimental/grad_comm_ops.py +38 -2
  136. mindspore/ops/_op_impl/aicpu/__init__.py +0 -10
  137. mindspore/ops/_op_impl/cpu/__init__.py +0 -5
  138. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +16 -22
  139. mindspore/ops/auto_generate/gen_extend_func.py +2 -7
  140. mindspore/ops/auto_generate/gen_ops_def.py +98 -141
  141. mindspore/ops/auto_generate/gen_ops_prim.py +12708 -12686
  142. mindspore/ops/communication.py +97 -0
  143. mindspore/ops/composite/__init__.py +5 -2
  144. mindspore/ops/composite/base.py +15 -1
  145. mindspore/ops/composite/multitype_ops/__init__.py +3 -1
  146. mindspore/ops/composite/multitype_ops/_compile_utils.py +150 -8
  147. mindspore/ops/composite/multitype_ops/add_impl.py +7 -0
  148. mindspore/ops/composite/multitype_ops/mod_impl.py +27 -0
  149. mindspore/ops/function/__init__.py +1 -0
  150. mindspore/ops/function/array_func.py +14 -12
  151. mindspore/ops/function/comm_func.py +3883 -0
  152. mindspore/ops/function/debug_func.py +3 -4
  153. mindspore/ops/function/math_func.py +45 -54
  154. mindspore/ops/function/nn_func.py +75 -294
  155. mindspore/ops/function/random_func.py +9 -18
  156. mindspore/ops/functional.py +2 -0
  157. mindspore/ops/functional_overload.py +354 -18
  158. mindspore/ops/operations/__init__.py +2 -5
  159. mindspore/ops/operations/_custom_ops_utils.py +7 -9
  160. mindspore/ops/operations/_inner_ops.py +1 -38
  161. mindspore/ops/operations/_rl_inner_ops.py +0 -933
  162. mindspore/ops/operations/array_ops.py +1 -0
  163. mindspore/ops/operations/comm_ops.py +94 -2
  164. mindspore/ops/operations/custom_ops.py +228 -19
  165. mindspore/ops/operations/debug_ops.py +27 -29
  166. mindspore/ops/operations/manually_defined/ops_def.py +27 -306
  167. mindspore/ops/operations/nn_ops.py +2 -2
  168. mindspore/ops/operations/sparse_ops.py +0 -83
  169. mindspore/ops/primitive.py +1 -17
  170. mindspore/ops/tensor_method.py +72 -3
  171. mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +5 -5
  172. mindspore/ops_generate/aclnn/gen_aclnn_implement.py +8 -8
  173. mindspore/ops_generate/api/functions_cc_generator.py +53 -4
  174. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +25 -11
  175. mindspore/ops_generate/common/gen_constants.py +11 -10
  176. mindspore/ops_generate/common/op_proto.py +18 -1
  177. mindspore/ops_generate/common/template.py +102 -245
  178. mindspore/ops_generate/common/template_utils.py +212 -0
  179. mindspore/ops_generate/gen_custom_ops.py +69 -0
  180. mindspore/ops_generate/op_def/ops_def_cc_generator.py +78 -7
  181. mindspore/ops_generate/op_def_py/base_op_prim_py_generator.py +360 -0
  182. mindspore/ops_generate/op_def_py/custom_op_prim_py_generator.py +140 -0
  183. mindspore/ops_generate/op_def_py/op_def_py_generator.py +54 -7
  184. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -312
  185. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +74 -17
  186. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +22 -5
  187. mindspore/ops_generate/pyboost/op_template_parser.py +3 -2
  188. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +21 -5
  189. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +2 -2
  190. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +30 -10
  191. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +10 -3
  192. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +1 -1
  193. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +19 -9
  194. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +71 -28
  195. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +10 -9
  196. mindspore/ops_generate/pyboost/pyboost_utils.py +27 -16
  197. mindspore/ops_generate/resources/yaml_loader.py +13 -0
  198. mindspore/ops_generate/tensor_py_cc_generator.py +2 -2
  199. mindspore/parallel/_cell_wrapper.py +1 -1
  200. mindspore/parallel/_parallel_serialization.py +1 -4
  201. mindspore/parallel/_utils.py +29 -6
  202. mindspore/parallel/checkpoint_transform.py +18 -2
  203. mindspore/parallel/cluster/process_entity/_api.py +24 -32
  204. mindspore/parallel/cluster/process_entity/_utils.py +9 -5
  205. mindspore/{experimental/llm_boost/atb → parallel/distributed}/__init__.py +21 -23
  206. mindspore/parallel/distributed/distributed_data_parallel.py +393 -0
  207. mindspore/parallel/distributed/flatten_grad_buffer.py +295 -0
  208. mindspore/parallel/strategy.py +336 -0
  209. mindspore/parallel/transform_safetensors.py +117 -16
  210. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +3 -0
  211. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +1 -1
  212. mindspore/profiler/common/constant.py +5 -0
  213. mindspore/profiler/common/file_manager.py +9 -0
  214. mindspore/profiler/common/msprof_cmd_tool.py +38 -2
  215. mindspore/profiler/common/path_manager.py +56 -24
  216. mindspore/profiler/common/profiler_context.py +2 -12
  217. mindspore/profiler/common/profiler_info.py +3 -3
  218. mindspore/profiler/common/profiler_path_manager.py +13 -0
  219. mindspore/profiler/common/util.py +30 -3
  220. mindspore/profiler/experimental_config.py +2 -1
  221. mindspore/profiler/platform/npu_profiler.py +33 -6
  222. mindspore/run_check/_check_version.py +108 -24
  223. mindspore/runtime/__init__.py +3 -2
  224. mindspore/runtime/executor.py +11 -3
  225. mindspore/runtime/memory.py +112 -0
  226. mindspore/swresample-4.dll +0 -0
  227. mindspore/swscale-6.dll +0 -0
  228. mindspore/tinyxml2.dll +0 -0
  229. mindspore/{experimental/llm_boost → tools}/__init__.py +5 -5
  230. mindspore/tools/data_dump.py +130 -0
  231. mindspore/tools/sdc_detect.py +91 -0
  232. mindspore/tools/stress_detect.py +63 -0
  233. mindspore/train/__init__.py +6 -6
  234. mindspore/train/_utils.py +5 -18
  235. mindspore/train/amp.py +6 -4
  236. mindspore/train/callback/_checkpoint.py +0 -9
  237. mindspore/train/callback/_train_fault_tolerance.py +69 -18
  238. mindspore/train/data_sink.py +1 -5
  239. mindspore/train/model.py +38 -211
  240. mindspore/train/serialization.py +126 -387
  241. mindspore/turbojpeg.dll +0 -0
  242. mindspore/utils/__init__.py +6 -3
  243. mindspore/utils/dlpack.py +92 -0
  244. mindspore/utils/dryrun.py +1 -1
  245. mindspore/utils/runtime_execution_order_check.py +10 -0
  246. mindspore/utils/sdc_detect.py +14 -12
  247. mindspore/utils/stress_detect.py +43 -0
  248. mindspore/utils/utils.py +144 -8
  249. mindspore/version.py +1 -1
  250. {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/METADATA +3 -2
  251. {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/RECORD +254 -267
  252. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -210
  253. mindspore/experimental/llm_boost/ascend_native/llm_boost.py +0 -52
  254. mindspore/experimental/llm_boost/atb/boost_base.py +0 -385
  255. mindspore/experimental/llm_boost/atb/llama_boost.py +0 -137
  256. mindspore/experimental/llm_boost/atb/qwen_boost.py +0 -124
  257. mindspore/experimental/llm_boost/register.py +0 -130
  258. mindspore/experimental/llm_boost/utils.py +0 -31
  259. mindspore/include/OWNERS +0 -7
  260. mindspore/mindspore_cpu_res_manager.dll +0 -0
  261. mindspore/mindspore_ops_kernel_common.dll +0 -0
  262. mindspore/mindspore_res_manager.dll +0 -0
  263. mindspore/nn/optim/_dist_optimizer_registry.py +0 -111
  264. mindspore/nn/reinforcement/_batch_read_write.py +0 -142
  265. mindspore/nn/reinforcement/_tensors_queue.py +0 -152
  266. mindspore/nn/reinforcement/tensor_array.py +0 -145
  267. mindspore/opencv_core452.dll +0 -0
  268. mindspore/opencv_imgcodecs452.dll +0 -0
  269. mindspore/ops/_op_impl/aicpu/priority_replay_buffer.py +0 -113
  270. mindspore/ops/_op_impl/aicpu/reservoir_replay_buffer.py +0 -96
  271. mindspore/ops/_op_impl/aicpu/sparse_cross.py +0 -42
  272. mindspore/ops/_op_impl/cpu/buffer_append.py +0 -28
  273. mindspore/ops/_op_impl/cpu/buffer_get.py +0 -28
  274. mindspore/ops/_op_impl/cpu/buffer_sample.py +0 -28
  275. mindspore/ops/_op_impl/cpu/priority_replay_buffer.py +0 -42
  276. mindspore/ops/operations/_tensor_array.py +0 -359
  277. mindspore/ops/operations/rl_ops.py +0 -288
  278. mindspore/parallel/_offload_context.py +0 -275
  279. mindspore/parallel/_recovery_context.py +0 -115
  280. mindspore/parallel/_transformer/__init__.py +0 -35
  281. mindspore/parallel/_transformer/layers.py +0 -765
  282. mindspore/parallel/_transformer/loss.py +0 -251
  283. mindspore/parallel/_transformer/moe.py +0 -693
  284. mindspore/parallel/_transformer/op_parallel_config.py +0 -222
  285. mindspore/parallel/_transformer/transformer.py +0 -3124
  286. mindspore/parallel/mpi/_mpi_config.py +0 -116
  287. mindspore/train/memory_profiling_pb2.py +0 -298
  288. {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/WHEEL +0 -0
  289. {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/entry_points.txt +0 -0
  290. {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/top_level.txt +0 -0
@@ -28,6 +28,7 @@ from mindspore.ops.operations import _sequence_ops as seq
  import mindspore.common.dtype as mstype
  from mindspore.ops.function.math_func import logsumexp, div
  from mindspore.ops.function.random_func import _get_seed, _set_prim_op_user_data
+ from mindspore.ops.functional_overload import conv2d as conv2d_op
  from mindspore.common.tensor import Tensor
  from mindspore._c_expression import TensorPy as Tensor_
  from mindspore.ops._primitive_cache import _get_cache_prim
@@ -41,7 +42,7 @@ from mindspore.ops.operations.nn_ops import TripletMarginLoss
  from mindspore.ops.operations._sequence_ops import TupleToTensor, TensorToTuple, ListToTensor
  from mindspore.common.api import _function_forbid_reuse
  from mindspore.ops.auto_generate import log_softmax, dense, prelu, celu, fast_gelu, silu, elu, sigmoid, relu6, \
- softmax_impl, swiglu, logsigmoid_op, kl_div_op, divs_op, l1_loss_ext
+ softmax_impl, swiglu, logsigmoid_op, kl_div_op, divs_op, l1_loss_ext, inplace_sigmoid
  from mindspore.ops.auto_generate import relu_op, inplace_relu_op
  from mindspore.ops.auto_generate import group_norm_op, rms_norm, add_rms_norm, layer_norm_ext_op, batch_norm_ext_op,\
  mse_loss_ext
@@ -54,7 +55,7 @@ from mindspore.ops.auto_generate import (reflection_pad_1d_op, reflection_pad_2d
  upsample_linear1d_op, upsample_bilinear2d_op, upsample_bicubic2d_op,
  upsample_trilinear3d_impl, fill_scalar_op, floor_op, nllloss_2d_op,
  masked_fill_op, masked_select, ones, flatten_ext, conv_transpose2d,
- func_max_pool2d_op)
+ func_max_pool2d_op, dropout2d_ext_op)
  # 2
  from mindspore.ops.auto_generate.pyboost_inner_prim import grid_sampler_2d_impl, grid_sampler_3d_impl
  # 3
@@ -92,11 +93,9 @@ from mindspore.ops.auto_generate import avg_pool3d_ext_op
  # 19

  # 20
- from mindspore.ops.functional_overload import conv3d as conv3d_op
  from mindspore.ops.auto_generate.gen_ops_prim import embedding_op, MaxPoolWithIndices, \
  PromptFlashAttention, MaxPoolWithMask
- from mindspore.ops.auto_generate.gen_ops_prim import conv2d_ext_op, \
- conv2d_padding_op, conv1d_ext_op, conv1d_padding_op, speed_fusion_attention_op
+ from mindspore.ops.auto_generate.gen_ops_prim import speed_fusion_attention_op
  from mindspore.common.generator import default_generator
  from mindspore.ops.auto_generate import hardshrink, hardsigmoid, hardswish
  from mindspore.ops.auto_generate import softshrink
@@ -280,9 +279,6 @@ def adaptive_avg_pool2d_ext(input, output_size):
  * (w_{end}- w_{start})}
  \end{align}

- .. warning::
- This is an experimental API that is subject to change or deletion.
-
  Args:
  input (Tensor): The input of adaptive_avg_pool2d, which is a 3D or 4D tensor,
  with float16 or float32 data type.
@@ -952,7 +948,7 @@ def adaptive_max_pool1d(input, output_size):
  >>> import mindspore
  >>> import numpy as np
  >>> from mindspore import Tensor, ops
- >>> input = Tensor(np.random.randint(0, 10, [1, 3, 6]), mindspore.float32)
+ >>> input = Tensor(np.random.randint(0, 10, [1, 3, 6]), mindspore.float16)
  >>> output = ops.adaptive_max_pool1d(input, output_size=2)
  >>> print(output.shape)
  (1, 3, 2)
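Note: `adaptive_max_pool1d` splits the last axis into `output_size` roughly equal segments and takes the maximum of each, so the length-6 input above with `output_size=2` yields 2 values per channel. A minimal NumPy sketch of that reduction (illustrative only, not the MindSpore kernel):

    import numpy as np

    x = np.random.randint(0, 10, [1, 3, 6]).astype(np.float16)
    # Two segments of length 3 each, max-reduced independently.
    out = np.stack([x[..., 0:3].max(axis=-1), x[..., 3:6].max(axis=-1)], axis=-1)
    print(out.shape)  # (1, 3, 2)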
@@ -1771,6 +1767,58 @@ def dropout2d(input, p=0.5, training=True):
  return out


+ def dropout2d_ext(input, p=0.5, training=True, inplace=False):
+ r"""
+ During training, randomly zeroes some channels of the input tensor with probability `p`
+ from a Bernoulli distribution (For a 4-dimensional tensor with a shape of :math:`(N, C, H, W)`,
+ the channel feature map refers to a 2-dimensional feature map with the shape of :math:`(H, W)`).
+
+ For example, the :math:`j\_th` channel of the :math:`i\_th` sample in the batched input is a to-be-processed
+ `2D` tensor input[i,j].
+ Each channel will be zeroed out independently on every forward call which based on Bernoulli distribution
+ probability `p`.
+ The parper `Dropout: A Simple Way to Prevent Neural Networks from Overfitting
+ <http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf>`_ mentioned this technology, and it is proved that
+ it can effectively reduce over fitting and prevent neuronal coadaptation.
+ For more details, refer to `Improving neural networks by preventing co-adaptation of feature detectors
+ <https://arxiv.org/pdf/1207.0580.pdf>`_ .
+
+ `dropout2d` can improve the independence between channel feature maps.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ input (Tensor): A `4D` tensor with shape :math:`(N, C, H, W)`, where `N` is the batch size, `C` is the number
+ of channels, `H` is the feature height, and `W` is the feature width.
+ p (float, optional): The dropping probability of a channel, between 0 and 1, e.g. `p` = 0.8,
+ which means dropping out 80% of channels. Default: ``0.5`` .
+ training(bool, optional): If `training` is True, applying dropout, otherwise, not applying. Default: ``True`` .
+ inplace (bool, optional): If set to ``True`` , will do this operation in-place. Default: ``False`` .
+
+ Returns:
+ Tensor, output, with the same shape and data type as `input`.
+
+ Raises:
+ TypeError: If `input` is not a Tensor.
+ ValueError: If `p` is out of the range `[0.0, 1.0]`.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, mint
+ >>> input = Tensor(np.ones([2, 1, 2, 3]), mindspore.float32)
+ >>> output = mint.nn.functional.dropout2d(input, 0.5)
+ >>> print(output.shape)
+ (2, 1, 2, 3)
+ """
+ seed, offset = default_generator._step(generator_step_) # pylint: disable=protected-access
+ return dropout2d_ext_op(input, p, training, inplace, seed, offset)
+
+
  def dropout3d(input, p=0.5, training=True):
  r"""
  During training, randomly zeroes some channels of the input tensor
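Note: the new `dropout2d_ext` draws a seed/offset pair from `default_generator` and forwards everything, including the new `inplace` flag, to `dropout2d_ext_op`. A minimal usage sketch, assuming `mint.nn.functional.dropout2d` binds to this implementation and exposes the `inplace` keyword in 2.7.1:

    import mindspore
    import numpy as np
    from mindspore import Tensor, mint

    x = Tensor(np.ones([2, 1, 2, 3]), mindspore.float32)

    # Out-of-place: returns a new tensor, x is left untouched.
    y = mint.nn.functional.dropout2d(x, p=0.5, training=True)

    # In-place variant added in this release: zeroed channels are written back into x.
    # (Assumes the inplace keyword is exposed through mint.nn.functional.dropout2d.)
    z = mint.nn.functional.dropout2d(x, p=0.5, training=True, inplace=True)
    print(y.shape, z.shape)  # (2, 1, 2, 3) (2, 1, 2, 3)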
@@ -2855,9 +2903,6 @@ def interpolate_ext(input,
  r"""
  Samples the input Tensor to the given size or scale_factor by using one of the interpolate algorithms.

- .. warning::
- This is an experimental API that is subject to change or deletion.
-
  .. note::
  - In 'linear' mode, the scenarios, where `scale_factor` is not None and `align_corners` is False,
  is not supported.
@@ -3443,9 +3488,6 @@ def logsigmoid_ext(input):
  .. image:: ../images/LogSigmoid.png
  :align: center

- .. warning::
- This is an experimental API that is subject to change or deletion.
-
  Args:
  input (Tensor): The input of LogSigmoid with data type of bfloat16, float16 or float32.
  The shape is :math:`(*)` where :math:`*` means, any number of additional dimensions.
@@ -4430,9 +4472,6 @@ def nll_loss_ext(input, target, weight=None, ignore_index=-100, reduction='mean'
  \sum_{n=1}^{N} l_{n}, & \text { if reduction }=\text { 'sum' }
  \end{array}\right.

- .. warning::
- This is an experimental API that is subject to change or deletion.
-
  Args:
  input (Tensor): :math:`(N)` or :math:`(N, C)` where `C = number of classes` , `N = batch size` ,
  or :math:`(N, C, d_1, d_2, ..., d_K)` (for high-dimensional data).
@@ -5193,7 +5232,7 @@ def margin_ranking_loss(input1, input2, target, margin=0.0, reduction='mean'):
  _check_is_tensor('target', target, "margin_ranking_loss")
  check_input_dtype('input1', input1, 'input2', input2, 'margin_ranking_loss')
  check_input_dtype('target', target, 'input1', input1, 'margin_ranking_loss')
- x = maximum_(-target * (input1 - input2) + margin, 0)
+ x = ops.clamp(-target * (input1 - input2) + margin, min=0)
  return _get_loss(x, reduction, "margin_ranking_loss")


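Note: the only functional change here replaces `maximum_(..., 0)` with `ops.clamp(..., min=0)`; both compute the hinge term max(0, -target * (input1 - input2) + margin). A small equivalence sketch with arbitrary values:

    import mindspore
    from mindspore import Tensor, ops

    x1 = Tensor([0.9, 0.1, 0.4], mindspore.float32)
    x2 = Tensor([0.3, 0.6, 0.4], mindspore.float32)
    y = Tensor([1.0, -1.0, 1.0], mindspore.float32)  # +1 means x1 should rank higher than x2
    margin = 0.5

    hinge = -y * (x1 - x2) + margin
    # Clamping at a lower bound of 0 is the same as an element-wise maximum with 0.
    print(ops.clamp(hinge, min=0))
    print(ops.maximum(hinge, Tensor(0.0, mindspore.float32)))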
@@ -5534,8 +5573,8 @@ def ctc_loss(log_probs, targets, input_lengths, target_lengths, blank=0, reducti
  >>> print(loss)
  -2.2986124
  >>> print(log_alpha)
- [[[0.3 0.3 -inf -inf -inf]
- [1.2 1.8931472 1.2 -inf -inf]]]
+ [[[0.3 0.3 -inf -inf 1.8931472 1.2 0. 0. ]
+ [0. 0. 0. 0. 0. 0. 0. 0. ]]]
  """
  _check_ctc_loss_inputs(blank, reduction, zero_infinity, 'ctc_loss')
  ctc_loss_op = NN_OPS.CTCLossV2(blank=blank, reduction="none", zero_infinity=zero_infinity)
@@ -6229,127 +6268,6 @@ def conv2d(input, weight, bias=None, stride=1, pad_mode="valid", padding=0, dila
  return output


- def conv1d_ext(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
- r"""
- Applies a 1D convolution over an input tensor. The input tenor is typically
- of shape :math:`(N, C_{in}, L_{in})`,
- where :math:`N` is batch size, :math:`C` is channel number, :math:`L` is sequence length.
-
- The output is calculated based on formula:
-
- .. math::
-
- \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
- \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})
-
- where :math:`bias` is the output channel bias, :math:`ccor` is
- the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
- :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
-
- - :math:`i` corresponds to the batch number, the range is :math:`[0, N-1]`,
- where :math:`N` is the batch size of the input.
-
- - :math:`j` corresponds to the output channel, the range is :math:`[0, C_{out}-1]`,
- where :math:`C_{out}` is the number of
- output channels, which is also equal to the number of kernels.
-
- - :math:`k` corresponds to the input channel, the range is :math:`[0, C_{in}-1]`,
- where :math:`C_{in}` is the number of
- input channels, which is also equal to the number of channels in the convolutional kernels.
-
- Therefore, in the above formula, :math:`{bias}(C_{\text{out}_j})` represents the bias of the :math:`j`-th
- output channel, :math:`{weight}(C_{\text{out}_j}, k)` represents the slice of the :math:`j`-th convolutional
- kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
- channel in the :math:`i`-th batch of the input feature map.
-
- The shape of the convolutional kernel is given by :math:`(\text{kernel_size})`,
- where :math:`\text{kernel_size}` is the length of the kernel.
- If we consider the input and output channels as well as the `groups` parameter, the complete kernel shape
- will be :math:`(C_{out}, C_{in} / \text{groups}, \text{kernel_size})`,
- where `groups` is the number of groups dividing `x`'s input channel when applying groups convolution.
-
- For more details about convolution layer, please refer to `Gradient Based Learning Applied to Document Recognition
- <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
-
- .. warning::
- This is an experimental API that is subject to change or deletion.
-
- Args:
- input (Tensor): Tensor of shape :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`.
- weight (Tensor): Tensor of shape
- :math:`(N, C_{in} / \text{groups}, \text{kernel_size})`, then the size of kernel
- is :math:`(\text{kernel_size})`.
- bias (Tensor, optional): Bias Tensor with shape :math:`(C_{out})`.
- When bias is ``None`` , zeros will be used. Default: ``None`` .
- stride (Union[int, tuple[int], list[int]], optional): The movement stride of the 1D convolution kernel.
- The data type is an integer or a tuple of one integer. Default: ``1`` .` .
- padding (Union[int, tuple[int], list[int], str], optional): The number of padding
- on the input.
- The data type is an integer or a tuple of one integer or string {`valid`, `same`}.
- The value should be greater than or equal to 0. Default: ``0`` .
-
- - ``"same"``: Pad the input around its edges so that the shape of input and output
- are the same when `stride` is set to ``1``.
- The amount of padding to is calculated by the operator internally, If the amount is even, it is
- uniformly distributed around the input, if it is odd, the excess amount goes to the right side.
- If this mode is set, `stride` must be 1.
-
- - ``"valid"``: No padding is applied to the input, and the output returns the maximum
- possible length. Extra sequence that could not complete a full stride will
- be discarded.
-
- dilation (Union[int, tuple[int], list[int]], optional): Specifies the dilation rate to use for
- dilated convolution. It can be a single int or a tuple of 1 integer.
- Assuming :math:`dilation=(d)`, the convolutional kernel samples the input with a
- spacing of :math:`d-1` elements in the length direction.
- Default: ``1`` .
- groups (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
- divisible by `groups`. If the groups is equal to `in_channels` and `out_channels`,
- this 1D convolution layer also can be called 1D depthwise convolution layer. Default: ``1`` .
-
- - :math:`(C_{in} \text{ % } \text{groups} == 0)` , :math:`(C_{out} \text{ % } \text{groups} == 0)` ,
- :math:`(C_{out} >= \text{groups})` , :math:`(\text{weight[1]} = C_{in} / \text{groups})`。
-
- Returns:
- Tensor, the value that applied 1D convolution. The shape is :math:`(N, C_{out}, L_{out})`.
- To see how different pad modes affect the output shape, please refer to
- :class:`mindspore.mint.nn.Conv1d` for more details.
-
- Raises:
- ValueError: Args and size of the input feature map should satisfy the output formula to ensure that the size of
- the output feature map is positive; otherwise, an error will be reported.
- RuntimeError: On Ascend, due to the limitation of the L1 cache size of different NPU chip, if input size or
- kernel size is too large, it may trigger an error.
- TypeError: If `in_channels`, `out_channels` or `groups` is not an int.
- TypeError: If `kernel_size`, `stride` or `dilation` is neither an int not a tuple.
- ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.
- ValueError: If `padding` is less than 0.
- ValueError: If `padding` is `same` , `stride` is not equal to 1.
- ValueError: The input parameters do not satisfy the convolution output formula.
- ValueError: The KernelSize cannot exceed the size of the input feature map.
- ValueError: The value of padding cannot cause the calculation area to exceed the input size.
-
- Supported Platforms:
- ``Ascend``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import Tensor, ops, mint
- >>> x = Tensor(np.ones([10, 32, 32]), mindspore.float32)
- >>> weight = Tensor(np.ones([32, 32, 3]), mindspore.float32)
- >>> output = mint.nn.functional.conv1d(x, weight)
- >>> print(output.shape)
- (10, 32, 30)
- """
- if isinstance(padding, (int, tuple, list)):
- return conv1d_ext_op(input, weight, bias, stride, padding, dilation, groups)
- if isinstance(padding, str):
- return conv1d_padding_op(input, weight, bias, stride, padding, dilation, groups)
- raise TypeError(f"For conv1d, the parameter 'padding' must be a tuple/list " \
- f"or a string, but got {type(padding)}")
-
-
  def _check_stride_when_same_mode(stride):
  """ stride must be 1 when pad mode is same """
  if isinstance(stride, int):
@@ -6473,9 +6391,12 @@ def conv2d_ext(input, weight, bias=None, stride=1, padding=0, dilation=1, groups
  there will be :math:`k - 1` pixels skipped for each sampling location. Its value must
  be greater than or equal to 1 and bounded by the height and width of the input `x`. Default: ``1`` .
  groups (int, optional): Splits `input` into groups. Default: ``1`` .
+ The following restraints should be met:

- - :math:`(C_{in} \text{ % } \text{groups} == 0)` , :math:`(C_{out} \text{ % } \text{groups} == 0)` ,
- :math:`(C_{out} >= \text{groups})` , :math:`(\text{weight[1]} = C_{in} / \text{groups})`
+ - :math:`(C_{in} \text{ % } \text{groups} == 0)`
+ - :math:`(C_{out} \text{ % } \text{groups} == 0)`
+ - :math:`(C_{out} >= \text{groups})`
+ - :math:`(\text{weight[1]} = C_{in} / \text{groups})`

  Returns:
  Tensor, the value that applied 2D convolution. The shape is :math:`(N, C_{out}, H_{out}, W_{out})`.
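Note: the reflowed bullet list spells out the shape constraints that `groups` imposes on conv2d. A hedged helper that checks them up front (the helper name and signature are illustrative, not part of the MindSpore API):

    def check_conv2d_groups(c_in, c_out, weight_shape, groups):
        """Validate the groups constraints listed above (illustrative helper)."""
        assert c_in % groups == 0, "C_in must be divisible by groups"
        assert c_out % groups == 0, "C_out must be divisible by groups"
        assert c_out >= groups, "C_out must be at least groups"
        assert weight_shape[1] == c_in // groups, "weight[1] must equal C_in / groups"

    # Example: a depthwise-style setup with 32 input/output channels and groups=32.
    check_conv2d_groups(c_in=32, c_out=32, weight_shape=(32, 1, 3, 3), groups=32)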
@@ -6511,12 +6432,7 @@ def conv2d_ext(input, weight, bias=None, stride=1, padding=0, dilation=1, groups
  >>> print(output.shape)
  (10, 32, 30, 30)
  """
- if isinstance(padding, (int, tuple, list)):
- return conv2d_ext_op(input, weight, bias, stride, padding, dilation, groups)
- if isinstance(padding, str):
- return conv2d_padding_op(input, weight, bias, stride, padding, dilation, groups)
- raise TypeError(f"For conv2d, the parameter 'padding' must be a tuple/list " \
- f"or a string, but got {type(padding)}")
+ return conv2d_op(input, weight, bias, stride, padding, dilation, groups)


  def hardtanh(input, min_val=-1.0, max_val=1.0):
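Note: the Python wrapper no longer branches on the padding type; `conv2d_op`, imported from `mindspore.ops.functional_overload` in the first hunk, is expected to handle both numeric padding and the "same"/"valid" strings. A hedged sketch of both call forms, assuming `mint.nn.functional.conv2d` routes through this wrapper:

    import mindspore
    import numpy as np
    from mindspore import Tensor, mint

    x = Tensor(np.ones([10, 32, 32, 32]), mindspore.float32)
    w = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32)

    # Numeric padding (previously dispatched to conv2d_ext_op).
    out_valid = mint.nn.functional.conv2d(x, w, padding=0)
    # String padding (previously dispatched to conv2d_padding_op).
    out_same = mint.nn.functional.conv2d(x, w, padding="same")
    print(out_valid.shape, out_same.shape)  # (10, 32, 30, 30) (10, 32, 32, 32)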
@@ -7310,142 +7226,6 @@ def conv3d(input, weight, bias=None, stride=1, pad_mode="valid", padding=0, dila
  return output


- def conv3d_ext(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
- r"""
- Applies a 3D convolution over an input tensor. The input tensor is typically of
- shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` or :math:`(C_{in}, D_{in}, H_{in}, W_{in})`,
- where :math:`N` is batch size, :math:`C` is channel number, :math:`D, H, W` are the depth,
- height and width of the feature graph, respectively.
-
- The output is calculated based on formula:
-
- .. math::
-
- \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
- \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})
-
- where :math:`bias` is the output channel bias, :math:`ccor` is
- the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_
- , :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
-
- Here are the indices' meanings:
-
- - :math:`i` corresponds to the batch number, the range is :math:`[0, N-1]`,
- where :math:`N` is the batch size of the input.
-
- - :math:`j` corresponds to the output channel, the range is :math:`[0, C_{out}-1]`,
- where :math:`C_{out}` is the number of
- output channels, which is also equal to the number of kernels.
-
- - :math:`k` corresponds to the input channel, the range is :math:`[0, C_{in}-1]`,
- where :math:`C_{in}` is the number of
- input channels, which is also equal to the number of channels in the convolutional kernels.
-
- Therefore, in the above formula, :math:`{bias}(C_{\text{out}_j})` represents the bias of the :math:`j`-th
- output channel, :math:`{weight}(C_{\text{out}_j}, k)` represents the slice of the :math:`j`-th convolutional
- kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
- channel in the :math:`i`-th batch of the input feature map.
-
- The shape of the convolutional kernel is given by :math:`(kd, kh, kw)` where :math:`kd` , :math:`kd` and\
- :math:`kw` are the depth, height and width of the kernel, respectively.
- If we consider the input and output channels as well as the `group` parameter, the complete kernel shape
- will be :math:`(C_{out}, C_{in} / \text{group}, kd, kh, kw)`,
- where `group` is the number of groups dividing `x`'s input channel when applying group convolution.
-
- For more details about convolution layer, please refer to `Gradient Based Learning Applied to Document Recognition
- <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
-
- The following lists some of the limitations of the parameters.
-
- - input -- The input to the conv3d. The input must have each dimension size within the range [1, int32_max].
- - weight -- Filters of shape :math:`(C_{out}, C_{in} / groups, kd, kh, kw)`. The value of :math:`kh`
- and :math:`kw` is in the range [1, 511]. The remaining values are in the range [1, int32_max].
- And :math:`kh*kw*k0` is less 65536 (k0 is 16. If data type is float32, k0 is 8).
- - bias -- Bias Tensor with shape :math:`(C_{out})`. The shape must equal the first dimension of the weight.
- - stride -- The distance of kernel moving. It can be an int number or
- tuple (noted by :math:`(stride_d, stride_h, stride_w)`). stride_h and stride_w are in the range [1, 63].
- stride_d is in the range [1, 255].
- - padding -- If padding is an int number, it is in the range [0, 255].
- - dilation -- The value is in the range [1, 255].
- - groups -- The value is in the range [1, 65535].
- - :math:`C_{in} \% \text{groups} == 0 \quad \text{and} \quad C_{out} \% \text{groups} == 0` .
- - :math:`weight[1] == C_{in} / groups` .
- - :math:`H_{in} + PadUp + PadDown >= (kh - 1) * DilationH + 1` .
- - :math:`W_{in} + PadLeft + PadRight >= (kw - 1) * DilationW + 1` .
- - :math:`D_{in} + PadFront + PadBack >= (kd - 1) * DilationD + 1` .
- - :math:`H_{out} = (H_{in} + PadUp + PadDown - ((kh - 1) * DilationH + 1)) / StrideH + 1` .
- - :math:`W_{out} = (W_{in} + PadLeft + PadRight - ((kw - 1) * DilationW + 1)) / StrideW + 1` .
- - :math:`D_{out} = (D_{in} + PadFront + PadBack - ((kd - 1) * DilationD + 1)) / StrideD + 1` .
- - :math:`(D_{in}+PadFront+PadBack - ((kd-1)*DilationD+1)) \% StrideD <= PadBack` .
- - :math:`(H_{in}+PadUp+PadDown - ((kh-1)*Dilationh+1)) \% StrideH <= PadDown` .
- - :math:`stride_d <= kernel_d` .
- - :math:`PadUp < kh` and :math:`PadDown < kh` . When `padding` = ``'valid'``, both PadUp and PadDown are zeros.
- When `padding` = ``'same'``, pad can be calculated by
- :math:`floor(((H_{out}-1) * strideH + (kh - 1) * DilationH + 1 - H_{in}) / 2)` for high dimension.
- It is similar way to calculate the padding for depth and width dimension. And the depth and width
- dimensions also have the same constraints.
- - :math:`((kh - 1) * DilationH - PadUp)` should be in [0, 255]. It is the same constraint for depth
- and width dimension.
- - If `padding` is ``'same'``, `stride` must be 1.
-
- .. warning::
- This API does not support Atlas series products.
-
- Args:
- input (Tensor): Tensor of shape :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`.
- weight (Tensor): Set size of kernel is :math:`(kd, kh,
- kw)`, then the shape is :math:`(C_{out}, C_{in} / groups, kd, kh, kw)`.
- bias (Tensor, optional): Bias Tensor with shape :math:`(C_{out})`.
- When bias is ``None`` , zeros will be used. Default: ``None`` .
- stride (Union(int, tuple[int], list[int]), optional): The distance of kernel moving, an int
- number that represents the depth, the height and width of movement are both strides, or a
- tuple of triple int numbers that
- represent the depth, height and width of movement respectively. Default: ``1`` .
- padding (Union(int, tuple[int], list[int], str), optional): Implicit paddings on both sides of the input `x`.
- Can be a string, one integer or a tuple/list with 3 integers.
- If `padding` is a string, the optional values are ``"same"`` , ``"valid"``.
-
- - same: Adopts the way of completion. The height and width of the output will be equal to
- the input `x` divided by stride. The padding will be evenly calculated in top and bottom,
- left and right possiblily. Otherwise, the last extra padding will be calculated from the bottom
- and the right side. If this mode is set, `stride` must be 1.
-
- - valid: Adopts the way of discarding. The possible largest height and width of output will be returned
- without padding. Extra pixels will be discarded.
-
- If `padding` is one integer, the paddings of top, bottom, left and right are the same, equal to padding.
- If `padding` is a tuple/list with 3 integers, the padding of head, tail, top, bottom,
- left and right equal to pad[0], pad[0], pad[1], pad[1], pad[2] and pad[2] correspondingly. Default: ``0`` .
- dilation (Union[int, tuple[int], list[int]], optional): Controlling the space between the kernel points.
- Default: ``1`` .
- groups (int, optional): Splits `input` into groups. Default: ``1`` .
-
- Returns:
- Tensor, the same dtype as the `input`, with the shape :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
- or :math:`(C_{out}, D_{out}, H_{out}, W_{out})`.
-
- Raises:
- TypeError: If `stride`, `padding` or `dilation` is neither an int nor a tuple.
- TypeError: `groups` is not an int.
- TypeError: If `bias` is not a Tensor.
-
- Supported Platforms:
- ``Ascend``
-
- Examples:
- >>> import mindspore
- >>> import numpy as np
- >>> from mindspore import mint
- >>> x = mindspore.Tensor(np.random.randn(12, 1, 60, 50, 8), mindspore.float16)
- >>> w = mindspore.Tensor(np.random.randn(26, 1, 2, 4, 4), mindspore.float16)
- >>> out = mint.nn.functional.conv3d(x, w)
- >>> print(out.shape)
- (12, 26, 59, 47, 5)
- """
-
- return conv3d_op(input, weight, bias, stride, padding, dilation, groups)
-
-
  @_primexpr
  def _check_positive_int(arg_value, arg_name=None, prim_name=None):
  validator.check_positive_int(arg_value, arg_name=arg_name, prim_name=prim_name)
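Note: the removed conv3d docstring carried the output-shape formulas, e.g. D_out = (D_in + PadFront + PadBack - ((kd - 1) * DilationD + 1)) / StrideD + 1, applied identically to depth, height and width. A standalone sketch of the same arithmetic (pure Python, names follow the removed text):

    def conv_out_size(in_size, kernel, stride=1, pad_front=0, pad_back=0, dilation=1):
        """Output size along one spatial dimension, per the formula in the removed docstring."""
        return (in_size + pad_front + pad_back - ((kernel - 1) * dilation + 1)) // stride + 1

    # Matches the removed example: input (12, 1, 60, 50, 8), weight (26, 1, 2, 4, 4) -> (12, 26, 59, 47, 5)
    print(conv_out_size(60, 2), conv_out_size(50, 4), conv_out_size(8, 4))  # 59 47 5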
@@ -9329,14 +9109,14 @@ def embedding(input, weight, padding_idx=None, max_norm=None, norm_type=2.0, sca
  >>> weight = Parameter(np.random.randn(3, 3).astype(np.float32))
  >>> output = ops.embedding(input, weight, max_norm=0.4)
  >>> print(output)
- [[[ 5.49015924e-02, 3.47811311e-01, -1.89771220e-01],
- [ 2.09307984e-01, -2.24846993e-02, 3.40124398e-01],
- [ 5.49015924e-02, 3.47811311e-01, -1.89771220e-01],
- [ 5.49015924e-02, 3.47811311e-01, -1.89771220e-01]],
- [[ 2.09307984e-01, -2.24846993e-02, 3.40124398e-01],
- [ 2.09307984e-01, -2.24846993e-02, 3.40124398e-01],
- [ 5.49015924e-02, 3.47811311e-01, -1.89771220e-01],
- [ 2.09307984e-01, -2.24846993e-02, 3.40124398e-01]]]
+ [[[ 5.49015924 3.47811311 -1.89771220],
+ [ 2.09307984 -2.24846993 3.40124398],
+ [ 5.49015924 3.47811311 -1.89771220],
+ [ 5.49015924 3.47811311 -1.89771220]],
+ [[ 2.09307984 -2.24846993 3.40124398],
+ [ 2.09307984 -2.24846993 3.40124398],
+ [ 5.49015924 3.47811311 -1.89771220],
+ [ 2.09307984 -2.24846993 3.40124398]]]
  """
  return embedding_op(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq)

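Note: only the doctest output format changes here; the semantics of `max_norm` are unchanged, i.e. rows of `weight` whose `norm_type`-norm exceeds `max_norm` are renormalized before lookup. A small NumPy sketch of that renormalization (illustrative only, not the MindSpore kernel):

    import numpy as np

    def renorm_rows(weight, max_norm, norm_type=2.0):
        """Scale each row down so its norm_type-norm does not exceed max_norm."""
        norms = np.linalg.norm(weight, ord=norm_type, axis=1, keepdims=True)
        scale = np.where(norms > max_norm, max_norm / norms, 1.0)
        return weight * scale

    weight = np.random.randn(3, 3).astype(np.float32)
    print(np.linalg.norm(renorm_rows(weight, 0.4), axis=1))  # every row norm <= 0.4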
@@ -9630,6 +9410,7 @@ __all__ = [
  'conv2d',
  'conv_transpose2d',
  'sigmoid',
+ 'inplace_sigmoid',
  'soft_margin_loss',
  'logsigmoid',
  'relu',
@@ -1262,9 +1262,6 @@ def randn_ext(*size, generator=None, dtype=None):
  Returns a new tensor filled with numbers from the normal distribution over an interval :math:`[0, 1)`
  based on the given shape and dtype.

- .. warning::
- This is an experimental API that is subject to change or deletion.
-
  Args:
  size (Union[int, tuple(int), list(int)]): Shape of the new tensor, e.g. :math:`(2, 3)` or :math:`2`.

@@ -1341,9 +1338,6 @@ def randint_ext(*args, generator=None, dtype=None):
  Returns a new tensor filled with integer numbers from the uniform distribution over an interval :math:`[low, high)`
  based on the given shape and dtype.

- .. warning::
- This is an experimental API that is subject to change or deletion.
-
  Args:
  low (int, optional): the lower bound of the generated random number. Default: ``0``.
  high (int): the upper bound of the generated random number
@@ -1680,9 +1674,6 @@ def randperm_ext(n, *, generator=None, dtype=mstype.int64):
  r"""
  Generates random permutation of integers from 0 to n-1.

- .. warning::
- - This is an experimental API that is subject to change or deletion.
-
  Args:
  n (Union[Tensor, int]): size of the permutation. int or Tensor with shape: () or (1,) and
  data type int64. The value of `n` must be greater than zero.
@@ -1957,10 +1948,10 @@ def multinomial_ext(input, num_samples, replacement=False, *, generator=None):
  >>> # input1 and input2 have the same meaning.
  >>> output1 = ops.multinomial_ext(input1, 2)
  >>> output2 = ops.multinomial_ext(input2, 2)
- >>> # print(output1)
- >>> # [0 1]
- >>> # print(output2)
- >>> # [0 1]
+ >>> print(output1)
+ [0 1]
+ >>> print(output2)
+ [0 1]
  >>> print(len(output1))
  2
  >>> print(len(output2))
@@ -1968,8 +1959,8 @@ def multinomial_ext(input, num_samples, replacement=False, *, generator=None):
  >>> # case 2: The output is random, and the length of the output is the same as num_sample.
  >>> # replacement is True.
  >>> output3 = ops.multinomial_ext(input1, 10, replacement=True)
- >>> # print(output3)
- >>> # [0 0 1 0 0 0 0 0 0 0]
+ >>> print(output3)
+ [0 0 1 0 0 0 0 0 0 0]
  >>> print(len(output3))
  10
  >>> # case 3: The output is random, and the length of the output is the same as num_sample.
@@ -1977,9 +1968,9 @@ def multinomial_ext(input, num_samples, replacement=False, *, generator=None):
  >>> # rank is 2
  >>> input4 = Tensor([[90, 10, 0], [10, 90, 0]], mstype.float32)
  >>> output4 = ops.multinomial_ext(input4, 10, replacement=True)
- >>> # print(output4)
- >>> # [[0 0 0 0 0 0 0 0 1 0]
- >>> # [1 1 1 1 1 0 1 1 1 1]]
+ >>> print(output4)
+ [[0 0 0 0 0 0 0 0 1 0]
+ [1 1 1 1 1 0 1 1 1 1]]
  """
  if generator is None:
  generator = default_generator
@@ -266,6 +266,7 @@ setattr(tensor_operator_registry, 'erf', erf)
  setattr(tensor_operator_registry, 'erfc', erfc)
  setattr(tensor_operator_registry, 'standard_normal', P.StandardNormal)
  setattr(tensor_operator_registry, 'sigmoid', sigmoid)
+ setattr(tensor_operator_registry, 'sigmoid_', auto_generate.inplace_sigmoid)
  setattr(tensor_operator_registry, 'median', Median)
  setattr(tensor_operator_registry, 'tanh', tanh)
  setattr(tensor_operator_registry, 'tanh_', tanh_)
@@ -452,6 +453,7 @@ setattr(tensor_operator_registry, 'ne', ne)
  setattr(tensor_operator_registry, 'not_equal', not_equal)
  setattr(tensor_operator_registry, 'sgn', sgn)
  setattr(tensor_operator_registry, 'sign', sign)
+ setattr(tensor_operator_registry, 'sign_', auto_generate.inplace_sign)
  setattr(tensor_operator_registry, 'signbit', signbit)
  setattr(tensor_operator_registry, 'sinh', sinh)
  setattr(tensor_operator_registry, 'trunc', trunc)
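Note: registering 'sigmoid_' and 'sign_' in `tensor_operator_registry` is what backs the corresponding in-place Tensor methods. A hedged usage sketch, assuming `Tensor.sigmoid_()` and `Tensor.sign_()` are exposed by this release:

    import mindspore
    from mindspore import Tensor

    x = Tensor([-1.0, 0.0, 2.0], mindspore.float32)
    x.sigmoid_()          # in-place: x now holds sigmoid(x)
    print(x)

    y = Tensor([-3.0, 0.5, 0.0], mindspore.float32)
    y.sign_()             # in-place: y now holds sign(y), i.e. [-1., 1., 0.]
    print(y)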