mindspore-2.7.0-cp310-cp310-win_amd64.whl → mindspore-2.7.1-cp310-cp310-win_amd64.whl

This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (290)
  1. mindspore/.commit_id +1 -1
  2. mindspore/__init__.py +4 -1
  3. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  4. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  5. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  6. mindspore/_extends/parse/compile_config.py +24 -1
  7. mindspore/_extends/parse/deprecated/deprecated_tensor_method.py +6 -2
  8. mindspore/_extends/parse/resources.py +1 -1
  9. mindspore/_extends/parse/standard_method.py +8 -1
  10. mindspore/_extends/parse/trope.py +2 -1
  11. mindspore/_extends/pijit/pijit_func_white_list.py +7 -22
  12. mindspore/avcodec-59.dll +0 -0
  13. mindspore/avdevice-59.dll +0 -0
  14. mindspore/avfilter-8.dll +0 -0
  15. mindspore/avformat-59.dll +0 -0
  16. mindspore/avutil-57.dll +0 -0
  17. mindspore/boost/base.py +29 -2
  18. mindspore/common/_decorator.py +3 -2
  19. mindspore/common/_grad_function.py +3 -1
  20. mindspore/common/_tensor_cpp_method.py +1 -1
  21. mindspore/common/_tensor_docs.py +275 -64
  22. mindspore/common/_utils.py +0 -44
  23. mindspore/common/api.py +285 -35
  24. mindspore/common/dump.py +7 -108
  25. mindspore/common/dynamic_shape/auto_dynamic_shape.py +1 -3
  26. mindspore/common/hook_handle.py +60 -0
  27. mindspore/common/jit_config.py +5 -1
  28. mindspore/common/jit_trace.py +27 -12
  29. mindspore/common/lazy_inline.py +5 -3
  30. mindspore/common/parameter.py +13 -107
  31. mindspore/common/recompute.py +4 -11
  32. mindspore/common/tensor.py +16 -169
  33. mindspore/communication/_comm_helper.py +11 -1
  34. mindspore/communication/comm_func.py +138 -4
  35. mindspore/communication/management.py +85 -1
  36. mindspore/config/op_info.config +0 -15
  37. mindspore/context.py +5 -85
  38. mindspore/dataset/engine/datasets.py +8 -4
  39. mindspore/dataset/engine/datasets_vision.py +1 -1
  40. mindspore/dataset/engine/validators.py +1 -15
  41. mindspore/dnnl.dll +0 -0
  42. mindspore/{experimental/llm_boost/ascend_native → graph}/__init__.py +7 -7
  43. mindspore/graph/custom_pass.py +55 -0
  44. mindspore/include/dataset/execute.h +2 -2
  45. mindspore/jpeg62.dll +0 -0
  46. mindspore/mindrecord/__init__.py +3 -3
  47. mindspore/mindrecord/common/exceptions.py +1 -0
  48. mindspore/mindrecord/config.py +1 -1
  49. mindspore/{parallel/mpi → mindrecord/core}/__init__.py +4 -1
  50. mindspore/mindrecord/{shardheader.py → core/shardheader.py} +2 -1
  51. mindspore/mindrecord/{shardindexgenerator.py → core/shardindexgenerator.py} +1 -1
  52. mindspore/mindrecord/{shardreader.py → core/shardreader.py} +2 -1
  53. mindspore/mindrecord/{shardsegment.py → core/shardsegment.py} +2 -2
  54. mindspore/mindrecord/{shardutils.py → core/shardutils.py} +1 -1
  55. mindspore/mindrecord/{shardwriter.py → core/shardwriter.py} +1 -1
  56. mindspore/mindrecord/filereader.py +4 -4
  57. mindspore/mindrecord/filewriter.py +5 -5
  58. mindspore/mindrecord/mindpage.py +2 -2
  59. mindspore/mindrecord/tools/cifar10.py +1 -1
  60. mindspore/mindrecord/tools/cifar100.py +1 -1
  61. mindspore/mindrecord/tools/cifar100_to_mr.py +1 -1
  62. mindspore/mindrecord/tools/cifar10_to_mr.py +1 -1
  63. mindspore/mindrecord/tools/csv_to_mr.py +1 -1
  64. mindspore/mindrecord/tools/imagenet_to_mr.py +1 -1
  65. mindspore/mindrecord/tools/mnist_to_mr.py +1 -1
  66. mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -1
  67. mindspore/mindspore_backend_common.dll +0 -0
  68. mindspore/mindspore_backend_manager.dll +0 -0
  69. mindspore/mindspore_cluster.dll +0 -0
  70. mindspore/mindspore_common.dll +0 -0
  71. mindspore/mindspore_core.dll +0 -0
  72. mindspore/mindspore_cpu.dll +0 -0
  73. mindspore/mindspore_dump.dll +0 -0
  74. mindspore/mindspore_frontend.dll +0 -0
  75. mindspore/mindspore_glog.dll +0 -0
  76. mindspore/mindspore_hardware_abstract.dll +0 -0
  77. mindspore/mindspore_memory_pool.dll +0 -0
  78. mindspore/mindspore_ms_backend.dll +0 -0
  79. mindspore/mindspore_ops.dll +0 -0
  80. mindspore/{mindspore_ops_host.dll → mindspore_ops_cpu.dll} +0 -0
  81. mindspore/mindspore_profiler.dll +0 -0
  82. mindspore/mindspore_pyboost.dll +0 -0
  83. mindspore/mindspore_pynative.dll +0 -0
  84. mindspore/mindspore_runtime_pipeline.dll +0 -0
  85. mindspore/mindspore_runtime_utils.dll +0 -0
  86. mindspore/mindspore_tools.dll +0 -0
  87. mindspore/mint/__init__.py +15 -10
  88. mindspore/mint/distributed/distributed.py +182 -62
  89. mindspore/mint/nn/__init__.py +2 -16
  90. mindspore/mint/nn/functional.py +4 -110
  91. mindspore/mint/nn/layer/__init__.py +0 -2
  92. mindspore/mint/nn/layer/activation.py +0 -6
  93. mindspore/mint/nn/layer/basic.py +0 -47
  94. mindspore/mint/nn/layer/conv.py +4 -4
  95. mindspore/mint/nn/layer/normalization.py +8 -13
  96. mindspore/mint/nn/layer/pooling.py +0 -4
  97. mindspore/nn/__init__.py +1 -3
  98. mindspore/nn/cell.py +16 -66
  99. mindspore/nn/layer/basic.py +49 -1
  100. mindspore/nn/layer/container.py +16 -0
  101. mindspore/nn/layer/embedding.py +4 -169
  102. mindspore/nn/layer/normalization.py +2 -1
  103. mindspore/nn/layer/thor_layer.py +4 -85
  104. mindspore/nn/optim/ada_grad.py +0 -1
  105. mindspore/nn/optim/adafactor.py +0 -1
  106. mindspore/nn/optim/adam.py +31 -124
  107. mindspore/nn/optim/adamax.py +0 -1
  108. mindspore/nn/optim/asgd.py +0 -1
  109. mindspore/nn/optim/ftrl.py +8 -102
  110. mindspore/nn/optim/lamb.py +0 -1
  111. mindspore/nn/optim/lars.py +0 -3
  112. mindspore/nn/optim/lazyadam.py +25 -218
  113. mindspore/nn/optim/momentum.py +5 -43
  114. mindspore/nn/optim/optimizer.py +6 -55
  115. mindspore/nn/optim/proximal_ada_grad.py +0 -1
  116. mindspore/nn/optim/rmsprop.py +0 -1
  117. mindspore/nn/optim/rprop.py +0 -1
  118. mindspore/nn/optim/sgd.py +0 -1
  119. mindspore/nn/optim/tft_wrapper.py +0 -1
  120. mindspore/nn/optim/thor.py +0 -2
  121. mindspore/nn/probability/bijector/bijector.py +7 -8
  122. mindspore/nn/probability/bijector/gumbel_cdf.py +2 -2
  123. mindspore/nn/probability/bijector/power_transform.py +20 -21
  124. mindspore/nn/probability/bijector/scalar_affine.py +5 -5
  125. mindspore/nn/probability/bijector/softplus.py +13 -14
  126. mindspore/nn/wrap/grad_reducer.py +4 -74
  127. mindspore/numpy/array_creations.py +2 -2
  128. mindspore/numpy/fft.py +9 -9
  129. mindspore/{nn/reinforcement → onnx}/__init__.py +5 -8
  130. mindspore/onnx/onnx_export.py +137 -0
  131. mindspore/opencv_core4110.dll +0 -0
  132. mindspore/opencv_imgcodecs4110.dll +0 -0
  133. mindspore/{opencv_imgproc452.dll → opencv_imgproc4110.dll} +0 -0
  134. mindspore/ops/__init__.py +2 -0
  135. mindspore/ops/_grad_experimental/grad_comm_ops.py +38 -2
  136. mindspore/ops/_op_impl/aicpu/__init__.py +0 -10
  137. mindspore/ops/_op_impl/cpu/__init__.py +0 -5
  138. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +16 -22
  139. mindspore/ops/auto_generate/gen_extend_func.py +2 -7
  140. mindspore/ops/auto_generate/gen_ops_def.py +98 -141
  141. mindspore/ops/auto_generate/gen_ops_prim.py +12708 -12686
  142. mindspore/ops/communication.py +97 -0
  143. mindspore/ops/composite/__init__.py +5 -2
  144. mindspore/ops/composite/base.py +15 -1
  145. mindspore/ops/composite/multitype_ops/__init__.py +3 -1
  146. mindspore/ops/composite/multitype_ops/_compile_utils.py +150 -8
  147. mindspore/ops/composite/multitype_ops/add_impl.py +7 -0
  148. mindspore/ops/composite/multitype_ops/mod_impl.py +27 -0
  149. mindspore/ops/function/__init__.py +1 -0
  150. mindspore/ops/function/array_func.py +14 -12
  151. mindspore/ops/function/comm_func.py +3883 -0
  152. mindspore/ops/function/debug_func.py +3 -4
  153. mindspore/ops/function/math_func.py +45 -54
  154. mindspore/ops/function/nn_func.py +75 -294
  155. mindspore/ops/function/random_func.py +9 -18
  156. mindspore/ops/functional.py +2 -0
  157. mindspore/ops/functional_overload.py +354 -18
  158. mindspore/ops/operations/__init__.py +2 -5
  159. mindspore/ops/operations/_custom_ops_utils.py +7 -9
  160. mindspore/ops/operations/_inner_ops.py +1 -38
  161. mindspore/ops/operations/_rl_inner_ops.py +0 -933
  162. mindspore/ops/operations/array_ops.py +1 -0
  163. mindspore/ops/operations/comm_ops.py +94 -2
  164. mindspore/ops/operations/custom_ops.py +228 -19
  165. mindspore/ops/operations/debug_ops.py +27 -29
  166. mindspore/ops/operations/manually_defined/ops_def.py +27 -306
  167. mindspore/ops/operations/nn_ops.py +2 -2
  168. mindspore/ops/operations/sparse_ops.py +0 -83
  169. mindspore/ops/primitive.py +1 -17
  170. mindspore/ops/tensor_method.py +72 -3
  171. mindspore/ops_generate/aclnn/aclnn_kernel_register_auto_cc_generator.py +5 -5
  172. mindspore/ops_generate/aclnn/gen_aclnn_implement.py +8 -8
  173. mindspore/ops_generate/api/functions_cc_generator.py +53 -4
  174. mindspore/ops_generate/api/tensor_func_reg_cpp_generator.py +25 -11
  175. mindspore/ops_generate/common/gen_constants.py +11 -10
  176. mindspore/ops_generate/common/op_proto.py +18 -1
  177. mindspore/ops_generate/common/template.py +102 -245
  178. mindspore/ops_generate/common/template_utils.py +212 -0
  179. mindspore/ops_generate/gen_custom_ops.py +69 -0
  180. mindspore/ops_generate/op_def/ops_def_cc_generator.py +78 -7
  181. mindspore/ops_generate/op_def_py/base_op_prim_py_generator.py +360 -0
  182. mindspore/ops_generate/op_def_py/custom_op_prim_py_generator.py +140 -0
  183. mindspore/ops_generate/op_def_py/op_def_py_generator.py +54 -7
  184. mindspore/ops_generate/op_def_py/op_prim_py_generator.py +5 -312
  185. mindspore/ops_generate/pyboost/auto_grad_impl_cc_generator.py +74 -17
  186. mindspore/ops_generate/pyboost/auto_grad_reg_cc_generator.py +22 -5
  187. mindspore/ops_generate/pyboost/op_template_parser.py +3 -2
  188. mindspore/ops_generate/pyboost/pyboost_functions_cpp_generator.py +21 -5
  189. mindspore/ops_generate/pyboost/pyboost_functions_h_generator.py +2 -2
  190. mindspore/ops_generate/pyboost/pyboost_functions_impl_cpp_generator.py +30 -10
  191. mindspore/ops_generate/pyboost/pyboost_grad_function_cpp_generator.py +10 -3
  192. mindspore/ops_generate/pyboost/pyboost_internal_kernel_info_adapter_generator.py +1 -1
  193. mindspore/ops_generate/pyboost/pyboost_native_grad_functions_generator.py +19 -9
  194. mindspore/ops_generate/pyboost/pyboost_op_cpp_code_generator.py +71 -28
  195. mindspore/ops_generate/pyboost/pyboost_overload_functions_cpp_generator.py +10 -9
  196. mindspore/ops_generate/pyboost/pyboost_utils.py +27 -16
  197. mindspore/ops_generate/resources/yaml_loader.py +13 -0
  198. mindspore/ops_generate/tensor_py_cc_generator.py +2 -2
  199. mindspore/parallel/_cell_wrapper.py +1 -1
  200. mindspore/parallel/_parallel_serialization.py +1 -4
  201. mindspore/parallel/_utils.py +29 -6
  202. mindspore/parallel/checkpoint_transform.py +18 -2
  203. mindspore/parallel/cluster/process_entity/_api.py +24 -32
  204. mindspore/parallel/cluster/process_entity/_utils.py +9 -5
  205. mindspore/{experimental/llm_boost/atb → parallel/distributed}/__init__.py +21 -23
  206. mindspore/parallel/distributed/distributed_data_parallel.py +393 -0
  207. mindspore/parallel/distributed/flatten_grad_buffer.py +295 -0
  208. mindspore/parallel/strategy.py +336 -0
  209. mindspore/parallel/transform_safetensors.py +117 -16
  210. mindspore/profiler/analysis/viewer/ascend_kernel_details_viewer.py +3 -0
  211. mindspore/profiler/analysis/viewer/ms_minddata_viewer.py +1 -1
  212. mindspore/profiler/common/constant.py +5 -0
  213. mindspore/profiler/common/file_manager.py +9 -0
  214. mindspore/profiler/common/msprof_cmd_tool.py +38 -2
  215. mindspore/profiler/common/path_manager.py +56 -24
  216. mindspore/profiler/common/profiler_context.py +2 -12
  217. mindspore/profiler/common/profiler_info.py +3 -3
  218. mindspore/profiler/common/profiler_path_manager.py +13 -0
  219. mindspore/profiler/common/util.py +30 -3
  220. mindspore/profiler/experimental_config.py +2 -1
  221. mindspore/profiler/platform/npu_profiler.py +33 -6
  222. mindspore/run_check/_check_version.py +108 -24
  223. mindspore/runtime/__init__.py +3 -2
  224. mindspore/runtime/executor.py +11 -3
  225. mindspore/runtime/memory.py +112 -0
  226. mindspore/swresample-4.dll +0 -0
  227. mindspore/swscale-6.dll +0 -0
  228. mindspore/tinyxml2.dll +0 -0
  229. mindspore/{experimental/llm_boost → tools}/__init__.py +5 -5
  230. mindspore/tools/data_dump.py +130 -0
  231. mindspore/tools/sdc_detect.py +91 -0
  232. mindspore/tools/stress_detect.py +63 -0
  233. mindspore/train/__init__.py +6 -6
  234. mindspore/train/_utils.py +5 -18
  235. mindspore/train/amp.py +6 -4
  236. mindspore/train/callback/_checkpoint.py +0 -9
  237. mindspore/train/callback/_train_fault_tolerance.py +69 -18
  238. mindspore/train/data_sink.py +1 -5
  239. mindspore/train/model.py +38 -211
  240. mindspore/train/serialization.py +126 -387
  241. mindspore/turbojpeg.dll +0 -0
  242. mindspore/utils/__init__.py +6 -3
  243. mindspore/utils/dlpack.py +92 -0
  244. mindspore/utils/dryrun.py +1 -1
  245. mindspore/utils/runtime_execution_order_check.py +10 -0
  246. mindspore/utils/sdc_detect.py +14 -12
  247. mindspore/utils/stress_detect.py +43 -0
  248. mindspore/utils/utils.py +144 -8
  249. mindspore/version.py +1 -1
  250. {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/METADATA +3 -2
  251. {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/RECORD +254 -267
  252. mindspore/experimental/llm_boost/ascend_native/llama_boost_ascend_native.py +0 -210
  253. mindspore/experimental/llm_boost/ascend_native/llm_boost.py +0 -52
  254. mindspore/experimental/llm_boost/atb/boost_base.py +0 -385
  255. mindspore/experimental/llm_boost/atb/llama_boost.py +0 -137
  256. mindspore/experimental/llm_boost/atb/qwen_boost.py +0 -124
  257. mindspore/experimental/llm_boost/register.py +0 -130
  258. mindspore/experimental/llm_boost/utils.py +0 -31
  259. mindspore/include/OWNERS +0 -7
  260. mindspore/mindspore_cpu_res_manager.dll +0 -0
  261. mindspore/mindspore_ops_kernel_common.dll +0 -0
  262. mindspore/mindspore_res_manager.dll +0 -0
  263. mindspore/nn/optim/_dist_optimizer_registry.py +0 -111
  264. mindspore/nn/reinforcement/_batch_read_write.py +0 -142
  265. mindspore/nn/reinforcement/_tensors_queue.py +0 -152
  266. mindspore/nn/reinforcement/tensor_array.py +0 -145
  267. mindspore/opencv_core452.dll +0 -0
  268. mindspore/opencv_imgcodecs452.dll +0 -0
  269. mindspore/ops/_op_impl/aicpu/priority_replay_buffer.py +0 -113
  270. mindspore/ops/_op_impl/aicpu/reservoir_replay_buffer.py +0 -96
  271. mindspore/ops/_op_impl/aicpu/sparse_cross.py +0 -42
  272. mindspore/ops/_op_impl/cpu/buffer_append.py +0 -28
  273. mindspore/ops/_op_impl/cpu/buffer_get.py +0 -28
  274. mindspore/ops/_op_impl/cpu/buffer_sample.py +0 -28
  275. mindspore/ops/_op_impl/cpu/priority_replay_buffer.py +0 -42
  276. mindspore/ops/operations/_tensor_array.py +0 -359
  277. mindspore/ops/operations/rl_ops.py +0 -288
  278. mindspore/parallel/_offload_context.py +0 -275
  279. mindspore/parallel/_recovery_context.py +0 -115
  280. mindspore/parallel/_transformer/__init__.py +0 -35
  281. mindspore/parallel/_transformer/layers.py +0 -765
  282. mindspore/parallel/_transformer/loss.py +0 -251
  283. mindspore/parallel/_transformer/moe.py +0 -693
  284. mindspore/parallel/_transformer/op_parallel_config.py +0 -222
  285. mindspore/parallel/_transformer/transformer.py +0 -3124
  286. mindspore/parallel/mpi/_mpi_config.py +0 -116
  287. mindspore/train/memory_profiling_pb2.py +0 -298
  288. {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/WHEEL +0 -0
  289. {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/entry_points.txt +0 -0
  290. {mindspore-2.7.0.dist-info → mindspore-2.7.1.dist-info}/top_level.txt +0 -0
@@ -20,6 +20,8 @@ from mindspore._c_expression import _any_instance
  from mindspore._c_expression import _bernoulli__instance
  from mindspore._c_expression import _bitwise_not_instance
  from mindspore._c_expression import _clamp_instance
+ from mindspore._c_expression import _conv1d_instance
+ from mindspore._c_expression import _conv2d_instance
  from mindspore._c_expression import _conv3d_instance
  from mindspore._c_expression import _div_instance
  from mindspore._c_expression import _einsum_instance
@@ -32,6 +34,7 @@ from mindspore._c_expression import _gmm_instance
  from mindspore._c_expression import _gmm_backward_instance
  from mindspore._c_expression import _gmm_backward_fusion_instance
  from mindspore._c_expression import _greater_equal_instance
+ from mindspore._c_expression import _imag_instance
  from mindspore._c_expression import _index_add_instance
  from mindspore._c_expression import _kthvalue_instance
  from mindspore._c_expression import _lerp_instance
@@ -41,6 +44,7 @@ from mindspore._c_expression import _min_instance
  from mindspore._c_expression import _nansum_instance
  from mindspore._c_expression import _pixel_shuffle_instance
  from mindspore._c_expression import _quant_matmul_instance
+ from mindspore._c_expression import _real_instance
  from mindspore._c_expression import _remainder_instance
  from mindspore._c_expression import _repeat_interleave_instance
  from mindspore._c_expression import _rmod_instance
@@ -439,6 +443,245 @@ def clip(*args, **kwargs):
  return _clamp_instance(*args, **kwargs)


+ def conv1d(*args, **kwargs):
+ r"""
+ conv1d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
+
+ Applies a 1D convolution over an input tensor. The input tensor is typically
+ of shape :math:`(N, C_{in}, L_{in})`,
+ where :math:`N` is batch size, :math:`C` is channel number, :math:`L` is sequence length.
+
+ The output is calculated based on formula:
+
+ .. math::
+
+ \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
+ \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})
+
+ where :math:`bias` is the output channel bias, :math:`ccor` is
+ the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
+ :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
+
+ - :math:`i` corresponds to the batch number, the range is :math:`[0, N-1]`,
+ where :math:`N` is the batch size of the input.
+
+ - :math:`j` corresponds to the output channel, the range is :math:`[0, C_{out}-1]`,
+ where :math:`C_{out}` is the number of
+ output channels, which is also equal to the number of kernels.
+
+ - :math:`k` corresponds to the input channel, the range is :math:`[0, C_{in}-1]`,
+ where :math:`C_{in}` is the number of
+ input channels, which is also equal to the number of channels in the convolutional kernels.
+
+ Therefore, in the above formula, :math:`{bias}(C_{\text{out}_j})` represents the bias of the :math:`j`-th
+ output channel, :math:`{weight}(C_{\text{out}_j}, k)` represents the slice of the :math:`j`-th convolutional
+ kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
+ channel in the :math:`i`-th batch of the input feature map.
+
+ The shape of the convolutional kernel is given by :math:`(\text{kernel_size})`,
+ where :math:`\text{kernel_size}` is the length of the kernel.
+ If we consider the input and output channels as well as the `groups` parameter, the complete kernel shape
+ will be :math:`(C_{out}, C_{in} / \text{groups}, \text{kernel_size})`,
+ where `groups` is the number of groups dividing `x`'s input channel when applying groups convolution.
+
+ For more details about convolution layer, please refer to `Gradient Based Learning Applied to Document Recognition
+ <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
+
+ Args:
+ input (Tensor): Tensor of shape :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`.
+ weight (Tensor): Tensor of shape
+ :math:`(C_{out}, C_{in} / \text{groups}, \text{kernel_size})`, then the size of kernel
+ is :math:`(\text{kernel_size})`.
+ bias (Tensor, optional): Bias Tensor with shape :math:`(C_{out})`.
+ When bias is ``None`` , zeros will be used. Default: ``None`` .
+ stride (Union[int, tuple[int], list[int]], optional): The movement stride of the 1D convolution kernel.
+ The data type is an integer or a tuple of one integer. Default: ``1`` .
+ padding (Union[int, tuple[int], list[int], str], optional): The number of padding
+ on the input.
+ The data type is an integer or a tuple of one integer or string {`valid`, `same`}.
+ The value should be greater than or equal to 0. Default: ``0`` .
+
+ - ``"same"``: Pad the input around its edges so that the shape of input and output
+ are the same when `stride` is set to ``1``.
+ The amount of padding is calculated by the operator internally. If the amount is even, it is
+ uniformly distributed around the input; if it is odd, the excess amount goes to the right side.
+ If this mode is set, `stride` must be 1.
+
+ - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+ possible length. Extra sequence that could not complete a full stride will
+ be discarded.
+
+ dilation (Union[int, tuple[int], list[int]], optional): Specifies the dilation rate to use for
+ dilated convolution. It can be a single int or a tuple of 1 integer.
+ Assuming :math:`dilation=(d)`, the convolutional kernel samples the input with a
+ spacing of :math:`d-1` elements in the length direction.
+ Default: ``1`` .
+ groups (int, optional): Splits filter into groups, `in_channels` and `out_channels` must be
+ divisible by `groups`. If the groups is equal to `in_channels` and `out_channels`,
+ this 1D convolution layer also can be called 1D depthwise convolution layer. Default: ``1`` .
+ The following restraints should be met:
+
+ - :math:`(C_{in} \text{ % } \text{groups} == 0)`
+ - :math:`(C_{out} \text{ % } \text{groups} == 0)`
+ - :math:`(C_{out} >= \text{groups})`
+ - :math:`(\text{weight[1]} = C_{in} / \text{groups})`
+
+ Returns:
+ Tensor, the value that applied 1D convolution. The shape is :math:`(N, C_{out}, L_{out})`.
+ To see how different pad modes affect the output shape, please refer to
+ :class:`mindspore.mint.nn.Conv1d` for more details.
+
+ Raises:
+ RuntimeError: On Ascend, due to the limitation of the L1 cache size of different NPU chip, if input size or
+ kernel size is too large, it may trigger an error.
+ TypeError: If `in_channels`, `out_channels` or `groups` is not an int.
+ TypeError: If `kernel_size`, `stride` or `dilation` is neither an int nor a tuple.
+ ValueError: Args and size of the input feature map should satisfy the output formula to ensure that the size of
+ the output feature map is positive; otherwise, an error will be reported.
+ ValueError: If `in_channels`, `out_channels`, `kernel_size`, `stride` or `dilation` is less than 1.
+ ValueError: If `padding` is less than 0.
+ ValueError: If `padding` is `same` , `stride` is not equal to 1.
+ ValueError: The input parameters do not satisfy the convolution output formula.
+ ValueError: The `kernel_size` cannot exceed the size of the input feature map.
+ ValueError: The value of `padding` cannot cause the calculation area to exceed the input size.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops, mint
+ >>> x = Tensor(np.ones([10, 32, 32]), mindspore.float32)
+ >>> weight = Tensor(np.ones([32, 32, 3]), mindspore.float32)
+ >>> output = mint.nn.functional.conv1d(x, weight)
+ >>> print(output.shape)
+ (10, 32, 30)
+ """
+ return _conv1d_instance(*args, **kwargs)
+
+
+ def conv2d(*args, **kwargs):
+ r"""
+ Applies a 2D convolution over an input tensor. The input tensor is typically of
+ shape :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`,
+ where :math:`N` is batch size, :math:`C` is channel number, :math:`H` is feature height, :math:`W` is feature width.
+
+ The output is calculated based on formula:
+
+ .. math::
+
+ \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
+ \sum_{k = 0}^{C_{in} - 1} \text{ccor}({\text{weight}(C_{\text{out}_j}, k), \text{X}(N_i, k)})
+
+ where :math:`bias` is the output channel bias, :math:`ccor` is
+ the `cross-correlation <https://en.wikipedia.org/wiki/Cross-correlation>`_,
+ :math:`weight` is the convolution kernel value and :math:`X` represents the input feature map.
+
+ - :math:`i` corresponds to the batch number, the range is :math:`[0, N-1]`,
+ where :math:`N` is the batch size of the input.
+
+ - :math:`j` corresponds to the output channel, the range is :math:`[0, C_{out}-1]`,
+ where :math:`C_{out}` is the number of output channels, which is also equal to the number of kernels.
+
+ - :math:`k` corresponds to the input channel, the range is :math:`[0, C_{in}-1]`,
+ where :math:`C_{in}` is the number of
+ input channels, which is also equal to the number of channels in the convolutional kernels.
+
+ Therefore, in the above formula, :math:`{bias}(C_{out_j})` represents the bias of the :math:`j`-th
+ output channel, :math:`{weight}(C_{out_j}, k)` represents the slice of the :math:`j`-th convolutional
+ kernel in the :math:`k`-th channel, and :math:`{X}(N_i, k)` represents the slice of the :math:`k`-th input
+ channel in the :math:`i`-th batch of the input feature map.
+
+ The shape of the convolutional kernel is given by :math:`(\text{kernel_size[0]}, \text{kernel_size[1]})`,
+ where :math:`\text{kernel_size[0]}` and :math:`\text{kernel_size[1]}` are the height and width of the kernel,
+ respectively.
+ If we consider the input and output channels as well as the `group` parameter, the complete kernel shape
+ will be :math:`(C_{out}, C_{in} / \text{group}, \text{kernel_size[0]}, \text{kernel_size[1]})`,
+ where `group` is the number of groups dividing `x`'s input channel when applying group convolution.
+
+ For more details about convolution layer, please refer to `Gradient Based Learning Applied to Document Recognition
+ <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_ and
+ `ConvNets <http://cs231n.github.io/convolutional-networks/>`_.
+
+ .. warning::
+ This is an experimental API that is subject to change or deletion.
+
+ Args:
+ input (Tensor): Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`.
+ weight (Tensor): Tensor of shape
+ :math:`(C_{out}, C_{in} / \text{groups}, \text{kernel_size[0]}, \text{kernel_size[1]})`, then the size of kernel
+ is :math:`(\text{kernel_size[0]}, \text{kernel_size[1]})`.
+ bias (Tensor, optional): Bias Tensor with shape :math:`(C_{out})`.
+ When bias is ``None`` , zeros will be used. Default: ``None`` .
+ stride (Union(int, tuple[int], list[int]), optional): The distance of kernel moving, an int number that
+ represents the height and width of movement are both strides, or a tuple of two int numbers that
+ represent height and width of movement respectively. Default: ``1`` .
+ padding (Union[int, tuple[int], list[int], str], optional): The number of padding
+ on the height and width directions of the input.
+ The data type is an integer or a tuple of two integers or string {`valid`, `same`}. If `padding` is an
+ integer, then `padding_{H}` and `padding_{W}` are all equal to `padding`.
+ If `padding` is a tuple of 2 integers, then `padding_{H}` and `padding_{W}`
+ is equal to `padding[0]` and `padding[1]` respectively.
+ The value should be greater than or equal to 0. Default: ``0`` .
+
+ - ``"same"``: Pad the input around its edges so that the shape of input and output
+ are the same when `stride` is set to ``1``.
+ The amount of padding is calculated by the operator internally. If the amount is even, it is
+ uniformly distributed around the input; if it is odd, the excess amount goes to the right/bottom side.
+ If this mode is set, `stride` must be 1.
+
+ - ``"valid"``: No padding is applied to the input, and the output returns the maximum
+ possible height and width. Extra pixels that could not complete a full stride will
+ be discarded.
+
+ dilation (Union(int, tuple[int], list[int]), optional): Gaps between kernel elements. The data type
+ is int or a tuple of 2 integers. Specifies the dilation rate to use for dilated convolution.
+ If set to be :math:`k > 1`,
+ there will be :math:`k - 1` pixels skipped for each sampling location. Its value must
+ be greater than or equal to 1 and bounded by the height and width of the input `x`. Default: ``1`` .
+ groups (int, optional): Splits `input` into groups. Default: ``1`` .
+
+ - :math:`(C_{in} \text{ % } \text{groups} == 0)` , :math:`(C_{out} \text{ % } \text{groups} == 0)` ,
+ :math:`(C_{out} >= \text{groups})` , :math:`(\text{kernel_size[1]} = C_{in} / \text{groups})`
+
+ Returns:
+ Tensor, the value that applied 2D convolution. The shape is :math:`(N, C_{out}, H_{out}, W_{out})`.
+ To see how different pad modes affect the output shape, please refer to
+ :class:`mindspore.mint.nn.Conv2d` for more details.
+
+ Raises:
+ ValueError: Args and size of the input feature map should satisfy the output formula to ensure that the size of
+ the output feature map is positive; otherwise, an error will be reported. For more details on the output
+ formula, please refer to :class:`mindspore.mint.nn.Conv2d`.
+ RuntimeError: On Ascend, due to the limitation of the L1 cache size of different NPU chip, if input size or
+ kernel size is too large, it may trigger an error.
+ TypeError: If `in_channels` , `out_channels` or `groups` is not an int.
+ TypeError: If `kernel_size` , `stride` or `dilation` is neither an int nor a tuple.
+ TypeError: If `bias` is not a Tensor.
+ ValueError: If the shape of `bias` is not :math:`(C_{out})` .
+ ValueError: If `stride` or `dilation` is less than 1.
+ ValueError: If `padding` is `same` , `stride` is not equal to 1.
+ ValueError: The input parameters do not satisfy the convolution output formula.
+ ValueError: The `kernel_size` cannot exceed the size of the input feature map.
+ ValueError: The value of `padding` cannot cause the calculation area to exceed the input size.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> import numpy as np
+ >>> from mindspore import Tensor, ops, mint
+ >>> x = Tensor(np.ones([10, 32, 32, 32]), mindspore.float32)
+ >>> weight = Tensor(np.ones([32, 32, 3, 3]), mindspore.float32)
+ >>> output = mint.nn.functional.conv2d(x, weight)
+ >>> print(output.shape)
+ (10, 32, 30, 30)
+ """
+ return _conv2d_instance(*args, **kwargs)
+
+
  def conv3d(*args, **kwargs):
  r"""
  conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
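
Editor's note: to make the `groups` restraints listed above concrete, here is a minimal sketch in the same doctest style as the docstrings (not part of the diff; it assumes an Ascend device, matching the documented supported platform, and uses the `mint.nn.functional.conv1d` call shown in the examples above):

>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, mint
>>> x = Tensor(np.ones([10, 32, 64]), mindspore.float32)     # N=10, C_in=32, L_in=64
>>> weight = Tensor(np.ones([16, 8, 5]), mindspore.float32)  # C_out=16, C_in/groups=8, kernel_size=5
>>> output = mint.nn.functional.conv1d(x, weight, groups=4)  # 32 % 4 == 0 and 16 % 4 == 0
>>> print(output.shape)
(10, 16, 60)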
@@ -740,13 +983,10 @@ def einsum(*args, **kwargs):

  def empty(*args, **kwargs):
  r"""
- empty(*size, *, dtype=None, device=None) -> Tensor
+ empty(*size, *, dtype=None, device=None, pin_memory=False) -> Tensor

  Creates a tensor with uninitialized data, whose shape, dtype and device are described by the argument `size`,
- `dtype` and `device` respectively.
-
- .. warning::
- This is an experimental API that is subject to change or deletion.
+ `dtype` and `device` respectively. If `pin_memory` is True, the tensor will be allocated in pinned memory.

  Args:
  size (Union[tuple[int], list[int], int]): The specified shape of output tensor. Can be variable numbers of
@@ -755,15 +995,18 @@ def empty(*args, **kwargs):
  Keyword Args:
  dtype (:class:`mindspore.dtype`, optional): The specified type of output tensor. If `dtype` is ``None`` ,
  `mindspore.float32` will be used. Default: ``None`` .
- device (string, optional): The specified device of the output tensor. In PyNative mode, ``"Ascend"``, ``"npu"``,
+ device (str, optional): The specified device of the output tensor. In PyNative mode, ``"Ascend"``, ``"npu"``,
  ``"cpu"`` and ``"CPU"`` are supported. In graph mode O0, ``"Ascend"`` and ``"npu"`` are supported. If `device = None`,
  `mindspore.context.device_target` will be used. Default ``None``.
+ pin_memory (bool, optional): If set `pin_memory` to True, the tensor will be allocated in pinned memory, and `device`
+ should be ``"cpu"`` or ``"CPU"`` . Default ``False``.

  Returns:
  Tensor, whose shape, dtype and device are defined by input.

  Raises:
  TypeError: If `size` is neither an int nor a tuple or list of int.
+ RuntimeError: If `pin_memory` is True, and `device` is neither ``"cpu"`` nor ``"CPU"`` .

  Supported Platforms:
  ``Ascend`` ``CPU``
@@ -781,13 +1024,10 @@ def empty(*args, **kwargs):

  def empty_like(*args, **kwargs):
  r"""
- empty_like(input, *, dtype=None, device=None) -> Tensor
+ empty_like(input, *, dtype=None, device=None, pin_memory=False) -> Tensor

  Returns an uninitialized Tensor with the same shape as the `input`. Its dtype is specified by `dtype` and its
- device is specified by `device`.
-
- .. warning::
- This is an experimental API that is subject to change or deletion.
+ device is specified by `device`. If `pin_memory` is True, the tensor will be allocated in pinned memory.

  Args:
  input (Tensor): Tensor of any dimension.
@@ -795,15 +1035,18 @@ def empty_like(*args, **kwargs):
  Keyword Args:
  dtype (:class:`mindspore.dtype`, optional): The specified dtype of the output tensor. If `dtype = None`, the
  tensor will have the same dtype as input `input`. Default ``None``.
- device (string, optional): The specified device of the output tensor. In PyNative mode, ``"Ascend"``, ``"npu"``,
+ device (str, optional): The specified device of the output tensor. In PyNative mode, ``"Ascend"``, ``"npu"``,
  ``"cpu"`` and ``"CPU"`` are supported. In graph mode O0, ``"Ascend"`` and ``"npu"`` are supported. If `device = None`,
  the value set by :func:`mindspore.set_device` will be used. Default ``None``.
+ pin_memory (bool, optional): If set `pin_memory` to True, the tensor will be allocated in pinned memory, and `device`
+ should be ``"cpu"`` or ``"CPU"`` . Default ``False``.

  Returns:
  Tensor, has the same shape, type and device as `input` but with uninitialized data (May be a random value).

  Raises:
  TypeError: If `input` is not a Tensor.
+ RuntimeError: If `pin_memory` is True, and `device` is neither ``"cpu"`` nor ``"CPU"`` .

  Supported Platforms:
  ``Ascend`` ``CPU``
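
Editor's note: the new `pin_memory` keyword on `empty` and `empty_like` only applies to host allocations; per the docstrings above, combining it with a non-CPU `device` raises `RuntimeError`. A minimal usage sketch follows (not part of the diff; it assumes these overloads are also exposed through `mindspore.mint`, which this hunk does not show):

>>> import mindspore
>>> from mindspore import mint
>>> # Pinned (page-locked) host memory: device must be "cpu"/"CPU".
>>> buf = mint.empty(2, 3, dtype=mindspore.float32, device="CPU", pin_memory=True)
>>> print(buf.shape)
(2, 3)
>>> # Same shape and dtype as an existing tensor, also pinned on the host.
>>> like = mint.empty_like(buf, device="CPU", pin_memory=True)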
@@ -843,9 +1086,6 @@ def floor_divide(*args, **kwargs):
  where the :math:`floor` indicates the Floor operator. For more details,
  please refer to the :class:`mindspore.mint.floor` operator.

- .. warning::
- This is an experimental API that is subject to change or deletion.
-
  Args:
  input (Union[Tensor, Number, bool]): The first input is a number or
  a bool or a tensor whose data type is number or bool.
@@ -1157,6 +1397,54 @@ def ge(*args, **kwargs):
  return _greater_equal_instance(*args, **kwargs)


+ def imag(*args, **kwargs):
+ r"""
+ imag(input) -> Tensor
+
+ Return a new tensor containing the imaginary values of the input tensor.
+ The returned tensor and input tensor share the same underlying storage.
+
+ Note:
+ - Only support Pynative mode.
+ - Only support complex64 and complex128 tensors.
+
+ Args:
+ input (Tensor): The input tensor, the data type must be complex64 or complex128.
+
+ Returns:
+ Tensor, the shape is same as `input`. The data type is float32 if `input` is complex64, float64 when `input` is complex128.
+
+ Raises:
+ TypeError: If dtype of `input` is not complex64 or complex128.
+ ValueError: If input tensor has no storage info.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops, context
+ >>> context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
+ >>> real = Tensor([1.1, 2.1, 3.1], mindspore.float32)
+ >>> imag = Tensor([4.1, 5.1, 6.1], mindspore.float32)
+ >>> x = ops.Complex()(real, imag)
+ >>> output = ops.functional_overload.imag(x)
+ >>> print(output)
+ [4.1 5.1 6.1]
+ >>> print(output.dtype)
+ Float32
+ >>> real = Tensor([1.1, 2.1, 3.1], mindspore.float64)
+ >>> imag = Tensor([4.1, 5.1, 6.1], mindspore.float64)
+ >>> x = ops.Complex()(real, imag)
+ >>> output = ops.functional_overload.imag(x)
+ >>> print(output)
+ [4.1 5.1 6.1]
+ >>> print(output.dtype)
+ Float64
+ """
+ return _imag_instance(*args, **kwargs)
+
+
  def index_add(*args, **kwargs):
  r"""
  index_add(input, dim, index, source, *, alpha=1) -> Tensor
@@ -1625,9 +1913,6 @@ def pixel_shuffle(*args, **kwargs):
  For detailed introduction to the pixel_shuffle algorithm, refer to
  `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network <https://arxiv.org/abs/1609.05158>`_ .

- .. warning::
- This is an experimental API that is subject to change or deletion.
-
  Args:
  input (Tensor): Tensor of shape :math:`(*, C \times r^2, H, W)` . The dimension of `input` is larger than 2,
  and the length of third to last dimension can be divisible by the square of `upscale_factor`.
@@ -1709,6 +1994,53 @@ def quant_matmul(*args, **kwargs):
  return _quant_matmul_instance(*args, **kwargs)


+ def real(*args, **kwargs):
+ r"""
+ real(input) -> Tensor
+
+ Return a new tensor containing the real values of the input tensor. If input is real, it is returned unchanged.
+ The returned tensor and input tensor share the same underlying storage.
+
+ Note:
+ Only support Pynative mode.
+
+ Args:
+ input (Tensor): The input tensor.
+
+ Returns:
+ Tensor, the shape is same as `input`. The data type is float32 if `input` is complex64, float64 when `input` is complex128.
+ Otherwise, the data type is the same as `input`.
+
+ Raises:
+ ValueError: If input tensor has no storage info.
+
+ Supported Platforms:
+ ``Ascend``
+
+ Examples:
+ >>> import mindspore
+ >>> from mindspore import Tensor, ops, context
+ >>> context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
+ >>> real = Tensor([1.1, 2.1, 3.1], mindspore.float32)
+ >>> imag = Tensor([4.1, 5.1, 6.1], mindspore.float32)
+ >>> x = ops.Complex()(real, imag)
+ >>> output = ops.functional_overload.real(x)
+ >>> print(output)
+ [1.1 2.1 3.1]
+ >>> print(output.dtype)
+ Float32
+ >>> real = Tensor([1.1, 2.1, 3.1], mindspore.float64)
+ >>> imag = Tensor([4.1, 5.1, 6.1], mindspore.float64)
+ >>> x = ops.Complex()(real, imag)
+ >>> output = ops.functional_overload.real(x)
+ >>> print(output)
+ [1.1 2.1 3.1]
+ >>> print(output.dtype)
+ Float64
+ """
+ return _real_instance(*args, **kwargs)
+
+
  def remainder(*args, **kwargs):
  r"""
  remainder(input, other) -> Tensor
@@ -1983,6 +2315,8 @@ __all__ = [
  "bitwise_not",
  "clamp",
  "clip",
+ "conv1d",
+ "conv2d",
  "conv3d",
  "div",
  "divide",
@@ -1997,6 +2331,7 @@ __all__ = [
  "gmm_backward_fusion",
  "greater_equal",
  "ge",
+ "imag",
  "index_add",
  "kthvalue",
  "lerp",
@@ -2006,6 +2341,7 @@ __all__ = [
  "nansum",
  "pixel_shuffle",
  "quant_matmul",
+ "real",
  "remainder",
  "repeat_interleave",
  "rmod",
@@ -55,7 +55,7 @@ from .comm_ops import (AllGather, AllReduce, Reduce, NeighborExchange, NeighborE
  Broadcast, CollectiveGather, CollectiveScatter, Barrier, Send, Receive, BatchISendIRecv,
  _MirrorOperator, _MirrorMiniStepOperator, _MiniStepAllGather, ReduceOp, _VirtualDataset,
  _VirtualOutput, _VirtualDiv, _GetTensorSlice, _VirtualAdd, _VirtualAssignAdd, _VirtualAccuGrad,
- _HostAllGather, _HostReduceScatter, _MirrorMicroStepOperator, _MicroStepAllGather,
+ _HostAllGather, _HostReduceScatter, _MirrorMicroStepOperator, _MicroStepAllGather, AlltoAllVC,
  _VirtualPipelineEnd, AlltoAllV, ReduceScatter, _VirtualAssignKvCache, AllGatherV, ReduceScatterV)
  from .control_ops import GeSwitch, Merge
  from .custom_ops import (Custom, CustomOpBuilder)
@@ -129,7 +129,6 @@ from .random_ops import (RandomChoiceWithMask, StandardNormal, Gamma, RandomGamm
  LogUniformCandidateSampler, TruncatedNormal, LogNormalReverse, NonDeterministicInts,
  ParameterizedTruncatedNormal, RandomPoisson, MultinomialWithReplacement, RandomShuffle,
  RandpermV2)
- from .rl_ops import (BufferAppend, BufferGetItem, BufferSample)
  from .sparse_ops import (
  SparseToDense, SparseTensorDenseMatmul, SparseTensorDenseAdd, SparseSlice)
  from .spectral_ops import (BartlettWindow, BlackmanWindow)
@@ -406,6 +405,7 @@ __all__ = [
  "AllReduce",
  "AllGatherV",
  "ReduceScatterV",
+ "AlltoAllVC",
  "Reduce",
  "_AllSwap",
  "ReduceScatter",
@@ -532,9 +532,6 @@ __all__ = [
  "HShrink",
  "PyExecute",
  "PyFunc",
- "BufferAppend",
- "BufferGetItem",
- "BufferSample",
  "Erfinv",
  "Conj",
  "Real",
@@ -228,20 +228,18 @@ class ExtensionBuilder:
  source_file.write(content)

  def _run_ninja_build(self, module_name):
- """Run ninja build."""
+ """Run ninja build and log output to .build_log.txt"""
  cmd = ['ninja', '-v']
  env = os.environ.copy()
+ log_file = os.path.join(self.build_dir, '.build_log.txt')

  try:
- subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir, check=True, env=env)
- # If the build succeeds, do nothing with the output (silent)
+ with open(log_file, 'w', encoding='utf-8') as f:
+ # If the build succeeds, do nothing with the output (silent)
+ subprocess.run(cmd, stdout=f, stderr=f, cwd=self.build_dir, check=True, env=env)
  except subprocess.CalledProcessError as e:
- # Capture the error details
- stderr_output = e.stderr.decode() if e.stderr else ""
- stdout_output = e.stdout.decode() if e.stdout else ""
- full_output = stderr_output + stdout_output
-
- # Format the error message
+ with open(log_file, 'r', encoding='utf-8') as rf:
+ full_output = rf.read()
  msg = f"Error building extension '{module_name}': {full_output}"

  # In multi-card situation, only one process build the library.
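
Editor's note: the hunk above replaces in-memory capture of the ninja output with a `.build_log.txt` file that is written during the build and read back only when the build fails. A standalone sketch of the same pattern (generic Python, not MindSpore code; the command and paths are placeholders):

import os
import subprocess

def run_and_log(cmd, build_dir):
    """Run `cmd` in `build_dir`, streaming stdout/stderr to a log file.

    On failure, the log is read back so the raised error carries the full
    build output without holding it in memory while the build runs.
    """
    log_file = os.path.join(build_dir, '.build_log.txt')
    try:
        with open(log_file, 'w', encoding='utf-8') as f:
            subprocess.run(cmd, stdout=f, stderr=f, cwd=build_dir, check=True)
    except subprocess.CalledProcessError as err:
        with open(log_file, 'r', encoding='utf-8') as rf:
            raise RuntimeError(f"Command {cmd} failed:\n{rf.read()}") from err

# Example (placeholder command and directory):
# run_and_log(['ninja', '-v'], '/tmp/my_build_dir')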
@@ -1413,6 +1413,7 @@ class PsROIPooling(PrimitiveWithInfer):

  @prim_attr_register
  def __init__(self, pooled_height, pooled_width, num_rois, spatial_scale, out_dim, group_size):
+
  """Initialize PsROIPooling"""
  validator.check_value_type("pooled_height", pooled_height, [int], self.name)
  validator.check_value_type("pooled_width", pooled_width, [int], self.name)
@@ -1726,44 +1727,6 @@ class Format(PrimitiveWithInfer):
  return {'dtype': mstype.string, 'shape': [], 'value': value}


- class FlattenConcat(Primitive):
- """
- Flatten input tensors and concatenate them into several chunk tensors grouped by data types.
-
- Args:
- fusion_size (int): Maximum memory chunk size in bytes, 0 for unlimited. Default: 0.
-
- Inputs:
- - **tensors** (tuple[Tensor], list[Tensor]) - The input Tensors to be flattened and concatenated.
-
- Outputs:
- tuple[Tensor], result chunk tensors.
-
- Supported Platforms:
- ``Ascend`` ``GPU`` ``CPU``
-
- Examples:
- >>> from mindspore.ops.operations import _inner_ops as inner
- >>> t1 = Tensor(np.array([1]).astype(np.float32))
- >>> t2 = Tensor(np.array([2]).astype(np.float32))
- >>> t3 = Tensor(np.array([3]).astype(np.float64))
- >>> t4 = Tensor(np.array([4]).astype(np.float32))
- >>> t5 = Tensor(np.array([5]).astype(np.float64))
- >>> chunks = inner.FlattenConcat()([t1, t2, t2, t3, t4, t5])
- >>> print(chunks[0].asnumpy())
- >>> print(chunks[1].asnumpy())
- [1. 2. 4.]
- [3. 5.]
- """
-
- @prim_attr_register
- def __init__(self, fusion_size=0):
- """Initialize FlattenConcat"""
- validator.check_non_negative_int(fusion_size, 'fusion_size', self.name)
- self.fusion_size = fusion_size
- self.add_prim_attr('fusion_size', fusion_size)
-
-
  class KMeansCentroids(PrimitiveWithInfer):
  """
  Calculate the segment_sum, segment_count, kmean_total_sum that are clustering results