mindspore-2.2.14-cp39-cp39-win_amd64.whl → mindspore-2.4.0-cp39-cp39-win_amd64.whl

This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mindspore might be problematic.
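
Since this diff covers a wheel upgrade from 2.2.14 to 2.4.0 (the bump itself appears in mindspore/version.py further down the list), a minimal sketch for confirming which build is actually installed before comparing against this diff, assuming MindSpore is importable in the current environment:

    import mindspore as ms

    print(ms.__version__)   # "2.2.14" before the upgrade, "2.4.0" after
    ms.run_check()          # bundled installation sanity check (see mindspore/run_check/run_check.py below)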

Files changed (1217)
  1. mindspore/.commit_id +1 -1
  2. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  3. mindspore/Newtonsoft.Json.dll +0 -0
  4. mindspore/__init__.py +8 -5
  5. mindspore/_c_dataengine.cp39-win_amd64.pyd +0 -0
  6. mindspore/_c_expression.cp39-win_amd64.pyd +0 -0
  7. mindspore/_c_mindrecord.cp39-win_amd64.pyd +0 -0
  8. mindspore/_checkparam.py +124 -25
  9. mindspore/_extends/builtin_operations.py +2 -1
  10. mindspore/_extends/graph_kernel/model/graph_parallel.py +16 -6
  11. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +3 -16
  12. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +16 -4
  13. mindspore/_extends/parallel_compile/akg_compiler/compiler.py +1 -0
  14. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
  15. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +2 -1
  16. mindspore/_extends/parallel_compile/akg_compiler/util.py +5 -2
  17. mindspore/_extends/parse/__init__.py +18 -14
  18. mindspore/_extends/parse/compile_config.py +299 -0
  19. mindspore/_extends/parse/namespace.py +2 -2
  20. mindspore/_extends/parse/parser.py +182 -68
  21. mindspore/_extends/parse/resources.py +45 -14
  22. mindspore/_extends/parse/standard_method.py +192 -252
  23. mindspore/{ops/_op_impl/tbe/atomic_addr_clean.py → _extends/pijit/__init__.py} +6 -16
  24. mindspore/_extends/pijit/pijit_func_white_list.py +669 -0
  25. mindspore/_extends/remote/kernel_build_server.py +2 -0
  26. mindspore/_profiler.py +30 -0
  27. mindspore/amp.py +67 -26
  28. mindspore/atlprov.dll +0 -0
  29. mindspore/avcodec-59.dll +0 -0
  30. mindspore/avdevice-59.dll +0 -0
  31. mindspore/avfilter-8.dll +0 -0
  32. mindspore/avformat-59.dll +0 -0
  33. mindspore/avutil-57.dll +0 -0
  34. mindspore/boost/adasum.py +1 -1
  35. mindspore/boost/base.py +1 -1
  36. mindspore/boost/boost_cell_wrapper.py +2 -2
  37. mindspore/boost/grad_freeze.py +2 -2
  38. mindspore/boost/group_loss_scale_manager.py +1 -1
  39. mindspore/boost/less_batch_normalization.py +9 -6
  40. mindspore/c1.dll +0 -0
  41. mindspore/c1xx.dll +0 -0
  42. mindspore/c2.dll +0 -0
  43. mindspore/common/__init__.py +20 -7
  44. mindspore/common/_jit_fallback_utils.py +2 -3
  45. mindspore/common/_pijit_context.py +190 -0
  46. mindspore/common/_register_for_adapter.py +7 -0
  47. mindspore/common/_register_for_recompute.py +48 -0
  48. mindspore/common/_register_for_tensor.py +10 -10
  49. mindspore/common/_stub_tensor.py +7 -1
  50. mindspore/common/_tensor_overload.py +139 -0
  51. mindspore/common/_utils.py +5 -17
  52. mindspore/common/api.py +449 -129
  53. mindspore/common/auto_dynamic_shape.py +27 -14
  54. mindspore/common/dtype.py +17 -10
  55. mindspore/common/dump.py +8 -11
  56. mindspore/common/file_system.py +48 -0
  57. mindspore/common/generator.py +254 -0
  58. mindspore/common/hook_handle.py +65 -30
  59. mindspore/common/initializer.py +1 -1
  60. mindspore/common/jit_config.py +34 -14
  61. mindspore/common/lazy_inline.py +72 -19
  62. mindspore/common/mindir_util.py +12 -2
  63. mindspore/common/mutable.py +79 -14
  64. mindspore/common/no_inline.py +54 -0
  65. mindspore/common/np_dtype.py +25 -0
  66. mindspore/common/parameter.py +73 -21
  67. mindspore/common/recompute.py +292 -0
  68. mindspore/common/seed.py +9 -9
  69. mindspore/common/sparse_tensor.py +276 -24
  70. mindspore/common/symbol.py +122 -0
  71. mindspore/common/tensor.py +668 -514
  72. mindspore/communication/__init__.py +6 -11
  73. mindspore/communication/_comm_helper.py +43 -3
  74. mindspore/communication/comm_func.py +1395 -0
  75. mindspore/communication/management.py +117 -104
  76. mindspore/config/op_info.config +22 -54
  77. mindspore/context.py +455 -71
  78. mindspore/dataset/__init__.py +5 -5
  79. mindspore/dataset/audio/__init__.py +6 -6
  80. mindspore/dataset/audio/transforms.py +711 -158
  81. mindspore/dataset/callback/ds_callback.py +2 -2
  82. mindspore/dataset/core/config.py +7 -0
  83. mindspore/dataset/core/validator_helpers.py +7 -0
  84. mindspore/dataset/engine/cache_client.py +2 -2
  85. mindspore/dataset/engine/datasets.py +201 -116
  86. mindspore/dataset/engine/datasets_audio.py +14 -14
  87. mindspore/dataset/engine/datasets_standard_format.py +83 -3
  88. mindspore/dataset/engine/datasets_text.py +39 -39
  89. mindspore/dataset/engine/datasets_user_defined.py +230 -141
  90. mindspore/dataset/engine/datasets_vision.py +78 -74
  91. mindspore/dataset/engine/iterators.py +29 -0
  92. mindspore/dataset/engine/obs/util.py +7 -0
  93. mindspore/dataset/engine/offload.py +5 -7
  94. mindspore/dataset/engine/queue.py +138 -66
  95. mindspore/dataset/engine/serializer_deserializer.py +2 -2
  96. mindspore/dataset/engine/validators.py +41 -15
  97. mindspore/dataset/text/__init__.py +2 -5
  98. mindspore/dataset/text/transforms.py +408 -121
  99. mindspore/dataset/text/utils.py +9 -9
  100. mindspore/dataset/transforms/__init__.py +0 -3
  101. mindspore/dataset/transforms/transforms.py +261 -76
  102. mindspore/dataset/utils/browse_dataset.py +9 -9
  103. mindspore/dataset/utils/line_reader.py +2 -0
  104. mindspore/dataset/vision/__init__.py +7 -10
  105. mindspore/dataset/vision/c_transforms.py +10 -10
  106. mindspore/dataset/vision/py_transforms_util.py +1 -1
  107. mindspore/dataset/vision/transforms.py +2844 -549
  108. mindspore/dataset/vision/utils.py +161 -10
  109. mindspore/dataset/vision/validators.py +16 -3
  110. mindspore/dnnl.dll +0 -0
  111. mindspore/dpcmi.dll +0 -0
  112. mindspore/{rewrite/ast_creator_register.py → experimental/es/__init__.py} +5 -20
  113. mindspore/experimental/es/embedding_service.py +883 -0
  114. mindspore/experimental/es/embedding_service_layer.py +581 -0
  115. mindspore/experimental/llm_boost/__init__.py +21 -0
  116. mindspore/experimental/llm_boost/atb/__init__.py +23 -0
  117. mindspore/experimental/llm_boost/atb/boost_base.py +211 -0
  118. mindspore/experimental/llm_boost/atb/llama_boost.py +115 -0
  119. mindspore/experimental/llm_boost/atb/qwen_boost.py +101 -0
  120. mindspore/experimental/llm_boost/register.py +129 -0
  121. mindspore/experimental/llm_boost/utils.py +31 -0
  122. mindspore/experimental/optim/__init__.py +12 -2
  123. mindspore/experimental/optim/adadelta.py +161 -0
  124. mindspore/experimental/optim/adagrad.py +168 -0
  125. mindspore/experimental/optim/adam.py +35 -34
  126. mindspore/experimental/optim/adamax.py +170 -0
  127. mindspore/experimental/optim/adamw.py +124 -15
  128. mindspore/experimental/optim/asgd.py +153 -0
  129. mindspore/experimental/optim/lr_scheduler.py +66 -121
  130. mindspore/experimental/optim/nadam.py +157 -0
  131. mindspore/experimental/optim/optimizer.py +18 -8
  132. mindspore/experimental/optim/radam.py +194 -0
  133. mindspore/experimental/optim/rmsprop.py +154 -0
  134. mindspore/experimental/optim/rprop.py +164 -0
  135. mindspore/experimental/optim/sgd.py +28 -19
  136. mindspore/hal/__init__.py +40 -0
  137. mindspore/hal/_ascend.py +57 -0
  138. mindspore/hal/_base.py +57 -0
  139. mindspore/hal/_cpu.py +56 -0
  140. mindspore/hal/_gpu.py +57 -0
  141. mindspore/hal/contiguous_tensors_handle.py +175 -0
  142. mindspore/hal/device.py +356 -0
  143. mindspore/hal/event.py +179 -0
  144. mindspore/hal/memory.py +326 -0
  145. mindspore/hal/stream.py +357 -0
  146. mindspore/include/api/data_type.h +2 -2
  147. mindspore/include/api/dual_abi_helper.h +16 -3
  148. mindspore/include/api/model.h +4 -3
  149. mindspore/include/api/model_group.h +13 -1
  150. mindspore/include/api/status.h +14 -0
  151. mindspore/include/api/types.h +10 -10
  152. mindspore/include/c_api/model_c.h +173 -0
  153. mindspore/include/c_api/types_c.h +19 -0
  154. mindspore/include/dataset/config.h +2 -2
  155. mindspore/include/dataset/constants.h +2 -2
  156. mindspore/include/dataset/execute.h +3 -5
  157. mindspore/include/dataset/vision.h +58 -2
  158. mindspore/jpeg62.dll +0 -0
  159. mindspore/log.py +3 -3
  160. mindspore/mindrecord/__init__.py +5 -1
  161. mindspore/mindrecord/config.py +809 -0
  162. mindspore/mindrecord/filereader.py +25 -0
  163. mindspore/mindrecord/filewriter.py +138 -103
  164. mindspore/mindrecord/mindpage.py +40 -6
  165. mindspore/mindrecord/shardutils.py +3 -2
  166. mindspore/mindrecord/shardwriter.py +7 -0
  167. mindspore/mindrecord/tools/cifar100_to_mr.py +8 -13
  168. mindspore/mindrecord/tools/cifar10_to_mr.py +9 -15
  169. mindspore/mindrecord/tools/csv_to_mr.py +4 -9
  170. mindspore/mindrecord/tools/imagenet_to_mr.py +3 -8
  171. mindspore/mindrecord/tools/mnist_to_mr.py +7 -12
  172. mindspore/mindrecord/tools/tfrecord_to_mr.py +1 -6
  173. mindspore/mindspore_backend.dll +0 -0
  174. mindspore/mindspore_common.dll +0 -0
  175. mindspore/mindspore_core.dll +0 -0
  176. mindspore/mindspore_glog.dll +0 -0
  177. mindspore/mindspore_np_dtype.dll +0 -0
  178. mindspore/mindspore_ops.dll +0 -0
  179. mindspore/mint/__init__.py +1586 -0
  180. mindspore/mint/distributed/__init__.py +31 -0
  181. mindspore/mint/distributed/distributed.py +254 -0
  182. mindspore/{rewrite/ast_transformers → mint/linalg}/__init__.py +9 -4
  183. mindspore/mint/nn/__init__.py +757 -0
  184. mindspore/mint/nn/functional.py +679 -0
  185. mindspore/mint/nn/layer/__init__.py +39 -0
  186. mindspore/mint/nn/layer/activation.py +133 -0
  187. mindspore/mint/nn/layer/normalization.py +477 -0
  188. mindspore/mint/nn/layer/pooling.py +110 -0
  189. mindspore/mint/optim/__init__.py +24 -0
  190. mindspore/mint/optim/adamw.py +206 -0
  191. mindspore/mint/special/__init__.py +63 -0
  192. mindspore/msobj140.dll +0 -0
  193. mindspore/mspdb140.dll +0 -0
  194. mindspore/mspdbcore.dll +0 -0
  195. mindspore/mspdbst.dll +0 -0
  196. mindspore/mspft140.dll +0 -0
  197. mindspore/msvcdis140.dll +0 -0
  198. mindspore/msvcp140_1.dll +0 -0
  199. mindspore/msvcp140_2.dll +0 -0
  200. mindspore/msvcp140_atomic_wait.dll +0 -0
  201. mindspore/msvcp140_codecvt_ids.dll +0 -0
  202. mindspore/multiprocessing/__init__.py +73 -0
  203. mindspore/nn/cell.py +461 -323
  204. mindspore/nn/dynamic_lr.py +2 -2
  205. mindspore/nn/layer/activation.py +292 -135
  206. mindspore/nn/layer/basic.py +288 -83
  207. mindspore/nn/layer/channel_shuffle.py +3 -16
  208. mindspore/nn/layer/container.py +3 -3
  209. mindspore/nn/layer/conv.py +75 -66
  210. mindspore/nn/layer/embedding.py +221 -45
  211. mindspore/nn/layer/image.py +4 -7
  212. mindspore/nn/layer/math.py +1 -1
  213. mindspore/nn/layer/normalization.py +150 -68
  214. mindspore/nn/layer/padding.py +64 -87
  215. mindspore/nn/layer/pooling.py +175 -12
  216. mindspore/nn/layer/rnn_cells.py +6 -16
  217. mindspore/nn/layer/rnns.py +6 -5
  218. mindspore/nn/layer/thor_layer.py +1 -2
  219. mindspore/nn/layer/timedistributed.py +1 -1
  220. mindspore/nn/layer/transformer.py +55 -53
  221. mindspore/nn/learning_rate_schedule.py +6 -5
  222. mindspore/nn/loss/__init__.py +2 -2
  223. mindspore/nn/loss/loss.py +145 -88
  224. mindspore/nn/optim/__init__.py +2 -1
  225. mindspore/nn/optim/ada_grad.py +4 -2
  226. mindspore/nn/optim/adadelta.py +4 -2
  227. mindspore/nn/optim/adafactor.py +1 -1
  228. mindspore/nn/optim/adam.py +102 -181
  229. mindspore/nn/optim/adamax.py +4 -2
  230. mindspore/nn/optim/adasum.py +3 -3
  231. mindspore/nn/optim/asgd.py +4 -2
  232. mindspore/nn/optim/ftrl.py +31 -61
  233. mindspore/nn/optim/lamb.py +5 -3
  234. mindspore/nn/optim/lars.py +2 -2
  235. mindspore/nn/optim/lazyadam.py +6 -4
  236. mindspore/nn/optim/momentum.py +13 -25
  237. mindspore/nn/optim/optimizer.py +6 -3
  238. mindspore/nn/optim/proximal_ada_grad.py +4 -2
  239. mindspore/nn/optim/rmsprop.py +9 -3
  240. mindspore/nn/optim/rprop.py +4 -2
  241. mindspore/nn/optim/sgd.py +5 -3
  242. mindspore/nn/optim/tft_wrapper.py +127 -0
  243. mindspore/nn/optim/thor.py +2 -2
  244. mindspore/nn/probability/distribution/_utils/custom_ops.py +2 -2
  245. mindspore/nn/probability/distribution/beta.py +2 -2
  246. mindspore/nn/probability/distribution/categorical.py +4 -6
  247. mindspore/nn/probability/distribution/cauchy.py +2 -2
  248. mindspore/nn/probability/distribution/exponential.py +2 -2
  249. mindspore/nn/probability/distribution/geometric.py +1 -1
  250. mindspore/nn/probability/distribution/gumbel.py +2 -2
  251. mindspore/nn/probability/distribution/logistic.py +1 -1
  252. mindspore/nn/probability/distribution/poisson.py +2 -2
  253. mindspore/nn/probability/distribution/uniform.py +2 -2
  254. mindspore/nn/reinforcement/_tensors_queue.py +13 -1
  255. mindspore/nn/wrap/__init__.py +2 -1
  256. mindspore/nn/wrap/cell_wrapper.py +46 -12
  257. mindspore/nn/wrap/grad_reducer.py +148 -8
  258. mindspore/nn/wrap/loss_scale.py +44 -7
  259. mindspore/numpy/__init__.py +2 -0
  260. mindspore/numpy/array_creations.py +67 -68
  261. mindspore/numpy/array_ops.py +70 -66
  262. mindspore/numpy/dtypes.py +3 -3
  263. mindspore/numpy/fft.py +966 -0
  264. mindspore/numpy/logic_ops.py +11 -10
  265. mindspore/numpy/math_ops.py +147 -152
  266. mindspore/numpy/utils.py +3 -0
  267. mindspore/numpy/utils_const.py +4 -4
  268. mindspore/opencv_core452.dll +0 -0
  269. mindspore/opencv_imgcodecs452.dll +0 -0
  270. mindspore/opencv_imgproc452.dll +0 -0
  271. mindspore/ops/__init__.py +9 -6
  272. mindspore/ops/_grad_experimental/grad_array_ops.py +4 -129
  273. mindspore/ops/_grad_experimental/grad_comm_ops.py +135 -36
  274. mindspore/ops/_grad_experimental/grad_math_ops.py +61 -298
  275. mindspore/ops/_grad_experimental/grad_nn_ops.py +0 -53
  276. mindspore/ops/_grad_experimental/grad_quant_ops.py +3 -3
  277. mindspore/ops/_grad_experimental/grad_sparse.py +1 -1
  278. mindspore/ops/_grad_experimental/grad_sparse_ops.py +3 -3
  279. mindspore/ops/_op_impl/__init__.py +0 -1
  280. mindspore/ops/_op_impl/aicpu/gamma.py +2 -0
  281. mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +1 -1
  282. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +1 -3
  283. mindspore/ops/_op_impl/aicpu/poisson.py +2 -0
  284. mindspore/ops/_op_impl/cpu/__init__.py +1 -3
  285. mindspore/ops/_op_impl/cpu/adam.py +2 -2
  286. mindspore/ops/_op_impl/cpu/adam_weight_decay.py +3 -2
  287. mindspore/ops/_op_impl/cpu/maximum_grad.py +16 -14
  288. mindspore/ops/_op_impl/cpu/minimum_grad.py +8 -0
  289. mindspore/ops/_vmap/vmap_array_ops.py +162 -101
  290. mindspore/ops/_vmap/vmap_base.py +8 -1
  291. mindspore/ops/_vmap/vmap_grad_math_ops.py +95 -9
  292. mindspore/ops/_vmap/vmap_grad_nn_ops.py +143 -58
  293. mindspore/ops/_vmap/vmap_image_ops.py +70 -13
  294. mindspore/ops/_vmap/vmap_math_ops.py +147 -59
  295. mindspore/ops/_vmap/vmap_nn_ops.py +292 -117
  296. mindspore/ops/_vmap/vmap_other_ops.py +1 -1
  297. mindspore/ops/auto_generate/__init__.py +31 -0
  298. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +309 -0
  299. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +252 -0
  300. mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
  301. mindspore/ops/auto_generate/gen_extend_func.py +1701 -0
  302. mindspore/ops/auto_generate/gen_ops_def.py +8482 -0
  303. mindspore/ops/auto_generate/gen_ops_prim.py +16704 -0
  304. mindspore/ops/auto_generate/pyboost_inner_prim.py +549 -0
  305. mindspore/ops/composite/__init__.py +5 -2
  306. mindspore/ops/composite/base.py +201 -66
  307. mindspore/ops/composite/math_ops.py +10 -49
  308. mindspore/ops/composite/multitype_ops/_compile_utils.py +192 -618
  309. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +25 -134
  310. mindspore/ops/composite/multitype_ops/add_impl.py +6 -0
  311. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +6 -0
  312. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +6 -0
  313. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +6 -0
  314. mindspore/ops/composite/multitype_ops/div_impl.py +8 -0
  315. mindspore/ops/composite/multitype_ops/equal_impl.py +6 -0
  316. mindspore/ops/composite/multitype_ops/floordiv_impl.py +8 -0
  317. mindspore/ops/composite/multitype_ops/getitem_impl.py +6 -0
  318. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +6 -0
  319. mindspore/ops/composite/multitype_ops/greater_impl.py +6 -0
  320. mindspore/ops/composite/multitype_ops/in_impl.py +8 -2
  321. mindspore/ops/composite/multitype_ops/left_shift_impl.py +6 -0
  322. mindspore/ops/composite/multitype_ops/less_equal_impl.py +6 -0
  323. mindspore/ops/composite/multitype_ops/less_impl.py +6 -0
  324. mindspore/ops/composite/multitype_ops/logic_not_impl.py +6 -0
  325. mindspore/ops/composite/multitype_ops/logical_and_impl.py +6 -0
  326. mindspore/ops/composite/multitype_ops/logical_or_impl.py +6 -0
  327. mindspore/ops/composite/multitype_ops/mod_impl.py +6 -0
  328. mindspore/ops/composite/multitype_ops/mul_impl.py +6 -0
  329. mindspore/ops/composite/multitype_ops/negative_impl.py +9 -3
  330. mindspore/ops/composite/multitype_ops/not_equal_impl.py +6 -0
  331. mindspore/ops/composite/multitype_ops/not_in_impl.py +8 -3
  332. mindspore/ops/composite/multitype_ops/ones_like_impl.py +2 -2
  333. mindspore/ops/composite/multitype_ops/pow_impl.py +6 -0
  334. mindspore/ops/composite/multitype_ops/right_shift_impl.py +6 -0
  335. mindspore/ops/composite/multitype_ops/setitem_impl.py +32 -21
  336. mindspore/ops/composite/multitype_ops/sub_impl.py +6 -0
  337. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +6 -3
  338. mindspore/ops/deprecated.py +14 -3
  339. mindspore/ops/function/__init__.py +53 -11
  340. mindspore/ops/function/array_func.py +1269 -1821
  341. mindspore/ops/function/clip_func.py +19 -31
  342. mindspore/ops/function/debug_func.py +114 -5
  343. mindspore/ops/function/fft_func.py +44 -0
  344. mindspore/ops/function/grad/grad_func.py +30 -22
  345. mindspore/ops/function/image_func.py +27 -21
  346. mindspore/ops/function/linalg_func.py +35 -68
  347. mindspore/ops/function/math_func.py +1170 -2697
  348. mindspore/ops/function/nn_func.py +2116 -1128
  349. mindspore/ops/function/other_func.py +8 -8
  350. mindspore/ops/function/parameter_func.py +5 -93
  351. mindspore/ops/function/random_func.py +435 -113
  352. mindspore/ops/function/reshard_func.py +104 -0
  353. mindspore/ops/function/sparse_func.py +4 -4
  354. mindspore/ops/function/sparse_unary_func.py +9 -16
  355. mindspore/ops/function/spectral_func.py +1 -1
  356. mindspore/ops/function/vmap_func.py +16 -15
  357. mindspore/ops/functional.py +355 -346
  358. mindspore/ops/op_info_register.py +18 -45
  359. mindspore/ops/operations/__init__.py +38 -24
  360. mindspore/ops/operations/_grad_ops.py +21 -927
  361. mindspore/ops/operations/_infer_ops.py +19 -0
  362. mindspore/ops/operations/_inner_ops.py +173 -607
  363. mindspore/ops/operations/_rl_inner_ops.py +2 -2
  364. mindspore/ops/operations/_scalar_ops.py +5 -480
  365. mindspore/ops/operations/_sequence_ops.py +6 -36
  366. mindspore/ops/operations/_tensor_array.py +8 -8
  367. mindspore/ops/operations/array_ops.py +106 -2837
  368. mindspore/ops/operations/comm_ops.py +799 -127
  369. mindspore/ops/operations/custom_ops.py +124 -119
  370. mindspore/ops/operations/debug_ops.py +142 -41
  371. mindspore/ops/operations/image_ops.py +1 -217
  372. mindspore/ops/operations/inner_ops.py +5 -40
  373. mindspore/ops/operations/linalg_ops.py +1 -49
  374. mindspore/ops/operations/manually_defined/__init__.py +24 -0
  375. mindspore/ops/operations/manually_defined/_inner.py +73 -0
  376. mindspore/ops/operations/manually_defined/ops_def.py +2271 -0
  377. mindspore/ops/operations/math_ops.py +666 -4972
  378. mindspore/ops/operations/nn_ops.py +205 -2213
  379. mindspore/ops/operations/other_ops.py +60 -49
  380. mindspore/ops/operations/random_ops.py +50 -54
  381. mindspore/ops/operations/reshard_ops.py +53 -0
  382. mindspore/ops/operations/sparse_ops.py +4 -4
  383. mindspore/ops/primitive.py +216 -103
  384. mindspore/ops_generate/__init__.py +27 -0
  385. mindspore/ops_generate/arg_dtype_cast.py +252 -0
  386. mindspore/ops_generate/arg_handler.py +197 -0
  387. mindspore/ops_generate/gen_aclnn_implement.py +263 -0
  388. mindspore/ops_generate/gen_constants.py +36 -0
  389. mindspore/ops_generate/gen_ops.py +1099 -0
  390. mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
  391. mindspore/ops_generate/gen_pyboost_func.py +1052 -0
  392. mindspore/ops_generate/gen_utils.py +209 -0
  393. mindspore/ops_generate/op_proto.py +145 -0
  394. mindspore/ops_generate/pyboost_utils.py +367 -0
  395. mindspore/ops_generate/template.py +261 -0
  396. mindspore/parallel/__init__.py +8 -4
  397. mindspore/parallel/_auto_parallel_context.py +100 -10
  398. mindspore/parallel/_cell_wrapper.py +99 -9
  399. mindspore/parallel/_cost_model_context.py +1 -1
  400. mindspore/parallel/_dp_allreduce_fusion.py +159 -159
  401. mindspore/parallel/_parallel_serialization.py +67 -23
  402. mindspore/parallel/_ps_context.py +1 -1
  403. mindspore/parallel/_recovery_context.py +1 -1
  404. mindspore/parallel/_tensor.py +99 -22
  405. mindspore/parallel/_transformer/__init__.py +1 -1
  406. mindspore/parallel/_transformer/layers.py +1 -1
  407. mindspore/parallel/_transformer/loss.py +1 -1
  408. mindspore/parallel/_transformer/moe.py +1 -1
  409. mindspore/parallel/_transformer/op_parallel_config.py +1 -1
  410. mindspore/parallel/_transformer/transformer.py +2 -2
  411. mindspore/parallel/_utils.py +173 -6
  412. mindspore/parallel/algo_parameter_config.py +8 -10
  413. mindspore/parallel/checkpoint_transform.py +204 -38
  414. mindspore/parallel/cluster/__init__.py +15 -0
  415. mindspore/parallel/cluster/process_entity/__init__.py +18 -0
  416. mindspore/parallel/cluster/process_entity/_api.py +352 -0
  417. mindspore/parallel/cluster/process_entity/_utils.py +101 -0
  418. mindspore/parallel/cluster/run.py +136 -0
  419. mindspore/parallel/mpi/__init__.py +1 -1
  420. mindspore/parallel/mpi/_mpi_config.py +1 -1
  421. mindspore/parallel/parameter_broadcast.py +151 -0
  422. mindspore/parallel/shard.py +279 -37
  423. mindspore/parallel/transform_safetensors.py +993 -0
  424. mindspore/pgodb140.dll +0 -0
  425. mindspore/pgort140.dll +0 -0
  426. mindspore/profiler/__init__.py +4 -2
  427. mindspore/profiler/common/constant.py +29 -0
  428. mindspore/profiler/common/process_pool.py +41 -0
  429. mindspore/profiler/common/registry.py +47 -0
  430. mindspore/profiler/common/singleton.py +28 -0
  431. mindspore/profiler/common/util.py +153 -0
  432. mindspore/profiler/dynamic_profiler.py +694 -0
  433. mindspore/profiler/envprofiling.py +18 -20
  434. mindspore/{_extends/parallel_compile/tbe_compiler → profiler/parser/ascend_analysis}/__init__.py +1 -1
  435. mindspore/profiler/parser/ascend_analysis/constant.py +71 -0
  436. mindspore/profiler/parser/ascend_analysis/file_manager.py +180 -0
  437. mindspore/profiler/parser/ascend_analysis/function_event.py +185 -0
  438. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +136 -0
  439. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +131 -0
  440. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +104 -0
  441. mindspore/profiler/parser/ascend_analysis/path_manager.py +313 -0
  442. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +123 -0
  443. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
  444. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +75 -0
  445. mindspore/profiler/parser/ascend_cluster_generator.py +14 -9
  446. mindspore/profiler/parser/ascend_communicate_generator.py +0 -1
  447. mindspore/profiler/parser/ascend_flops_generator.py +20 -4
  448. mindspore/profiler/parser/ascend_hccl_generator.py +29 -278
  449. mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
  450. mindspore/profiler/parser/ascend_memory_generator.py +185 -0
  451. mindspore/profiler/parser/ascend_msprof_exporter.py +148 -146
  452. mindspore/profiler/parser/ascend_msprof_generator.py +73 -283
  453. mindspore/profiler/parser/ascend_op_generator.py +92 -42
  454. mindspore/profiler/parser/ascend_timeline_generator.py +298 -133
  455. mindspore/profiler/parser/base_timeline_generator.py +25 -25
  456. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +25 -12
  457. mindspore/profiler/parser/framework_parser.py +4 -393
  458. mindspore/profiler/parser/gpu_analysis/__init__.py +14 -0
  459. mindspore/profiler/parser/gpu_analysis/function_event.py +44 -0
  460. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +89 -0
  461. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +72 -0
  462. mindspore/profiler/parser/integrator.py +3 -1
  463. mindspore/profiler/parser/memory_usage_parser.py +0 -154
  464. mindspore/profiler/parser/minddata_parser.py +72 -3
  465. mindspore/profiler/parser/profiler_info.py +94 -7
  466. mindspore/profiler/profiler.py +153 -0
  467. mindspore/profiler/profiling.py +631 -508
  468. mindspore/rewrite/__init__.py +2 -14
  469. mindspore/rewrite/api/node.py +122 -36
  470. mindspore/rewrite/api/pattern_engine.py +2 -3
  471. mindspore/rewrite/api/scoped_value.py +16 -15
  472. mindspore/rewrite/api/symbol_tree.py +45 -29
  473. mindspore/rewrite/ast_helpers/__init__.py +3 -6
  474. mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
  475. mindspore/rewrite/ast_helpers/ast_finder.py +48 -0
  476. mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
  477. mindspore/rewrite/ast_helpers/ast_modifier.py +160 -92
  478. mindspore/rewrite/common/__init__.py +1 -2
  479. mindspore/rewrite/common/config.py +24 -0
  480. mindspore/rewrite/common/{rewrite_elog.py → error_log.py} +39 -39
  481. mindspore/rewrite/{namer.py → common/namer.py} +63 -18
  482. mindspore/rewrite/common/namespace.py +118 -0
  483. mindspore/rewrite/node/__init__.py +5 -5
  484. mindspore/rewrite/node/call_function.py +23 -7
  485. mindspore/rewrite/node/cell_container.py +7 -3
  486. mindspore/rewrite/node/control_flow.py +53 -28
  487. mindspore/rewrite/node/node.py +212 -196
  488. mindspore/rewrite/node/node_manager.py +51 -22
  489. mindspore/rewrite/node/node_topological_manager.py +3 -23
  490. mindspore/rewrite/parsers/__init__.py +12 -0
  491. mindspore/rewrite/parsers/arguments_parser.py +8 -9
  492. mindspore/rewrite/parsers/assign_parser.py +637 -413
  493. mindspore/rewrite/parsers/attribute_parser.py +3 -4
  494. mindspore/rewrite/parsers/class_def_parser.py +115 -148
  495. mindspore/rewrite/parsers/constant_parser.py +5 -5
  496. mindspore/rewrite/parsers/container_parser.py +4 -6
  497. mindspore/rewrite/parsers/expr_parser.py +55 -0
  498. mindspore/rewrite/parsers/for_parser.py +31 -98
  499. mindspore/rewrite/parsers/function_def_parser.py +13 -5
  500. mindspore/rewrite/parsers/if_parser.py +28 -10
  501. mindspore/rewrite/parsers/module_parser.py +8 -182
  502. mindspore/rewrite/parsers/parser.py +1 -5
  503. mindspore/rewrite/parsers/parser_register.py +1 -1
  504. mindspore/rewrite/parsers/return_parser.py +5 -10
  505. mindspore/rewrite/parsers/while_parser.py +59 -0
  506. mindspore/rewrite/sparsify/utils.py +1 -1
  507. mindspore/rewrite/symbol_tree/__init__.py +20 -0
  508. mindspore/rewrite/{symbol_tree.py → symbol_tree/symbol_tree.py} +705 -186
  509. mindspore/rewrite/{symbol_tree_builder.py → symbol_tree/symbol_tree_builder.py} +8 -8
  510. mindspore/rewrite/{symbol_tree_dumper.py → symbol_tree/symbol_tree_dumper.py} +4 -4
  511. mindspore/run_check/_check_version.py +40 -115
  512. mindspore/run_check/run_check.py +1 -1
  513. mindspore/safeguard/rewrite_obfuscation.py +597 -263
  514. mindspore/swresample-4.dll +0 -0
  515. mindspore/swscale-6.dll +0 -0
  516. mindspore/tbbmalloc.dll +0 -0
  517. mindspore/tinyxml2.dll +0 -0
  518. mindspore/train/__init__.py +7 -5
  519. mindspore/train/_utils.py +204 -4
  520. mindspore/train/amp.py +335 -295
  521. mindspore/train/anf_ir_pb2.py +14 -2
  522. mindspore/train/callback/__init__.py +5 -2
  523. mindspore/train/callback/_backup_and_restore.py +5 -5
  524. mindspore/train/callback/_callback.py +4 -4
  525. mindspore/train/callback/_checkpoint.py +220 -43
  526. mindspore/train/callback/_cluster_monitor.py +201 -0
  527. mindspore/train/callback/_early_stop.py +2 -2
  528. mindspore/train/callback/_flops_collector.py +239 -0
  529. mindspore/train/callback/_landscape.py +15 -9
  530. mindspore/train/callback/_loss_monitor.py +5 -5
  531. mindspore/train/callback/_on_request_exit.py +136 -33
  532. mindspore/train/callback/_reduce_lr_on_plateau.py +2 -2
  533. mindspore/train/callback/_summary_collector.py +12 -12
  534. mindspore/train/callback/_tft_register.py +352 -0
  535. mindspore/train/callback/_time_monitor.py +3 -3
  536. mindspore/train/data_sink.py +6 -5
  537. mindspore/train/dataset_helper.py +66 -23
  538. mindspore/train/loss_scale_manager.py +2 -2
  539. mindspore/train/metrics/accuracy.py +7 -7
  540. mindspore/train/metrics/confusion_matrix.py +8 -6
  541. mindspore/train/metrics/cosine_similarity.py +6 -4
  542. mindspore/train/metrics/error.py +2 -2
  543. mindspore/train/metrics/metric.py +3 -3
  544. mindspore/train/metrics/perplexity.py +2 -1
  545. mindspore/train/metrics/roc.py +4 -4
  546. mindspore/train/metrics/topk.py +2 -2
  547. mindspore/train/mind_ir_pb2.py +116 -37
  548. mindspore/train/model.py +382 -76
  549. mindspore/train/serialization.py +787 -288
  550. mindspore/train/summary/_summary_adapter.py +1 -1
  551. mindspore/train/summary/summary_record.py +51 -28
  552. mindspore/train/train_thor/convert_utils.py +3 -3
  553. mindspore/turbojpeg.dll +0 -0
  554. mindspore/utils/__init__.py +21 -0
  555. mindspore/utils/utils.py +60 -0
  556. mindspore/vcmeta.dll +0 -0
  557. mindspore/vcruntime140.dll +0 -0
  558. mindspore/vcruntime140_1.dll +0 -0
  559. mindspore/version.py +1 -1
  560. {mindspore-2.2.14.dist-info → mindspore-2.4.0.dist-info}/METADATA +8 -4
  561. mindspore-2.4.0.dist-info/RECORD +1406 -0
  562. {mindspore-2.2.14.dist-info → mindspore-2.4.0.dist-info}/entry_points.txt +1 -0
  563. mindspore/_extends/parallel_compile/tbe_compiler/tbe_adapter.py +0 -662
  564. mindspore/_extends/parallel_compile/tbe_compiler/tbe_helper.py +0 -377
  565. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job.py +0 -201
  566. mindspore/_extends/parallel_compile/tbe_compiler/tbe_job_manager.py +0 -515
  567. mindspore/gen_ops.py +0 -273
  568. mindspore/include/c_api/ms/abstract.h +0 -67
  569. mindspore/include/c_api/ms/attribute.h +0 -197
  570. mindspore/include/c_api/ms/base/handle_types.h +0 -43
  571. mindspore/include/c_api/ms/base/macros.h +0 -32
  572. mindspore/include/c_api/ms/base/status.h +0 -33
  573. mindspore/include/c_api/ms/base/types.h +0 -282
  574. mindspore/include/c_api/ms/context.h +0 -102
  575. mindspore/include/c_api/ms/graph.h +0 -160
  576. mindspore/include/c_api/ms/node.h +0 -606
  577. mindspore/include/c_api/ms/tensor.h +0 -161
  578. mindspore/include/c_api/ms/value.h +0 -84
  579. mindspore/mindspore_shared_lib.dll +0 -0
  580. mindspore/nn/layer/flash_attention.py +0 -189
  581. mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +0 -93
  582. mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +0 -66
  583. mindspore/ops/_op_impl/cpu/concat.py +0 -39
  584. mindspore/ops/_op_impl/cpu/tensor_shape.py +0 -42
  585. mindspore/ops/_op_impl/tbe/__init__.py +0 -47
  586. mindspore/ops/_op_impl/tbe/abs.py +0 -38
  587. mindspore/ops/_op_impl/tbe/abs_ds.py +0 -39
  588. mindspore/ops/_op_impl/tbe/abs_grad.py +0 -43
  589. mindspore/ops/_op_impl/tbe/abs_grad_ds.py +0 -44
  590. mindspore/ops/_op_impl/tbe/accumulate_n_v2.py +0 -41
  591. mindspore/ops/_op_impl/tbe/accumulate_n_v2_ds.py +0 -42
  592. mindspore/ops/_op_impl/tbe/acos.py +0 -37
  593. mindspore/ops/_op_impl/tbe/acos_ds.py +0 -38
  594. mindspore/ops/_op_impl/tbe/acos_grad.py +0 -43
  595. mindspore/ops/_op_impl/tbe/acos_grad_ds.py +0 -44
  596. mindspore/ops/_op_impl/tbe/acosh.py +0 -37
  597. mindspore/ops/_op_impl/tbe/acosh_ds.py +0 -38
  598. mindspore/ops/_op_impl/tbe/acosh_grad.py +0 -43
  599. mindspore/ops/_op_impl/tbe/acosh_grad_ds.py +0 -44
  600. mindspore/ops/_op_impl/tbe/act_ulq_clamp_max_grad.py +0 -38
  601. mindspore/ops/_op_impl/tbe/act_ulq_clamp_min_grad.py +0 -38
  602. mindspore/ops/_op_impl/tbe/acts_ulq.py +0 -45
  603. mindspore/ops/_op_impl/tbe/acts_ulq_input_grad.py +0 -38
  604. mindspore/ops/_op_impl/tbe/adam_apply_one.py +0 -50
  605. mindspore/ops/_op_impl/tbe/adam_apply_one_assign.py +0 -53
  606. mindspore/ops/_op_impl/tbe/adam_apply_one_ds.py +0 -51
  607. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay.py +0 -54
  608. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_assign.py +0 -54
  609. mindspore/ops/_op_impl/tbe/adam_apply_one_with_decay_ds.py +0 -55
  610. mindspore/ops/_op_impl/tbe/adaptive_max_pool2d.py +0 -37
  611. mindspore/ops/_op_impl/tbe/add.py +0 -42
  612. mindspore/ops/_op_impl/tbe/add_ds.py +0 -43
  613. mindspore/ops/_op_impl/tbe/add_n.py +0 -39
  614. mindspore/ops/_op_impl/tbe/add_n_ds.py +0 -40
  615. mindspore/ops/_op_impl/tbe/addcdiv.py +0 -41
  616. mindspore/ops/_op_impl/tbe/addcdiv_ds.py +0 -42
  617. mindspore/ops/_op_impl/tbe/addcmul.py +0 -43
  618. mindspore/ops/_op_impl/tbe/addcmul_ds.py +0 -44
  619. mindspore/ops/_op_impl/tbe/apply_ada_max.py +0 -68
  620. mindspore/ops/_op_impl/tbe/apply_ada_max_ds.py +0 -69
  621. mindspore/ops/_op_impl/tbe/apply_adadelta.py +0 -66
  622. mindspore/ops/_op_impl/tbe/apply_adadelta_ds.py +0 -67
  623. mindspore/ops/_op_impl/tbe/apply_adagrad.py +0 -55
  624. mindspore/ops/_op_impl/tbe/apply_adagrad_d_a.py +0 -67
  625. mindspore/ops/_op_impl/tbe/apply_adagrad_ds.py +0 -56
  626. mindspore/ops/_op_impl/tbe/apply_adagrad_v2.py +0 -48
  627. mindspore/ops/_op_impl/tbe/apply_adagrad_v2_ds.py +0 -49
  628. mindspore/ops/_op_impl/tbe/apply_adam.py +0 -79
  629. mindspore/ops/_op_impl/tbe/apply_adam_ds.py +0 -80
  630. mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad.py +0 -60
  631. mindspore/ops/_op_impl/tbe/apply_adam_with_amsgrad_ds.py +0 -61
  632. mindspore/ops/_op_impl/tbe/apply_add_sign.py +0 -65
  633. mindspore/ops/_op_impl/tbe/apply_add_sign_ds.py +0 -66
  634. mindspore/ops/_op_impl/tbe/apply_centered_rms_prop.py +0 -77
  635. mindspore/ops/_op_impl/tbe/apply_centered_rms_prop_ds.py +0 -78
  636. mindspore/ops/_op_impl/tbe/apply_ftrl.py +0 -67
  637. mindspore/ops/_op_impl/tbe/apply_ftrl_ds.py +0 -68
  638. mindspore/ops/_op_impl/tbe/apply_gradient_descent.py +0 -44
  639. mindspore/ops/_op_impl/tbe/apply_gradient_descent_ds.py +0 -45
  640. mindspore/ops/_op_impl/tbe/apply_keras_momentum.py +0 -49
  641. mindspore/ops/_op_impl/tbe/apply_momentum.py +0 -64
  642. mindspore/ops/_op_impl/tbe/apply_momentum_ds.py +0 -65
  643. mindspore/ops/_op_impl/tbe/apply_power_sign.py +0 -65
  644. mindspore/ops/_op_impl/tbe/apply_power_sign_ds.py +0 -66
  645. mindspore/ops/_op_impl/tbe/apply_proximal_adagrad.py +0 -57
  646. mindspore/ops/_op_impl/tbe/apply_proximal_adagrad_ds.py +0 -58
  647. mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent.py +0 -54
  648. mindspore/ops/_op_impl/tbe/apply_proximal_gradient_descent_ds.py +0 -55
  649. mindspore/ops/_op_impl/tbe/apply_rms_prop.py +0 -52
  650. mindspore/ops/_op_impl/tbe/approximate_equal.py +0 -39
  651. mindspore/ops/_op_impl/tbe/approximate_equal_ds.py +0 -40
  652. mindspore/ops/_op_impl/tbe/arg_max.py +0 -38
  653. mindspore/ops/_op_impl/tbe/arg_max_with_value.py +0 -38
  654. mindspore/ops/_op_impl/tbe/arg_max_with_value_ds.py +0 -39
  655. mindspore/ops/_op_impl/tbe/arg_min.py +0 -38
  656. mindspore/ops/_op_impl/tbe/arg_min_v2_ds.py +0 -40
  657. mindspore/ops/_op_impl/tbe/arg_min_with_value.py +0 -38
  658. mindspore/ops/_op_impl/tbe/arg_min_with_value_ds.py +0 -39
  659. mindspore/ops/_op_impl/tbe/asin.py +0 -37
  660. mindspore/ops/_op_impl/tbe/asin_ds.py +0 -38
  661. mindspore/ops/_op_impl/tbe/asin_grad.py +0 -43
  662. mindspore/ops/_op_impl/tbe/asin_grad_ds.py +0 -44
  663. mindspore/ops/_op_impl/tbe/asinh.py +0 -37
  664. mindspore/ops/_op_impl/tbe/asinh_ds.py +0 -38
  665. mindspore/ops/_op_impl/tbe/asinh_grad.py +0 -43
  666. mindspore/ops/_op_impl/tbe/asinh_grad_ds.py +0 -44
  667. mindspore/ops/_op_impl/tbe/assign.py +0 -79
  668. mindspore/ops/_op_impl/tbe/assign_add.py +0 -59
  669. mindspore/ops/_op_impl/tbe/assign_add_ds.py +0 -60
  670. mindspore/ops/_op_impl/tbe/assign_ds.py +0 -80
  671. mindspore/ops/_op_impl/tbe/assign_sub.py +0 -55
  672. mindspore/ops/_op_impl/tbe/assign_sub_ds.py +0 -56
  673. mindspore/ops/_op_impl/tbe/atan.py +0 -37
  674. mindspore/ops/_op_impl/tbe/atan2.py +0 -38
  675. mindspore/ops/_op_impl/tbe/atan2_ds.py +0 -39
  676. mindspore/ops/_op_impl/tbe/atan_ds.py +0 -38
  677. mindspore/ops/_op_impl/tbe/atan_grad.py +0 -43
  678. mindspore/ops/_op_impl/tbe/atan_grad_ds.py +0 -44
  679. mindspore/ops/_op_impl/tbe/atanh.py +0 -37
  680. mindspore/ops/_op_impl/tbe/atanh_ds.py +0 -38
  681. mindspore/ops/_op_impl/tbe/avg_pool.py +0 -43
  682. mindspore/ops/_op_impl/tbe/avg_pool_3d.py +0 -44
  683. mindspore/ops/_op_impl/tbe/avg_pool_3d_grad.py +0 -45
  684. mindspore/ops/_op_impl/tbe/avg_pool_ds.py +0 -44
  685. mindspore/ops/_op_impl/tbe/avg_pool_grad.py +0 -42
  686. mindspore/ops/_op_impl/tbe/avg_pool_grad_vm.py +0 -42
  687. mindspore/ops/_op_impl/tbe/basic_lstm_cell.py +0 -57
  688. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad.py +0 -50
  689. mindspore/ops/_op_impl/tbe/basic_lstm_cell_c_state_grad_v2.py +0 -51
  690. mindspore/ops/_op_impl/tbe/basic_lstm_cell_input_grad.py +0 -42
  691. mindspore/ops/_op_impl/tbe/basic_lstm_cell_weight_grad.py +0 -41
  692. mindspore/ops/_op_impl/tbe/batch_matmul.py +0 -42
  693. mindspore/ops/_op_impl/tbe/batch_matmul_ds.py +0 -41
  694. mindspore/ops/_op_impl/tbe/batch_matmul_v2.py +0 -47
  695. mindspore/ops/_op_impl/tbe/batch_to_space.py +0 -38
  696. mindspore/ops/_op_impl/tbe/batch_to_space_nd.py +0 -38
  697. mindspore/ops/_op_impl/tbe/batch_to_space_nd_ds.py +0 -39
  698. mindspore/ops/_op_impl/tbe/batch_to_space_nd_v2.py +0 -41
  699. mindspore/ops/_op_impl/tbe/batchnorm.py +0 -58
  700. mindspore/ops/_op_impl/tbe/batchnorm_grad.py +0 -58
  701. mindspore/ops/_op_impl/tbe/bce_with_logits_loss.py +0 -42
  702. mindspore/ops/_op_impl/tbe/bessel_i0e.py +0 -37
  703. mindspore/ops/_op_impl/tbe/bessel_i0e_ds.py +0 -38
  704. mindspore/ops/_op_impl/tbe/bessel_i1e.py +0 -37
  705. mindspore/ops/_op_impl/tbe/bessel_i1e_ds.py +0 -38
  706. mindspore/ops/_op_impl/tbe/bias_add.py +0 -38
  707. mindspore/ops/_op_impl/tbe/bias_add_ds.py +0 -39
  708. mindspore/ops/_op_impl/tbe/bias_add_grad.py +0 -53
  709. mindspore/ops/_op_impl/tbe/binary_cross_entropy.py +0 -39
  710. mindspore/ops/_op_impl/tbe/binary_cross_entropy_ds.py +0 -40
  711. mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad.py +0 -44
  712. mindspore/ops/_op_impl/tbe/binary_cross_entropy_grad_ds.py +0 -45
  713. mindspore/ops/_op_impl/tbe/bitwise_and.py +0 -39
  714. mindspore/ops/_op_impl/tbe/bitwise_and_ds.py +0 -40
  715. mindspore/ops/_op_impl/tbe/bitwise_or.py +0 -39
  716. mindspore/ops/_op_impl/tbe/bitwise_or_ds.py +0 -40
  717. mindspore/ops/_op_impl/tbe/bitwise_xor.py +0 -39
  718. mindspore/ops/_op_impl/tbe/bitwise_xor_ds.py +0 -40
  719. mindspore/ops/_op_impl/tbe/bn_infer.py +0 -43
  720. mindspore/ops/_op_impl/tbe/bn_infer_ds.py +0 -45
  721. mindspore/ops/_op_impl/tbe/bn_infer_grad.py +0 -41
  722. mindspore/ops/_op_impl/tbe/bn_infer_grad_ds.py +0 -40
  723. mindspore/ops/_op_impl/tbe/bn_inference.py +0 -50
  724. mindspore/ops/_op_impl/tbe/bn_training_reduce.py +0 -38
  725. mindspore/ops/_op_impl/tbe/bn_training_reduce_ds.py +0 -39
  726. mindspore/ops/_op_impl/tbe/bn_training_reduce_grad.py +0 -46
  727. mindspore/ops/_op_impl/tbe/bn_training_reduce_grad_ds.py +0 -47
  728. mindspore/ops/_op_impl/tbe/bn_training_update.py +0 -52
  729. mindspore/ops/_op_impl/tbe/bn_training_update_ds.py +0 -53
  730. mindspore/ops/_op_impl/tbe/bn_training_update_grad.py +0 -44
  731. mindspore/ops/_op_impl/tbe/bn_training_update_grad_ds.py +0 -45
  732. mindspore/ops/_op_impl/tbe/bn_training_update_v2.py +0 -48
  733. mindspore/ops/_op_impl/tbe/bn_training_update_v3.py +0 -51
  734. mindspore/ops/_op_impl/tbe/bounding_box_decode.py +0 -41
  735. mindspore/ops/_op_impl/tbe/bounding_box_decode_ds.py +0 -42
  736. mindspore/ops/_op_impl/tbe/bounding_box_encode.py +0 -38
  737. mindspore/ops/_op_impl/tbe/broadcast_to.py +0 -40
  738. mindspore/ops/_op_impl/tbe/broadcast_to_ds.py +0 -44
  739. mindspore/ops/_op_impl/tbe/cast.py +0 -55
  740. mindspore/ops/_op_impl/tbe/cast_ds.py +0 -58
  741. mindspore/ops/_op_impl/tbe/cdist.py +0 -38
  742. mindspore/ops/_op_impl/tbe/cdist_grad.py +0 -42
  743. mindspore/ops/_op_impl/tbe/ceil.py +0 -37
  744. mindspore/ops/_op_impl/tbe/ceil_ds.py +0 -38
  745. mindspore/ops/_op_impl/tbe/celu.py +0 -39
  746. mindspore/ops/_op_impl/tbe/centralization.py +0 -39
  747. mindspore/ops/_op_impl/tbe/check_valid.py +0 -38
  748. mindspore/ops/_op_impl/tbe/check_valid_ds.py +0 -39
  749. mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum.py +0 -41
  750. mindspore/ops/_op_impl/tbe/clip_by_norm_no_div_sum_ds.py +0 -42
  751. mindspore/ops/_op_impl/tbe/clip_by_value.py +0 -41
  752. mindspore/ops/_op_impl/tbe/clip_by_value_ds.py +0 -42
  753. mindspore/ops/_op_impl/tbe/concat.py +0 -40
  754. mindspore/ops/_op_impl/tbe/concat_ds.py +0 -38
  755. mindspore/ops/_op_impl/tbe/confusion_matrix.py +0 -63
  756. mindspore/ops/_op_impl/tbe/confusion_mul_grad.py +0 -40
  757. mindspore/ops/_op_impl/tbe/confusion_softmax_grad.py +0 -41
  758. mindspore/ops/_op_impl/tbe/confusion_transpose_d.py +0 -39
  759. mindspore/ops/_op_impl/tbe/conv2d.py +0 -47
  760. mindspore/ops/_op_impl/tbe/conv2d_backprop_filter.py +0 -42
  761. mindspore/ops/_op_impl/tbe/conv2d_backprop_filter_ds.py +0 -43
  762. mindspore/ops/_op_impl/tbe/conv2d_backprop_input.py +0 -42
  763. mindspore/ops/_op_impl/tbe/conv2d_backprop_input_ds.py +0 -44
  764. mindspore/ops/_op_impl/tbe/conv2d_ds.py +0 -47
  765. mindspore/ops/_op_impl/tbe/conv2d_transpose.py +0 -48
  766. mindspore/ops/_op_impl/tbe/conv3d.py +0 -45
  767. mindspore/ops/_op_impl/tbe/conv3d_backprop_filter.py +0 -42
  768. mindspore/ops/_op_impl/tbe/conv3d_backprop_input.py +0 -42
  769. mindspore/ops/_op_impl/tbe/conv3d_transpose.py +0 -47
  770. mindspore/ops/_op_impl/tbe/conv3d_transpose_ds.py +0 -48
  771. mindspore/ops/_op_impl/tbe/cos.py +0 -37
  772. mindspore/ops/_op_impl/tbe/cos_ds.py +0 -38
  773. mindspore/ops/_op_impl/tbe/cosh.py +0 -37
  774. mindspore/ops/_op_impl/tbe/cosh_ds.py +0 -38
  775. mindspore/ops/_op_impl/tbe/ctc_loss_v2.py +0 -42
  776. mindspore/ops/_op_impl/tbe/ctc_loss_v2_grad.py +0 -44
  777. mindspore/ops/_op_impl/tbe/cum_sum.py +0 -42
  778. mindspore/ops/_op_impl/tbe/cum_sum_ds.py +0 -44
  779. mindspore/ops/_op_impl/tbe/cummin.py +0 -41
  780. mindspore/ops/_op_impl/tbe/cumprod.py +0 -42
  781. mindspore/ops/_op_impl/tbe/data_format_dim_map.py +0 -38
  782. mindspore/ops/_op_impl/tbe/data_format_dim_map_ds.py +0 -40
  783. mindspore/ops/_op_impl/tbe/deformable_offsets.py +0 -45
  784. mindspore/ops/_op_impl/tbe/deformable_offsets_grad.py +0 -48
  785. mindspore/ops/_op_impl/tbe/depth_to_space_ds.py +0 -49
  786. mindspore/ops/_op_impl/tbe/depthwise_conv2d.py +0 -44
  787. mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_filter.py +0 -41
  788. mindspore/ops/_op_impl/tbe/depthwise_conv2d_backprop_input.py +0 -41
  789. mindspore/ops/_op_impl/tbe/diag.py +0 -38
  790. mindspore/ops/_op_impl/tbe/diag_part.py +0 -38
  791. mindspore/ops/_op_impl/tbe/dilation.py +0 -40
  792. mindspore/ops/_op_impl/tbe/div.py +0 -41
  793. mindspore/ops/_op_impl/tbe/div_ds.py +0 -42
  794. mindspore/ops/_op_impl/tbe/div_no_nan.py +0 -41
  795. mindspore/ops/_op_impl/tbe/div_no_nan_ds.py +0 -42
  796. mindspore/ops/_op_impl/tbe/dropout_do_mask.py +0 -38
  797. mindspore/ops/_op_impl/tbe/dropout_do_mask_ds.py +0 -39
  798. mindspore/ops/_op_impl/tbe/dropout_do_mask_v3.py +0 -39
  799. mindspore/ops/_op_impl/tbe/dynamic_atomic_addr_clean.py +0 -34
  800. mindspore/ops/_op_impl/tbe/dynamic_gru_v2.py +0 -95
  801. mindspore/ops/_op_impl/tbe/dynamic_rnn.py +0 -82
  802. mindspore/ops/_op_impl/tbe/elu.py +0 -38
  803. mindspore/ops/_op_impl/tbe/elu_ds.py +0 -39
  804. mindspore/ops/_op_impl/tbe/elu_grad.py +0 -43
  805. mindspore/ops/_op_impl/tbe/elu_grad_ds.py +0 -44
  806. mindspore/ops/_op_impl/tbe/equal.py +0 -42
  807. mindspore/ops/_op_impl/tbe/equal_ds.py +0 -42
  808. mindspore/ops/_op_impl/tbe/erf.py +0 -37
  809. mindspore/ops/_op_impl/tbe/erf_ds.py +0 -38
  810. mindspore/ops/_op_impl/tbe/erfc.py +0 -37
  811. mindspore/ops/_op_impl/tbe/erfc_ds.py +0 -38
  812. mindspore/ops/_op_impl/tbe/erfinv.py +0 -36
  813. mindspore/ops/_op_impl/tbe/exp.py +0 -40
  814. mindspore/ops/_op_impl/tbe/exp_ds.py +0 -41
  815. mindspore/ops/_op_impl/tbe/expand_dims.py +0 -38
  816. mindspore/ops/_op_impl/tbe/expm1.py +0 -37
  817. mindspore/ops/_op_impl/tbe/expm1_ds.py +0 -38
  818. mindspore/ops/_op_impl/tbe/extract_image_patches.py +0 -41
  819. mindspore/ops/_op_impl/tbe/extract_volume_patches.py +0 -39
  820. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars.py +0 -39
  821. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_gradient.py +0 -43
  822. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel.py +0 -39
  823. mindspore/ops/_op_impl/tbe/fake_quant_with_min_max_vars_per_channel_gradient.py +0 -43
  824. mindspore/ops/_op_impl/tbe/fast_gelu.py +0 -37
  825. mindspore/ops/_op_impl/tbe/fast_gelu_ds.py +0 -38
  826. mindspore/ops/_op_impl/tbe/fast_gelu_grad.py +0 -41
  827. mindspore/ops/_op_impl/tbe/fast_gelu_grad_ds.py +0 -42
  828. mindspore/ops/_op_impl/tbe/fill.py +0 -56
  829. mindspore/ops/_op_impl/tbe/fill_ds.py +0 -42
  830. mindspore/ops/_op_impl/tbe/flatten.py +0 -48
  831. mindspore/ops/_op_impl/tbe/floor.py +0 -37
  832. mindspore/ops/_op_impl/tbe/floor_div.py +0 -41
  833. mindspore/ops/_op_impl/tbe/floor_div_ds.py +0 -42
  834. mindspore/ops/_op_impl/tbe/floor_ds.py +0 -38
  835. mindspore/ops/_op_impl/tbe/floor_mod.py +0 -39
  836. mindspore/ops/_op_impl/tbe/floor_mod_ds.py +0 -40
  837. mindspore/ops/_op_impl/tbe/fused_dbn_dw.py +0 -52
  838. mindspore/ops/_op_impl/tbe/fused_mul_add.py +0 -38
  839. mindspore/ops/_op_impl/tbe/fused_mul_add_n.py +0 -48
  840. mindspore/ops/_op_impl/tbe/fused_mul_add_n_l2loss.py +0 -53
  841. mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum.py +0 -57
  842. mindspore/ops/_op_impl/tbe/fused_mul_apply_momentum_extern.py +0 -67
  843. mindspore/ops/_op_impl/tbe/gather_nd.py +0 -52
  844. mindspore/ops/_op_impl/tbe/gather_nd_ds.py +0 -48
  845. mindspore/ops/_op_impl/tbe/gather_v2.py +0 -56
  846. mindspore/ops/_op_impl/tbe/gather_v2_ds.py +0 -68
  847. mindspore/ops/_op_impl/tbe/gelu.py +0 -37
  848. mindspore/ops/_op_impl/tbe/gelu_ds.py +0 -38
  849. mindspore/ops/_op_impl/tbe/gelu_grad.py +0 -42
  850. mindspore/ops/_op_impl/tbe/gelu_grad_ds.py +0 -43
  851. mindspore/ops/_op_impl/tbe/ger.py +0 -43
  852. mindspore/ops/_op_impl/tbe/ger_ds.py +0 -44
  853. mindspore/ops/_op_impl/tbe/greater.py +0 -43
  854. mindspore/ops/_op_impl/tbe/greater_equal.py +0 -41
  855. mindspore/ops/_op_impl/tbe/greater_equal_ds.py +0 -42
  856. mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad.py +0 -51
  857. mindspore/ops/_op_impl/tbe/gru_v2_hidden_grad_cell.py +0 -52
  858. mindspore/ops/_op_impl/tbe/hard_swish.py +0 -37
  859. mindspore/ops/_op_impl/tbe/hard_swish_ds.py +0 -38
  860. mindspore/ops/_op_impl/tbe/hard_swish_grad.py +0 -41
  861. mindspore/ops/_op_impl/tbe/hard_swish_grad_ds.py +0 -42
  862. mindspore/ops/_op_impl/tbe/histogram_fixed_width.py +0 -40
  863. mindspore/ops/_op_impl/tbe/hshrink.py +0 -33
  864. mindspore/ops/_op_impl/tbe/hshrink_grad.py +0 -37
  865. mindspore/ops/_op_impl/tbe/hsigmoid.py +0 -45
  866. mindspore/ops/_op_impl/tbe/hsigmoid_grad.py +0 -39
  867. mindspore/ops/_op_impl/tbe/ifmr.py +0 -47
  868. mindspore/ops/_op_impl/tbe/ifmr_ds.py +0 -48
  869. mindspore/ops/_op_impl/tbe/im2col.py +0 -42
  870. mindspore/ops/_op_impl/tbe/in_top_k.py +0 -37
  871. mindspore/ops/_op_impl/tbe/inplace_add.py +0 -39
  872. mindspore/ops/_op_impl/tbe/inplace_index_add.py +0 -46
  873. mindspore/ops/_op_impl/tbe/inplace_sub.py +0 -39
  874. mindspore/ops/_op_impl/tbe/inplace_update.py +0 -39
  875. mindspore/ops/_op_impl/tbe/inplace_update_ds.py +0 -40
  876. mindspore/ops/_op_impl/tbe/inv.py +0 -38
  877. mindspore/ops/_op_impl/tbe/inv_ds.py +0 -39
  878. mindspore/ops/_op_impl/tbe/inv_grad.py +0 -40
  879. mindspore/ops/_op_impl/tbe/inv_grad_ds.py +0 -41
  880. mindspore/ops/_op_impl/tbe/invert.py +0 -37
  881. mindspore/ops/_op_impl/tbe/invert_ds.py +0 -38
  882. mindspore/ops/_op_impl/tbe/iou.py +0 -38
  883. mindspore/ops/_op_impl/tbe/iou_ds.py +0 -39
  884. mindspore/ops/_op_impl/tbe/is_close.py +0 -40
  885. mindspore/ops/_op_impl/tbe/kl_div_loss.py +0 -38
  886. mindspore/ops/_op_impl/tbe/kl_div_loss_ds.py +0 -39
  887. mindspore/ops/_op_impl/tbe/kl_div_loss_grad.py +0 -40
  888. mindspore/ops/_op_impl/tbe/l2_loss.py +0 -36
  889. mindspore/ops/_op_impl/tbe/l2_loss_ds.py +0 -37
  890. mindspore/ops/_op_impl/tbe/l2_normalize.py +0 -38
  891. mindspore/ops/_op_impl/tbe/l2_normalize_grad.py +0 -40
  892. mindspore/ops/_op_impl/tbe/lamb_apply_optimizer_assign.py +0 -55
  893. mindspore/ops/_op_impl/tbe/lamb_apply_weight_assign.py +0 -42
  894. mindspore/ops/_op_impl/tbe/lamb_next_mv.py +0 -59
  895. mindspore/ops/_op_impl/tbe/lamb_next_mv_with_decay.py +0 -59
  896. mindspore/ops/_op_impl/tbe/lamb_next_right.py +0 -44
  897. mindspore/ops/_op_impl/tbe/lamb_update_with_lr.py +0 -48
  898. mindspore/ops/_op_impl/tbe/lamb_update_with_lr_v2.py +0 -44
  899. mindspore/ops/_op_impl/tbe/lars_update.py +0 -50
  900. mindspore/ops/_op_impl/tbe/lars_update_ds.py +0 -51
  901. mindspore/ops/_op_impl/tbe/layer_norm.py +0 -46
  902. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop.py +0 -44
  903. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_ds.py +0 -45
  904. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2.py +0 -40
  905. mindspore/ops/_op_impl/tbe/layer_norm_beta_gamma_backprop_v2_ds.py +0 -41
  906. mindspore/ops/_op_impl/tbe/layer_norm_ds.py +0 -47
  907. mindspore/ops/_op_impl/tbe/layer_norm_grad.py +0 -48
  908. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop.py +0 -43
  909. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_ds.py +0 -44
  910. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2.py +0 -45
  911. mindspore/ops/_op_impl/tbe/layer_norm_x_backprop_v2_ds.py +0 -45
  912. mindspore/ops/_op_impl/tbe/lerp.py +0 -38
  913. mindspore/ops/_op_impl/tbe/less.py +0 -41
  914. mindspore/ops/_op_impl/tbe/less_ds.py +0 -42
  915. mindspore/ops/_op_impl/tbe/less_equal.py +0 -41
  916. mindspore/ops/_op_impl/tbe/less_equal_ds.py +0 -42
  917. mindspore/ops/_op_impl/tbe/log.py +0 -40
  918. mindspore/ops/_op_impl/tbe/log1p.py +0 -37
  919. mindspore/ops/_op_impl/tbe/log1p_ds.py +0 -38
  920. mindspore/ops/_op_impl/tbe/log_ds.py +0 -41
  921. mindspore/ops/_op_impl/tbe/logical_and.py +0 -37
  922. mindspore/ops/_op_impl/tbe/logical_and_ds.py +0 -38
  923. mindspore/ops/_op_impl/tbe/logical_not.py +0 -36
  924. mindspore/ops/_op_impl/tbe/logical_not_ds.py +0 -37
  925. mindspore/ops/_op_impl/tbe/logical_or.py +0 -37
  926. mindspore/ops/_op_impl/tbe/logical_or_ds.py +0 -38
  927. mindspore/ops/_op_impl/tbe/logsoftmax.py +0 -37
  928. mindspore/ops/_op_impl/tbe/logsoftmax_ds.py +0 -38
  929. mindspore/ops/_op_impl/tbe/logsoftmax_grad.py +0 -38
  930. mindspore/ops/_op_impl/tbe/logsoftmax_grad_ds.py +0 -39
  931. mindspore/ops/_op_impl/tbe/lp_norm.py +0 -40
  932. mindspore/ops/_op_impl/tbe/lp_norm_ds.py +0 -41
  933. mindspore/ops/_op_impl/tbe/lrn.py +0 -41
  934. mindspore/ops/_op_impl/tbe/lrn_grad.py +0 -42
  935. mindspore/ops/_op_impl/tbe/lstm_input_grad.py +0 -51
  936. mindspore/ops/_op_impl/tbe/masked_fill.py +0 -40
  937. mindspore/ops/_op_impl/tbe/masked_fill_ds.py +0 -41
  938. mindspore/ops/_op_impl/tbe/matmul.py +0 -53
  939. mindspore/ops/_op_impl/tbe/matmul_ds.py +0 -47
  940. mindspore/ops/_op_impl/tbe/matmul_v2.py +0 -50
  941. mindspore/ops/_op_impl/tbe/matrix_diag.py +0 -45
  942. mindspore/ops/_op_impl/tbe/matrix_diag_part.py +0 -45
  943. mindspore/ops/_op_impl/tbe/matrix_set_diag.py +0 -46
  944. mindspore/ops/_op_impl/tbe/max_pool.py +0 -39
  945. mindspore/ops/_op_impl/tbe/max_pool3d.py +0 -44
  946. mindspore/ops/_op_impl/tbe/max_pool3d_grad.py +0 -43
  947. mindspore/ops/_op_impl/tbe/max_pool3d_grad_grad.py +0 -44
  948. mindspore/ops/_op_impl/tbe/max_pool_ds.py +0 -40
  949. mindspore/ops/_op_impl/tbe/max_pool_grad.py +0 -43
  950. mindspore/ops/_op_impl/tbe/max_pool_grad_grad.py +0 -41
  951. mindspore/ops/_op_impl/tbe/max_pool_grad_grad_with_argmax.py +0 -41
  952. mindspore/ops/_op_impl/tbe/max_pool_grad_with_argmax.py +0 -42
  953. mindspore/ops/_op_impl/tbe/max_pool_with_argmax.py +0 -40
  954. mindspore/ops/_op_impl/tbe/maximum.py +0 -39
  955. mindspore/ops/_op_impl/tbe/maximum_ds.py +0 -40
  956. mindspore/ops/_op_impl/tbe/maximum_grad.py +0 -46
  957. mindspore/ops/_op_impl/tbe/maximum_grad_ds.py +0 -47
  958. mindspore/ops/_op_impl/tbe/mem_set.py +0 -38
  959. mindspore/ops/_op_impl/tbe/minimum.py +0 -40
  960. mindspore/ops/_op_impl/tbe/minimum_ds.py +0 -41
  961. mindspore/ops/_op_impl/tbe/minimum_grad.py +0 -46
  962. mindspore/ops/_op_impl/tbe/minimum_grad_ds.py +0 -47
  963. mindspore/ops/_op_impl/tbe/mish.py +0 -37
  964. mindspore/ops/_op_impl/tbe/mod.py +0 -41
  965. mindspore/ops/_op_impl/tbe/mod_ds.py +0 -42
  966. mindspore/ops/_op_impl/tbe/mul.py +0 -37
  967. mindspore/ops/_op_impl/tbe/mul_ds.py +0 -38
  968. mindspore/ops/_op_impl/tbe/mul_no_nan.py +0 -39
  969. mindspore/ops/_op_impl/tbe/mul_no_nan_ds.py +0 -40
  970. mindspore/ops/_op_impl/tbe/multilabel_margin_loss.py +0 -39
  971. mindspore/ops/_op_impl/tbe/neg.py +0 -39
  972. mindspore/ops/_op_impl/tbe/neg_ds.py +0 -40
  973. mindspore/ops/_op_impl/tbe/new_im2col.py +0 -40
  974. mindspore/ops/_op_impl/tbe/nll_loss.py +0 -41
  975. mindspore/ops/_op_impl/tbe/nll_loss_grad.py +0 -44
  976. mindspore/ops/_op_impl/tbe/nms_with_mask.py +0 -39
  977. mindspore/ops/_op_impl/tbe/not_equal.py +0 -41
  978. mindspore/ops/_op_impl/tbe/not_equal_ds.py +0 -42
  979. mindspore/ops/_op_impl/tbe/npu_alloc_float_status.py +0 -34
  980. mindspore/ops/_op_impl/tbe/npu_clear_float_status.py +0 -35
  981. mindspore/ops/_op_impl/tbe/npu_clear_float_status_v2.py +0 -35
  982. mindspore/ops/_op_impl/tbe/npu_get_float_status.py +0 -35
  983. mindspore/ops/_op_impl/tbe/npu_get_float_status_v2.py +0 -35
  984. mindspore/ops/_op_impl/tbe/one_hot.py +0 -48
  985. mindspore/ops/_op_impl/tbe/one_hot_ds.py +0 -45
  986. mindspore/ops/_op_impl/tbe/ones_like.py +0 -40
  987. mindspore/ops/_op_impl/tbe/ones_like_ds.py +0 -41
  988. mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling.py +0 -40
  989. mindspore/ops/_op_impl/tbe/p_s_r_o_i_pooling_grad.py +0 -40
  990. mindspore/ops/_op_impl/tbe/pack.py +0 -58
  991. mindspore/ops/_op_impl/tbe/pack_ds.py +0 -59
  992. mindspore/ops/_op_impl/tbe/pad_d.py +0 -40
  993. mindspore/ops/_op_impl/tbe/pad_d_ds.py +0 -41
  994. mindspore/ops/_op_impl/tbe/parallel_concat.py +0 -70
  995. mindspore/ops/_op_impl/tbe/parallel_resize_bilinear.py +0 -45
  996. mindspore/ops/_op_impl/tbe/parallel_resize_bilinear_grad.py +0 -44
  997. mindspore/ops/_op_impl/tbe/pdist.py +0 -36
  998. mindspore/ops/_op_impl/tbe/pooling.py +0 -46
  999. mindspore/ops/_op_impl/tbe/population_count.py +0 -38
  1000. mindspore/ops/_op_impl/tbe/pow.py +0 -41
  1001. mindspore/ops/_op_impl/tbe/pow_ds.py +0 -42
  1002. mindspore/ops/_op_impl/tbe/prelu.py +0 -37
  1003. mindspore/ops/_op_impl/tbe/prelu_ds.py +0 -38
  1004. mindspore/ops/_op_impl/tbe/prelu_grad.py +0 -40
  1005. mindspore/ops/_op_impl/tbe/range.py +0 -39
  1006. mindspore/ops/_op_impl/tbe/real_div.py +0 -38
  1007. mindspore/ops/_op_impl/tbe/real_div_ds.py +0 -39
  1008. mindspore/ops/_op_impl/tbe/reciprocal.py +0 -36
  1009. mindspore/ops/_op_impl/tbe/reciprocal_ds.py +0 -37
  1010. mindspore/ops/_op_impl/tbe/reciprocal_grad.py +0 -38
  1011. mindspore/ops/_op_impl/tbe/reciprocal_grad_ds.py +0 -39
  1012. mindspore/ops/_op_impl/tbe/reduce_all.py +0 -38
  1013. mindspore/ops/_op_impl/tbe/reduce_all_ds.py +0 -39
  1014. mindspore/ops/_op_impl/tbe/reduce_any.py +0 -38
  1015. mindspore/ops/_op_impl/tbe/reduce_any_ds.py +0 -39
  1016. mindspore/ops/_op_impl/tbe/reduce_max.py +0 -43
  1017. mindspore/ops/_op_impl/tbe/reduce_max_ds.py +0 -41
  1018. mindspore/ops/_op_impl/tbe/reduce_mean.py +0 -40
  1019. mindspore/ops/_op_impl/tbe/reduce_mean_ds.py +0 -42
  1020. mindspore/ops/_op_impl/tbe/reduce_min.py +0 -41
  1021. mindspore/ops/_op_impl/tbe/reduce_min_ds.py +0 -41
  1022. mindspore/ops/_op_impl/tbe/reduce_prod.py +0 -42
  1023. mindspore/ops/_op_impl/tbe/reduce_prod_ds.py +0 -41
  1024. mindspore/ops/_op_impl/tbe/reduce_std.py +0 -44
  1025. mindspore/ops/_op_impl/tbe/reduce_sum.py +0 -39
  1026. mindspore/ops/_op_impl/tbe/reduce_sum_ds.py +0 -41
  1027. mindspore/ops/_op_impl/tbe/relu.py +0 -39
  1028. mindspore/ops/_op_impl/tbe/relu6.py +0 -38
  1029. mindspore/ops/_op_impl/tbe/relu6_ds.py +0 -39
  1030. mindspore/ops/_op_impl/tbe/relu6_grad.py +0 -43
  1031. mindspore/ops/_op_impl/tbe/relu6_grad_ds.py +0 -44
  1032. mindspore/ops/_op_impl/tbe/relu_ds.py +0 -40
  1033. mindspore/ops/_op_impl/tbe/relu_grad.py +0 -41
  1034. mindspore/ops/_op_impl/tbe/relu_grad_ds.py +0 -42
  1035. mindspore/ops/_op_impl/tbe/relu_grad_v2.py +0 -40
  1036. mindspore/ops/_op_impl/tbe/relu_grad_v2_ds.py +0 -41
  1037. mindspore/ops/_op_impl/tbe/relu_v2.py +0 -40
  1038. mindspore/ops/_op_impl/tbe/relu_v2_ds.py +0 -41
  1039. mindspore/ops/_op_impl/tbe/renorm.py +0 -39
  1040. mindspore/ops/_op_impl/tbe/resize_bilinear.py +0 -40
  1041. mindspore/ops/_op_impl/tbe/resize_bilinear_grad.py +0 -41
  1042. mindspore/ops/_op_impl/tbe/resize_bilinear_v2.py +0 -43
  1043. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor.py +0 -40
  1044. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_ds.py +0 -40
  1045. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad.py +0 -39
  1046. mindspore/ops/_op_impl/tbe/resize_nearest_neighbor_grad_ds.py +0 -42
  1047. mindspore/ops/_op_impl/tbe/reverse_v2_d.py +0 -37
  1048. mindspore/ops/_op_impl/tbe/rint.py +0 -37
  1049. mindspore/ops/_op_impl/tbe/rint_ds.py +0 -38
  1050. mindspore/ops/_op_impl/tbe/roi_align.py +0 -43
  1051. mindspore/ops/_op_impl/tbe/roi_align_ds.py +0 -44
  1052. mindspore/ops/_op_impl/tbe/roi_align_grad.py +0 -43
  1053. mindspore/ops/_op_impl/tbe/roi_align_grad_ds.py +0 -44
  1054. mindspore/ops/_op_impl/tbe/roll.py +0 -42
  1055. mindspore/ops/_op_impl/tbe/round.py +0 -38
  1056. mindspore/ops/_op_impl/tbe/round_ds.py +0 -39
  1057. mindspore/ops/_op_impl/tbe/rsqrt.py +0 -37
  1058. mindspore/ops/_op_impl/tbe/rsqrt_ds.py +0 -38
  1059. mindspore/ops/_op_impl/tbe/rsqrt_grad.py +0 -40
  1060. mindspore/ops/_op_impl/tbe/rsqrt_grad_ds.py +0 -41
  1061. mindspore/ops/_op_impl/tbe/scatter_add.py +0 -44
  1062. mindspore/ops/_op_impl/tbe/scatter_div.py +0 -46
  1063. mindspore/ops/_op_impl/tbe/scatter_max.py +0 -45
  1064. mindspore/ops/_op_impl/tbe/scatter_min.py +0 -45
  1065. mindspore/ops/_op_impl/tbe/scatter_mul.py +0 -44
  1066. mindspore/ops/_op_impl/tbe/scatter_nd.py +0 -41
  1067. mindspore/ops/_op_impl/tbe/scatter_nd_add.py +0 -45
  1068. mindspore/ops/_op_impl/tbe/scatter_nd_d.py +0 -41
  1069. mindspore/ops/_op_impl/tbe/scatter_nd_ds.py +0 -49
  1070. mindspore/ops/_op_impl/tbe/scatter_nd_sub.py +0 -47
  1071. mindspore/ops/_op_impl/tbe/scatter_nd_sub_ds.py +0 -48
  1072. mindspore/ops/_op_impl/tbe/scatter_nd_update.py +0 -47
  1073. mindspore/ops/_op_impl/tbe/scatter_nd_update_ds.py +0 -48
  1074. mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add.py +0 -39
  1075. mindspore/ops/_op_impl/tbe/scatter_non_aliasing_add_ds.py +0 -40
  1076. mindspore/ops/_op_impl/tbe/scatter_sub.py +0 -47
  1077. mindspore/ops/_op_impl/tbe/scatter_sub_ds.py +0 -48
  1078. mindspore/ops/_op_impl/tbe/scatter_update.py +0 -43
  1079. mindspore/ops/_op_impl/tbe/select.py +0 -38
  1080. mindspore/ops/_op_impl/tbe/select_ds.py +0 -39
  1081. mindspore/ops/_op_impl/tbe/selu.py +0 -39
  1082. mindspore/ops/_op_impl/tbe/selu_ds.py +0 -40
  1083. mindspore/ops/_op_impl/tbe/sgd.py +0 -62
  1084. mindspore/ops/_op_impl/tbe/sigmoid.py +0 -37
  1085. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits.py +0 -41
  1086. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_ds.py +0 -42
  1087. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad.py +0 -42
  1088. mindspore/ops/_op_impl/tbe/sigmoid_cross_entropy_with_logits_grad_ds.py +0 -43
  1089. mindspore/ops/_op_impl/tbe/sigmoid_ds.py +0 -38
  1090. mindspore/ops/_op_impl/tbe/sigmoid_grad.py +0 -39
  1091. mindspore/ops/_op_impl/tbe/sigmoid_grad_ds.py +0 -40
  1092. mindspore/ops/_op_impl/tbe/sign.py +0 -38
  1093. mindspore/ops/_op_impl/tbe/sign_ds.py +0 -39
  1094. mindspore/ops/_op_impl/tbe/sin.py +0 -37
  1095. mindspore/ops/_op_impl/tbe/sin_ds.py +0 -38
  1096. mindspore/ops/_op_impl/tbe/sinh.py +0 -37
  1097. mindspore/ops/_op_impl/tbe/sinh_ds.py +0 -38
  1098. mindspore/ops/_op_impl/tbe/slice.py +0 -58
  1099. mindspore/ops/_op_impl/tbe/smooth_l1_loss.py +0 -45
  1100. mindspore/ops/_op_impl/tbe/smooth_l1_loss_ds.py +0 -46
  1101. mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad.py +0 -46
  1102. mindspore/ops/_op_impl/tbe/smooth_l1_loss_grad_ds.py +0 -47
  1103. mindspore/ops/_op_impl/tbe/soft_margin_loss.py +0 -38
  1104. mindspore/ops/_op_impl/tbe/soft_margin_loss_grad.py +0 -39
  1105. mindspore/ops/_op_impl/tbe/soft_shrink.py +0 -36
  1106. mindspore/ops/_op_impl/tbe/soft_shrink_grad.py +0 -38
  1107. mindspore/ops/_op_impl/tbe/softmax.py +0 -37
  1108. mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits.py +0 -38
  1109. mindspore/ops/_op_impl/tbe/softmax_cross_entropy_with_logits_ds.py +0 -39
  1110. mindspore/ops/_op_impl/tbe/softmax_ds.py +0 -38
  1111. mindspore/ops/_op_impl/tbe/softmax_grad_ext.py +0 -42
  1112. mindspore/ops/_op_impl/tbe/softmax_v2_with_dropout_do_mask_v3.py +0 -39
  1113. mindspore/ops/_op_impl/tbe/softplus.py +0 -37
  1114. mindspore/ops/_op_impl/tbe/softplus_ds.py +0 -38
  1115. mindspore/ops/_op_impl/tbe/softplus_grad.py +0 -38
  1116. mindspore/ops/_op_impl/tbe/softplus_grad_ds.py +0 -38
  1117. mindspore/ops/_op_impl/tbe/softsign.py +0 -37
  1118. mindspore/ops/_op_impl/tbe/softsign_ds.py +0 -38
  1119. mindspore/ops/_op_impl/tbe/sort.py +0 -38
  1120. mindspore/ops/_op_impl/tbe/sort_ds.py +0 -39
  1121. mindspore/ops/_op_impl/tbe/space_to_batch.py +0 -38
  1122. mindspore/ops/_op_impl/tbe/space_to_batch_nd.py +0 -38
  1123. mindspore/ops/_op_impl/tbe/space_to_depth.py +0 -47
  1124. mindspore/ops/_op_impl/tbe/sparse_apply_adadelta.py +0 -56
  1125. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad.py +0 -45
  1126. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_ds.py +0 -46
  1127. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2.py +0 -46
  1128. mindspore/ops/_op_impl/tbe/sparse_apply_adagrad_v2_ds.py +0 -47
  1129. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d.py +0 -53
  1130. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_d_ds.py +0 -50
  1131. mindspore/ops/_op_impl/tbe/sparse_apply_ftrl_v2.py +0 -50
  1132. mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad.py +0 -66
  1133. mindspore/ops/_op_impl/tbe/sparse_apply_proximal_adagrad_ds.py +0 -67
  1134. mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop.py +0 -57
  1135. mindspore/ops/_op_impl/tbe/sparse_apply_r_m_s_prop_ds.py +0 -58
  1136. mindspore/ops/_op_impl/tbe/sparse_gather_v2.py +0 -56
  1137. mindspore/ops/_op_impl/tbe/sparse_gather_v2_ds.py +0 -58
  1138. mindspore/ops/_op_impl/tbe/split_d.py +0 -38
  1139. mindspore/ops/_op_impl/tbe/split_d_ds.py +0 -39
  1140. mindspore/ops/_op_impl/tbe/split_v.py +0 -39
  1141. mindspore/ops/_op_impl/tbe/splitv.py +0 -39
  1142. mindspore/ops/_op_impl/tbe/sqrt.py +0 -37
  1143. mindspore/ops/_op_impl/tbe/sqrt_ds.py +0 -38
  1144. mindspore/ops/_op_impl/tbe/sqrt_grad.py +0 -43
  1145. mindspore/ops/_op_impl/tbe/sqrt_grad_ds.py +0 -44
  1146. mindspore/ops/_op_impl/tbe/square.py +0 -38
  1147. mindspore/ops/_op_impl/tbe/square_ds.py +0 -39
  1148. mindspore/ops/_op_impl/tbe/square_sum_all.py +0 -40
  1149. mindspore/ops/_op_impl/tbe/square_sum_all_ds.py +0 -41
  1150. mindspore/ops/_op_impl/tbe/square_sum_v1.py +0 -38
  1151. mindspore/ops/_op_impl/tbe/square_sum_v1_ds.py +0 -39
  1152. mindspore/ops/_op_impl/tbe/square_sum_v2.py +0 -39
  1153. mindspore/ops/_op_impl/tbe/squared_difference.py +0 -39
  1154. mindspore/ops/_op_impl/tbe/squared_difference_ds.py +0 -41
  1155. mindspore/ops/_op_impl/tbe/squeeze.py +0 -37
  1156. mindspore/ops/_op_impl/tbe/strided_read.py +0 -38
  1157. mindspore/ops/_op_impl/tbe/strided_slice_d.py +0 -44
  1158. mindspore/ops/_op_impl/tbe/strided_slice_ds.py +0 -71
  1159. mindspore/ops/_op_impl/tbe/strided_slice_grad_d.py +0 -51
  1160. mindspore/ops/_op_impl/tbe/strided_slice_grad_ds.py +0 -57
  1161. mindspore/ops/_op_impl/tbe/strided_write.py +0 -38
  1162. mindspore/ops/_op_impl/tbe/sub.py +0 -39
  1163. mindspore/ops/_op_impl/tbe/sub_ds.py +0 -40
  1164. mindspore/ops/_op_impl/tbe/tan.py +0 -38
  1165. mindspore/ops/_op_impl/tbe/tan_ds.py +0 -39
  1166. mindspore/ops/_op_impl/tbe/tanh.py +0 -37
  1167. mindspore/ops/_op_impl/tbe/tanh_ds.py +0 -38
  1168. mindspore/ops/_op_impl/tbe/tanh_grad.py +0 -39
  1169. mindspore/ops/_op_impl/tbe/tanh_grad_ds.py +0 -40
  1170. mindspore/ops/_op_impl/tbe/tensor_move.py +0 -49
  1171. mindspore/ops/_op_impl/tbe/tensor_move_ds.py +0 -50
  1172. mindspore/ops/_op_impl/tbe/tensor_scatter_update.py +0 -41
  1173. mindspore/ops/_op_impl/tbe/tile.py +0 -37
  1174. mindspore/ops/_op_impl/tbe/tile_ds.py +0 -42
  1175. mindspore/ops/_op_impl/tbe/top_k.py +0 -42
  1176. mindspore/ops/_op_impl/tbe/top_k_ds.py +0 -43
  1177. mindspore/ops/_op_impl/tbe/trans_data.py +0 -167
  1178. mindspore/ops/_op_impl/tbe/trans_data_ds.py +0 -180
  1179. mindspore/ops/_op_impl/tbe/trans_data_rnn.py +0 -44
  1180. mindspore/ops/_op_impl/tbe/transpose.py +0 -60
  1181. mindspore/ops/_op_impl/tbe/transpose_d.py +0 -47
  1182. mindspore/ops/_op_impl/tbe/transpose_nod.py +0 -60
  1183. mindspore/ops/_op_impl/tbe/trunc.py +0 -39
  1184. mindspore/ops/_op_impl/tbe/truncate_div.py +0 -41
  1185. mindspore/ops/_op_impl/tbe/truncate_div_ds.py +0 -42
  1186. mindspore/ops/_op_impl/tbe/truncate_mod.py +0 -41
  1187. mindspore/ops/_op_impl/tbe/truncate_mod_ds.py +0 -42
  1188. mindspore/ops/_op_impl/tbe/unpack.py +0 -38
  1189. mindspore/ops/_op_impl/tbe/unpack_ds.py +0 -39
  1190. mindspore/ops/_op_impl/tbe/unsorted_segment_max.py +0 -49
  1191. mindspore/ops/_op_impl/tbe/unsorted_segment_max_ds.py +0 -40
  1192. mindspore/ops/_op_impl/tbe/unsorted_segment_min.py +0 -49
  1193. mindspore/ops/_op_impl/tbe/unsorted_segment_min_ds.py +0 -40
  1194. mindspore/ops/_op_impl/tbe/unsorted_segment_prod.py +0 -49
  1195. mindspore/ops/_op_impl/tbe/unsorted_segment_prod_ds.py +0 -38
  1196. mindspore/ops/_op_impl/tbe/unsorted_segment_sum.py +0 -38
  1197. mindspore/ops/_op_impl/tbe/unsorted_segment_sum_ds.py +0 -41
  1198. mindspore/ops/_op_impl/tbe/wts_arq.py +0 -40
  1199. mindspore/ops/_op_impl/tbe/xdivy.py +0 -38
  1200. mindspore/ops/_op_impl/tbe/xdivy_ds.py +0 -39
  1201. mindspore/ops/_op_impl/tbe/xlogy.py +0 -38
  1202. mindspore/ops/_op_impl/tbe/xlogy_ds.py +0 -39
  1203. mindspore/ops/_op_impl/tbe/zeros_like.py +0 -41
  1204. mindspore/ops/_op_impl/tbe/zeros_like_ds.py +0 -42
  1205. mindspore/ops/_tracefunc.py +0 -241
  1206. mindspore/ops/arg_dtype_cast.py +0 -54
  1207. mindspore/ops/silent_check.py +0 -162
  1208. mindspore/profiler/parser/msadvisor_analyzer.py +0 -82
  1209. mindspore/profiler/parser/msadvisor_parser.py +0 -240
  1210. mindspore/rewrite/api/tree_node_helper.py +0 -60
  1211. mindspore/rewrite/ast_helpers/ast_creator.py +0 -115
  1212. mindspore/rewrite/ast_transformers/flatten_recursive_stmt.py +0 -267
  1213. mindspore/rewrite/ast_transformers/remove_return_out_of_if.py +0 -228
  1214. mindspore/rewrite/namespace.py +0 -53
  1215. mindspore-2.2.14.dist-info/RECORD +0 -1924
  1216. {mindspore-2.2.14.dist-info → mindspore-2.4.0.dist-info}/WHEEL +0 -0
  1217. {mindspore-2.2.14.dist-info → mindspore-2.4.0.dist-info}/top_level.txt +0 -0
@@ -66,7 +66,7 @@ class AllpassBiquad(AudioTensorOperation):
66
66
  .. math::
67
67
  H(s) = \frac{s^2 - \frac{s}{Q} + 1}{s^2 + \frac{s}{Q} + 1}
68
68
 
69
- Similar to `SoX <http://sox.sourceforge.net/sox.html>`_ implementation.
69
+ Similar to `SoX <https://sourceforge.net/projects/sox/>`_ implementation.
70
70
 
71
71
  Note:
72
72
  The shape of the audio waveform to be processed needs to be <..., time>.
@@ -91,15 +91,27 @@ class AllpassBiquad(AudioTensorOperation):
91
91
  Examples:
92
92
  >>> import numpy as np
93
93
  >>> import mindspore.dataset as ds
94
+ >>> import mindspore.dataset.audio as audio
94
95
  >>>
95
- >>> waveform = np.array([[2.716064453125e-03, 6.34765625e-03], [9.246826171875e-03, 1.0894775390625e-02]])
96
+ >>> # Use the transform in dataset pipeline mode.
97
+ >>> waveform = np.random.random([5, 16]) # 5 samples
96
98
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
97
- >>> transforms = [ds.audio.AllpassBiquad(44100, 200.0)]
99
+ >>> transforms = [audio.AllpassBiquad(44100, 200.0)]
98
100
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
101
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
102
+ ... print(item["audio"].shape, item["audio"].dtype)
103
+ ... break
104
+ (16,) float64
105
+ >>>
106
+ >>> # Use the transform in eager mode
107
+ >>> waveform = np.random.random([16]) # 1 sample
108
+ >>> output = audio.AllpassBiquad(44100, 200.0)(waveform)
109
+ >>> print(output.shape, output.dtype)
110
+ (16,) float64
99
111
 
100
112
  Tutorial Examples:
101
113
  - `Illustration of audio transforms
102
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
114
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
103
115
  """
104
116
 
105
117
  @check_allpass_biquad
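
The updated AllpassBiquad example above standardizes on the two usage patterns that recur throughout this release: building a dataset pipeline with NumpySlicesDataset plus map, and calling the transform object directly on a NumPy array (eager mode). Below is a minimal standalone sketch of both patterns, assuming mindspore 2.4.0 with mindspore.dataset.audio available; nothing in it goes beyond the calls shown in the new doctests, and the waveform is random data just as in those examples.

    import numpy as np
    import mindspore.dataset as ds
    import mindspore.dataset.audio as audio

    # Pipeline mode: apply the filter inside a dataset map operation.
    waveform = np.random.random([5, 16])  # 5 single-channel samples of 16 points each
    dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
    dataset = dataset.map(operations=[audio.AllpassBiquad(44100, 200.0)],
                          input_columns=["audio"])
    for item in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
        print(item["audio"].shape, item["audio"].dtype)  # (16,) float64
        break

    # Eager mode: call the transform object directly on a NumPy array.
    single = np.random.random([16])
    filtered = audio.AllpassBiquad(44100, 200.0)(single)
    print(filtered.shape, filtered.dtype)  # (16,) float64
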
@@ -153,16 +165,26 @@ class AmplitudeToDB(AudioTensorOperation):
153
165
  >>> import numpy as np
154
166
  >>> import mindspore.dataset as ds
155
167
  >>> import mindspore.dataset.audio as audio
156
- >>> from mindspore.dataset.audio import ScaleType
157
168
  >>>
158
- >>> waveform = np.random.random([1, 400 // 2 + 1, 30])
169
+ >>> # Use the transform in dataset pipeline mode
170
+ >>> waveform = np.random.random([5, 400 // 2 + 1, 30]) # 5 samples
159
171
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
160
- >>> transforms = [audio.AmplitudeToDB(stype=ScaleType.POWER)]
172
+ >>> transforms = [audio.AmplitudeToDB(stype=audio.ScaleType.POWER)]
161
173
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
174
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
175
+ ... print(item["audio"].shape, item["audio"].dtype)
176
+ ... break
177
+ (201, 30) float64
178
+ >>>
179
+ >>> # Use the transform in eager mode
180
+ >>> waveform = np.random.random([400 // 2 + 1, 30]) # 1 sample
181
+ >>> output = audio.AmplitudeToDB(stype=audio.ScaleType.POWER)(waveform)
182
+ >>> print(output.shape, output.dtype)
183
+ (201, 30) float64
162
184
 
163
185
  Tutorial Examples:
164
186
  - `Illustration of audio transforms
165
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
187
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
166
188
  """
167
189
 
168
190
  @check_amplitude_to_db
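
The revised AmplitudeToDB example also drops the separate `from mindspore.dataset.audio import ScaleType` import in favour of referencing the enum through the package namespace. A small eager-mode sketch along those lines; `ScaleType.MAGNITUDE` is assumed to be the enum's other member (only POWER appears in the diff above), and the dB conventions in the comments are the usual ones rather than something stated in this diff.

    import numpy as np
    import mindspore.dataset.audio as audio

    spec = np.random.random([400 // 2 + 1, 30])   # one power spectrogram (freq, time)

    # Power input: the usual 10 * log10 power-to-dB convention.
    to_db = audio.AmplitudeToDB(stype=audio.ScaleType.POWER)
    print(to_db(spec).shape)                      # (201, 30), shape is preserved

    # Magnitude input would use ScaleType.MAGNITUDE instead (assumed member;
    # only POWER is shown in the example above).
    to_db_mag = audio.AmplitudeToDB(stype=audio.ScaleType.MAGNITUDE)
    print(to_db_mag(spec).shape)                  # (201, 30)
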
@@ -196,14 +218,25 @@ class Angle(AudioTensorOperation):
196
218
  >>> import mindspore.dataset as ds
197
219
  >>> import mindspore.dataset.audio as audio
198
220
  >>>
199
- >>> waveform = np.array([[1.43, 5.434], [23.54, 89.38]])
221
+ >>> # Use the transform in dataset pipeline mode
222
+ >>> waveform = np.random.random([5, 16, 2]) # 5 samples
200
223
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
201
224
  >>> transforms = [audio.Angle()]
202
225
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
226
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
227
+ ... print(item["audio"].shape, item["audio"].dtype)
228
+ ... break
229
+ (16,) float64
230
+ >>>
231
+ >>> # Use the transform in eager mode
232
+ >>> waveform = np.random.random([16, 2]) # 1 sample
233
+ >>> output = audio.Angle()(waveform)
234
+ >>> print(output.shape, output.dtype)
235
+ (16,) float64
203
236
 
204
237
  Tutorial Examples:
205
238
  - `Illustration of audio transforms
206
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
239
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
207
240
  """
208
241
 
209
242
  def parse(self):
@@ -218,7 +251,7 @@ class BandBiquad(AudioTensorOperation):
218
251
  bandwidth gives the slope of the drop. The frequencies at band edge will be
219
252
  half of their original amplitudes.
220
253
 
221
- Similar to `SoX <http://sox.sourceforge.net/sox.html>`_ implementation.
254
+ Similar to `SoX <https://sourceforge.net/projects/sox/>`_ implementation.
222
255
 
223
256
  Note:
224
257
  The shape of the audio waveform to be processed needs to be <..., time>.
@@ -249,14 +282,25 @@ class BandBiquad(AudioTensorOperation):
249
282
  >>> import mindspore.dataset as ds
250
283
  >>> import mindspore.dataset.audio as audio
251
284
  >>>
252
- >>> waveform = np.array([[2.716064453125e-03, 6.34765625e-03], [9.246826171875e-03, 1.0894775390625e-02]])
285
+ >>> # Use the transform in dataset pipeline mode
286
+ >>> waveform = np.random.random([5, 16]) # 5 samples
253
287
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
254
288
  >>> transforms = [audio.BandBiquad(44100, 200.0)]
255
289
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
290
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
291
+ ... print(item["audio"].shape, item["audio"].dtype)
292
+ ... break
293
+ (16,) float64
294
+ >>>
295
+ >>> # Use the transform in eager mode
296
+ >>> waveform = np.random.random([16]) # 1 sample
297
+ >>> output = audio.BandBiquad(44100, 200.0)(waveform)
298
+ >>> print(output.shape, output.dtype)
299
+ (16,) float64
256
300
 
257
301
  Tutorial Examples:
258
302
  - `Illustration of audio transforms
259
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
303
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
260
304
  """
261
305
 
262
306
  @check_band_biquad
@@ -286,7 +330,7 @@ class BandpassBiquad(AudioTensorOperation):
286
330
  \frac{\frac{s}{Q}}{s^2 + \frac{s}{Q} + 1}, &\text{if const_skirt_gain=False}.
287
331
  \end{cases}
288
332
 
289
- Similar to `SoX <http://sox.sourceforge.net/sox.html>`_ implementation.
333
+ Similar to `SoX <https://sourceforge.net/projects/sox/>`_ implementation.
290
334
 
291
335
  Note:
292
336
  The shape of the audio waveform to be processed needs to be <..., time>.
@@ -316,14 +360,25 @@ class BandpassBiquad(AudioTensorOperation):
316
360
  >>> import mindspore.dataset as ds
317
361
  >>> import mindspore.dataset.audio as audio
318
362
  >>>
319
- >>> waveform = np.array([[2.716064453125e-03, 6.34765625e-03], [9.246826171875e-03, 1.0894775390625e-02]])
363
+ >>> # Use the transform in dataset pipeline mode
364
+ >>> waveform = np.random.random([5, 16]) # 5 samples
320
365
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
321
366
  >>> transforms = [audio.BandpassBiquad(44100, 200.0)]
322
367
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
368
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
369
+ ... print(item["audio"].shape, item["audio"].dtype)
370
+ ... break
371
+ (16,) float64
372
+ >>>
373
+ >>> # Use the transform in eager mode
374
+ >>> waveform = np.random.random([16]) # 1 sample
375
+ >>> output = audio.BandpassBiquad(44100, 200.0)(waveform)
376
+ >>> print(output.shape, output.dtype)
377
+ (16,) float64
323
378
 
324
379
  Tutorial Examples:
325
380
  - `Illustration of audio transforms
326
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
381
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
327
382
  """
328
383
 
329
384
  @check_bandpass_biquad
@@ -351,7 +406,7 @@ class BandrejectBiquad(AudioTensorOperation):
351
406
  .. math::
352
407
  H(s) = \frac{s^2 + 1}{s^2 + \frac{s}{Q} + 1}
353
408
 
354
- Similar to `SoX <http://sox.sourceforge.net/sox.html>`_ implementation.
409
+ Similar to `SoX <https://sourceforge.net/projects/sox/>`_ implementation.
355
410
 
356
411
  Note:
357
412
  The shape of the audio waveform to be processed needs to be <..., time>.
@@ -378,14 +433,25 @@ class BandrejectBiquad(AudioTensorOperation):
378
433
  >>> import mindspore.dataset as ds
379
434
  >>> import mindspore.dataset.audio as audio
380
435
  >>>
381
- >>> waveform = np.array([[2.716064453125e-03, 6.34765625e-03],[9.246826171875e-03, 1.0894775390625e-02]])
436
+ >>> # Use the transform in dataset pipeline mode
437
+ >>> waveform = np.random.random([5, 16]) # 5 samples
382
438
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
383
439
  >>> transforms = [audio.BandrejectBiquad(44100, 200.0)]
384
440
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
441
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
442
+ ... print(item["audio"].shape, item["audio"].dtype)
443
+ ... break
444
+ (16,) float64
445
+ >>>
446
+ >>> # Use the transform in eager mode
447
+ >>> waveform = np.random.random([16]) # 1 sample
448
+ >>> output = audio.BandrejectBiquad(44100, 200.0)(waveform)
449
+ >>> print(output.shape, output.dtype)
450
+ (16,) float64
385
451
 
386
452
  Tutorial Examples:
387
453
  - `Illustration of audio transforms
388
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
454
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
389
455
  """
390
456
 
391
457
  @check_bandreject_biquad
@@ -409,7 +475,7 @@ class BassBiquad(AudioTensorOperation):
409
475
  .. math::
410
476
  H(s) = A\frac{s^2 + \frac{\sqrt{A}}{Q}s + A}{As^2 + \frac{\sqrt{A}}{Q}s + 1}
411
477
 
412
- Similar to `SoX <http://sox.sourceforge.net/sox.html>`_ implementation.
478
+ Similar to `SoX <https://sourceforge.net/projects/sox/>`_ implementation.
413
479
 
414
480
  Note:
415
481
  The shape of the audio waveform to be processed needs to be <..., time>.
@@ -438,14 +504,25 @@ class BassBiquad(AudioTensorOperation):
438
504
  >>> import mindspore.dataset as ds
439
505
  >>> import mindspore.dataset.audio as audio
440
506
  >>>
441
- >>> waveform = np.array([[2.716064453125e-03, 6.34765625e-03], [9.246826171875e-03, 1.0894775390625e-02]])
507
+ >>> # Use the transform in dataset pipeline mode
508
+ >>> waveform = np.random.random([5, 16]) # 5 samples
442
509
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
443
510
  >>> transforms = [audio.BassBiquad(44100, 100.0)]
444
511
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
512
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
513
+ ... print(item["audio"].shape, item["audio"].dtype)
514
+ ... break
515
+ (16,) float64
516
+ >>>
517
+ >>> # Use the transform in eager mode
518
+ >>> waveform = np.random.random([16]) # 1 sample
519
+ >>> output = audio.BassBiquad(44100, 200.0)(waveform)
520
+ >>> print(output.shape, output.dtype)
521
+ (16,) float64
445
522
 
446
523
  Tutorial Examples:
447
524
  - `Illustration of audio transforms
448
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
525
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
449
526
  """
450
527
 
451
528
  @check_bass_biquad
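
Because every biquad-style transform above consumes and produces a `<..., time>` waveform, several of them can be chained in a single map call; the list passed to `operations` is applied in sequence to the same column. A sketch combining three of the filters shown in this diff; the particular combination and parameters are illustrative only, not taken from the package.

    import numpy as np
    import mindspore.dataset as ds
    import mindspore.dataset.audio as audio

    waveform = np.random.random([5, 16])  # 5 samples
    dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])

    # The operations list is applied left to right to the "audio" column.
    chain = [
        audio.BandBiquad(44100, 200.0),      # band filter centered at 200 Hz
        audio.BassBiquad(44100, 100.0),      # bass tone-control (shelving) stage
        audio.AllpassBiquad(44100, 200.0),   # phase-only all-pass stage
    ]
    dataset = dataset.map(operations=chain, input_columns=["audio"])

    for item in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
        print(item["audio"].shape, item["audio"].dtype)  # (16,) float64
        break
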
@@ -487,15 +564,28 @@ class Biquad(TensorOperation):
487
564
 
488
565
  Examples:
489
566
  >>> import numpy as np
567
+ >>> import mindspore.dataset as ds
490
568
  >>> import mindspore.dataset.audio as audio
491
569
  >>>
492
- >>> waveform = np.array([[2.716064453125e-03, 6.34765625e-03], [9.246826171875e-03, 1.0894775390625e-02]])
493
- >>> biquad_op = audio.Biquad(0.01, 0.02, 0.13, 1, 0.12, 0.3)
494
- >>> waveform_filtered = biquad_op(waveform)
570
+ >>> # Use the transform in dataset pipeline mode
571
+ >>> waveform = np.random.random([5, 16]) # 5 samples
572
+ >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
573
+ >>> transforms = [audio.Biquad(0.01, 0.02, 0.13, 1, 0.12, 0.3)]
574
+ >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
575
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
576
+ ... print(item["audio"].shape, item["audio"].dtype)
577
+ ... break
578
+ (16,) float64
579
+ >>>
580
+ >>> # Use the transform in eager mode
581
+ >>> waveform = np.random.random([16]) # 1 sample
582
+ >>> output = audio.Biquad(0.01, 0.02, 0.13, 1, 0.12, 0.3)(waveform)
583
+ >>> print(output.shape, output.dtype)
584
+ (16,) float64
495
585
 
496
586
  Tutorial Examples:
497
587
  - `Illustration of audio transforms
498
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
588
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
499
589
  """
500
590
 
501
591
  @check_biquad
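
The specialized filters above (allpass, band, bandpass, bandreject, bass) are all particular coefficient choices of the generic Biquad transform, which takes the numerator and denominator coefficients (b0, b1, b2, a0, a1, a2) directly. A short eager-mode sketch using the coefficients from the example above; applying it to a (channel, time) array assumes the same `<..., time>` input convention as the other filters here, which is consistent with the removed doctest that passed a two-row array.

    import numpy as np
    import mindspore.dataset.audio as audio

    # b0, b1, b2, a0, a1, a2 -- same coefficients as the doctest above.
    biquad = audio.Biquad(0.01, 0.02, 0.13, 1, 0.12, 0.3)

    stereo = np.random.random([2, 16])   # (channel, time), assuming <..., time> input
    print(biquad(stereo).shape)          # (2, 16), one filtered waveform per channel
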
@@ -536,14 +626,25 @@ class ComplexNorm(AudioTensorOperation):
536
626
  >>> import mindspore.dataset as ds
537
627
  >>> import mindspore.dataset.audio as audio
538
628
  >>>
539
- >>> waveform = np.random.random([2, 4, 2])
629
+ >>> # Use the transform in dataset pipeline mode
630
+ >>> waveform = np.random.random([5, 16, 2]) # 5 samples
540
631
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
541
632
  >>> transforms = [audio.ComplexNorm()]
542
633
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
634
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
635
+ ... print(item["audio"].shape, item["audio"].dtype)
636
+ ... break
637
+ (16,) float64
638
+ >>>
639
+ >>> # Use the transform in eager mode
640
+ >>> waveform = np.random.random([16, 2]) # 1 sample
641
+ >>> output = audio.ComplexNorm()(waveform)
642
+ >>> print(output.shape, output.dtype)
643
+ (16,) float64
543
644
 
544
645
  Tutorial Examples:
545
646
  - `Illustration of audio transforms
546
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
647
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
547
648
  """
548
649
 
549
650
  @check_complex_norm
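
ComplexNorm treats the trailing dimension of size 2 as the real and imaginary parts of a complex spectrogram and returns its norm, which is why the (16, 2) inputs above come out as (16,). A small eager-mode cross-check against NumPy; the default power of 1.0 (a plain magnitude) is an assumption, so the difference is printed rather than asserted.

    import numpy as np
    import mindspore.dataset.audio as audio

    complex_spec = np.random.random([16, 2])             # (..., 2): real and imaginary parts

    norm = audio.ComplexNorm()(complex_spec)
    manual = np.sqrt((complex_spec ** 2).sum(axis=-1))   # |re + j*im|, i.e. power 1.0

    print(norm.shape)                                    # (16,)
    print(np.abs(norm - manual).max())                   # expected ~0 if power defaults to 1.0
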
@@ -601,16 +702,26 @@ class ComputeDeltas(AudioTensorOperation):
601
702
  >>> import numpy as np
602
703
  >>> import mindspore.dataset as ds
603
704
  >>> import mindspore.dataset.audio as audio
604
- >>> from mindspore.dataset.audio import BorderType
605
705
  >>>
606
- >>> waveform = np.random.random([1, 400 // 2 + 1, 30])
706
+ >>> # Use the transform in dataset pipeline mode
707
+ >>> waveform = np.random.random([5, 400 // 2 + 1, 30]) # 5 samples
607
708
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
608
- >>> transforms = [audio.ComputeDeltas(win_length=7, pad_mode=BorderType.EDGE)]
709
+ >>> transforms = [audio.ComputeDeltas(win_length=7, pad_mode=audio.BorderType.EDGE)]
609
710
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
711
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
712
+ ... print(item["audio"].shape, item["audio"].dtype)
713
+ ... break
714
+ (201, 30) float64
715
+ >>>
716
+ >>> # Use the transform in eager mode
717
+ >>> waveform = np.random.random([400 // 2 + 1, 30]) # 1 sample
718
+ >>> output = audio.ComputeDeltas(win_length=7, pad_mode=audio.BorderType.EDGE)(waveform)
719
+ >>> print(output.shape, output.dtype)
720
+ (201, 30) float64
610
721
 
611
722
  Tutorial Examples:
612
723
  - `Illustration of audio transforms
613
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
724
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
614
725
  """
615
726
 
616
727
  @check_compute_deltas
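
ComputeDeltas operates on `<..., freq, time>` spectrogram-like input, the same layout AmplitudeToDB produces, so the two are commonly combined to obtain delta features of a log-scaled spectrogram. A pipeline sketch chaining them with the parameters used above; the combination itself is illustrative and not taken from the diff.

    import numpy as np
    import mindspore.dataset as ds
    import mindspore.dataset.audio as audio

    spectrograms = np.random.random([5, 201, 30])  # 5 power spectrograms (freq, time)
    dataset = ds.NumpySlicesDataset(data=spectrograms, column_names=["audio"])

    ops = [
        audio.AmplitudeToDB(stype=audio.ScaleType.POWER),                  # to decibel scale
        audio.ComputeDeltas(win_length=7, pad_mode=audio.BorderType.EDGE)  # delta features
    ]
    dataset = dataset.map(operations=ops, input_columns=["audio"])

    for item in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
        print(item["audio"].shape)  # (201, 30): both transforms preserve the shape
        break
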
@@ -629,7 +740,7 @@ class Contrast(AudioTensorOperation):
629
740
 
630
741
  Comparable with compression, this effect modifies an audio signal to make it sound louder.
631
742
 
632
- Similar to `SoX <http://sox.sourceforge.net/sox.html>`_ implementation.
743
+ Similar to `SoX <https://sourceforge.net/projects/sox/>`_ implementation.
633
744
 
634
745
  Note:
635
746
  The shape of the audio waveform to be processed needs to be <..., time>.
@@ -652,14 +763,25 @@ class Contrast(AudioTensorOperation):
652
763
  >>> import mindspore.dataset as ds
653
764
  >>> import mindspore.dataset.audio as audio
654
765
  >>>
655
- >>> waveform = np.array([[2.716064453125e-03, 6.34765625e-03], [9.246826171875e-03, 1.0894775390625e-02]])
766
+ >>> # Use the transform in dataset pipeline mode
767
+ >>> waveform = np.random.random([5, 16]) # 5 samples
656
768
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
657
769
  >>> transforms = [audio.Contrast()]
658
770
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
771
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
772
+ ... print(item["audio"].shape, item["audio"].dtype)
773
+ ... break
774
+ (16,) float64
775
+ >>>
776
+ >>> # Use the transform in eager mode
777
+ >>> waveform = np.random.random([16]) # 1 sample
778
+ >>> output = audio.Contrast()(waveform)
779
+ >>> print(output.shape, output.dtype)
780
+ (16,) float64
659
781
 
660
782
  Tutorial Examples:
661
783
  - `Illustration of audio transforms
662
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
784
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
663
785
  """
664
786
 
665
787
  @check_contrast
@@ -691,14 +813,25 @@ class DBToAmplitude(AudioTensorOperation):
691
813
  >>> import mindspore.dataset as ds
692
814
  >>> import mindspore.dataset.audio as audio
693
815
  >>>
694
- >>> waveform = np.array([[2.716064453125e-03, 6.34765625e-03], [9.246826171875e-03, 1.0894775390625e-02]])
816
+ >>> # Use the transform in dataset pipeline mode
817
+ >>> waveform = np.random.random([5, 16]) # 5 samples
695
818
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
696
819
  >>> transforms = [audio.DBToAmplitude(0.5, 0.5)]
697
820
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
821
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
822
+ ... print(item["audio"].shape, item["audio"].dtype)
823
+ ... break
824
+ (16,) float64
825
+ >>>
826
+ >>> # Use the transform in eager mode
827
+ >>> waveform = np.random.random([16]) # 1 sample
828
+ >>> output = audio.DBToAmplitude(0.5, 0.5)(waveform)
829
+ >>> print(output.shape, output.dtype)
830
+ (16,) float64
698
831
 
699
832
  Tutorial Examples:
700
833
  - `Illustration of audio transforms
701
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
834
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
702
835
  """
703
836
 
704
837
  @check_db_to_amplitude
@@ -734,14 +867,25 @@ class DCShift(AudioTensorOperation):
734
867
  >>> import mindspore.dataset as ds
735
868
  >>> import mindspore.dataset.audio as audio
736
869
  >>>
737
- >>> waveform = np.array([0.60, 0.97, -1.04, -1.26, 0.97, 0.91, 0.48, 0.93])
870
+ >>> # Use the transform in dataset pipeline mode
871
+ >>> waveform = np.random.random([5, 16]) # 5 samples
738
872
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
739
873
  >>> transforms = [audio.DCShift(0.5, 0.02)]
740
874
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
875
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
876
+ ... print(item["audio"].shape, item["audio"].dtype)
877
+ ... break
878
+ (16,) float64
879
+ >>>
880
+ >>> # Use the transform in eager mode
881
+ >>> waveform = np.random.random([16]) # 1 sample
882
+ >>> output = audio.DCShift(0.5, 0.02)(waveform)
883
+ >>> print(output.shape, output.dtype)
884
+ (16,) float64
741
885
 
742
886
  Tutorial Examples:
743
887
  - `Illustration of audio transforms
744
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
888
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
745
889
  """
746
890
 
747
891
  @check_dc_shift
@@ -758,7 +902,7 @@ class DeemphBiquad(AudioTensorOperation):
758
902
  """
759
903
  Apply Compact Disc (IEC 60908) de-emphasis (a treble attenuation shelving filter) to the audio waveform.
760
904
 
761
- Similar to `SoX <http://sox.sourceforge.net/sox.html>`_ implementation.
905
+ Similar to `SoX <https://sourceforge.net/projects/sox/>`_ implementation.
762
906
 
763
907
  Args:
764
908
  sample_rate (int): Sampling rate of the waveform, must be 44100 or 48000 (Hz).
@@ -776,14 +920,25 @@ class DeemphBiquad(AudioTensorOperation):
776
920
  >>> import mindspore.dataset as ds
777
921
  >>> import mindspore.dataset.audio as audio
778
922
  >>>
779
- >>> waveform = np.array([[2.716064453125e-03, 6.34765625e-03], [9.246826171875e-03, 1.0894775390625e-02]])
923
+ >>> # Use the transform in dataset pipeline mode
924
+ >>> waveform = np.random.random([5, 8]) # 5 samples
780
925
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
781
926
  >>> transforms = [audio.DeemphBiquad(44100)]
782
927
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
928
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
929
+ ... print(item["audio"].shape, item["audio"].dtype)
930
+ ... break
931
+ (8,) float64
932
+ >>>
933
+ >>> # Use the transform in eager mode
934
+ >>> waveform = np.random.random([8]) # 1 sample
935
+ >>> output = audio.DeemphBiquad(44100)(waveform)
936
+ >>> print(output.shape, output.dtype)
937
+ (8,) float64
783
938
 
784
939
  Tutorial Examples:
785
940
  - `Illustration of audio transforms
786
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
941
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
787
942
  """
788
943
 
789
944
  @check_deemph_biquad
@@ -831,15 +986,25 @@ class DetectPitchFrequency(AudioTensorOperation):
831
986
  >>> import mindspore.dataset as ds
832
987
  >>> import mindspore.dataset.audio as audio
833
988
  >>>
834
- >>> waveform = np.array([[0.716064e-03, 5.347656e-03, 6.246826e-03, 2.089477e-02, 7.138305e-02],
835
- ... [4.156616e-02, 1.394653e-02, 3.550292e-02, 0.614379e-02, 3.840209e-02]])
989
+ >>> # Use the transform in dataset pipeline mode
990
+ >>> waveform = np.random.random([5, 16]) # 5 samples
836
991
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
837
992
  >>> transforms = [audio.DetectPitchFrequency(30, 0.1, 3, 5, 25)]
838
993
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
994
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
995
+ ... print(item["audio"].shape, item["audio"].dtype)
996
+ ... break
997
+ (5,) float32
998
+ >>>
999
+ >>> # Use the transform in eager mode
1000
+ >>> waveform = np.random.random([16]) # 1 sample
1001
+ >>> output = audio.DetectPitchFrequency(30, 0.1, 3, 5, 25)(waveform)
1002
+ >>> print(output.shape, output.dtype)
1003
+ (5,) float32
839
1004
 
840
1005
  Tutorial Examples:
841
1006
  - `Illustration of audio transforms
842
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
1007
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
843
1008
  """
844
1009
 
845
1010
  @check_detect_pitch_frequency
@@ -888,14 +1053,25 @@ class Dither(AudioTensorOperation):
888
1053
  >>> import mindspore.dataset as ds
889
1054
  >>> import mindspore.dataset.audio as audio
890
1055
  >>>
891
- >>> waveform = np.array([[1, 2, 3], [4, 5, 6]])
1056
+ >>> # Use the transform in dataset pipeline mode
1057
+ >>> waveform = np.random.random([5, 16]) # 5 samples
892
1058
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
893
1059
  >>> transforms = [audio.Dither()]
894
1060
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
1061
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
1062
+ ... print(item["audio"].shape, item["audio"].dtype)
1063
+ ... break
1064
+ (16,) float64
1065
+ >>>
1066
+ >>> # Use the transform in eager mode
1067
+ >>> waveform = np.random.random([16]) # 1 sample
1068
+ >>> output = audio.Dither()(waveform)
1069
+ >>> print(output.shape, output.dtype)
1070
+ (16,) float64
895
1071
 
896
1072
  Tutorial Examples:
897
1073
  - `Illustration of audio transforms
898
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
1074
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
899
1075
  """
900
1076
 
901
1077
  @check_dither
@@ -912,7 +1088,7 @@ class EqualizerBiquad(AudioTensorOperation):
912
1088
  """
913
1089
  Design biquad equalizer filter and perform filtering.
914
1090
 
915
- Similar to `SoX <http://sox.sourceforge.net/sox.html>`_ implementation.
1091
+ Similar to `SoX <https://sourceforge.net/projects/sox/>`_ implementation.
916
1092
 
917
1093
  Args:
918
1094
  sample_rate (int): Sampling rate of the waveform, e.g. ``44100`` (Hz), the value can't be 0.
@@ -936,14 +1112,25 @@ class EqualizerBiquad(AudioTensorOperation):
936
1112
  >>> import mindspore.dataset as ds
937
1113
  >>> import mindspore.dataset.audio as audio
938
1114
  >>>
939
- >>> waveform = np.array([[2.716064453125e-03, 6.34765625e-03], [9.246826171875e-03, 1.0894775390625e-02]])
1115
+ >>> # Use the transform in dataset pipeline mode
1116
+ >>> waveform = np.random.random([5, 16]) # 5 samples
940
1117
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
941
1118
  >>> transforms = [audio.EqualizerBiquad(44100, 1500, 5.5, 0.7)]
942
1119
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
1120
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
1121
+ ... print(item["audio"].shape, item["audio"].dtype)
1122
+ ... break
1123
+ (16,) float64
1124
+ >>>
1125
+ >>> # Use the transform in eager mode
1126
+ >>> waveform = np.random.random([16]) # 1 sample
1127
+ >>> output = audio.EqualizerBiquad(44100, 1500, 5.5, 0.7)(waveform)
1128
+ >>> print(output.shape, output.dtype)
1129
+ (16,) float64
943
1130
 
944
1131
  Tutorial Examples:
945
1132
  - `Illustration of audio transforms
946
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
1133
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
947
1134
  """
948
1135
 
949
1136
  @check_equalizer_biquad
@@ -996,16 +1183,26 @@ class Fade(AudioTensorOperation):
996
1183
  >>> import numpy as np
997
1184
  >>> import mindspore.dataset as ds
998
1185
  >>> import mindspore.dataset.audio as audio
999
- >>> from mindspore.dataset.audio import FadeShape
1000
1186
  >>>
1001
- >>> waveform = np.array([[2.716064453125e-03, 6.34765625e-03, 9.246826171875e-03, 1.0894775390625e-02]])
1002
- >>> dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
1003
- >>> transforms = [audio.Fade(fade_in_len=3, fade_out_len=2, fade_shape=FadeShape.LINEAR)]
1004
- >>> dataset = dataset.map(operations=transforms, input_columns=["audio"])
1187
+ >>> # Use the transform in dataset pipeline mode
1188
+ >>> waveform = np.random.random([5, 16]) # 5 samples
1189
+ >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
1190
+ >>> transforms = [audio.Fade(fade_in_len=3, fade_out_len=2, fade_shape=audio.FadeShape.LINEAR)]
1191
+ >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
1192
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
1193
+ ... print(item["audio"].shape, item["audio"].dtype)
1194
+ ... break
1195
+ (16,) float64
1196
+ >>>
1197
+ >>> # Use the transform in eager mode
1198
+ >>> waveform = np.random.random([16]) # 1 sample
1199
+ >>> output = audio.Fade(fade_in_len=3, fade_out_len=2, fade_shape=audio.FadeShape.LINEAR)(waveform)
1200
+ >>> print(output.shape, output.dtype)
1201
+ (16,) float64
1005
1202
 
1006
1203
  Tutorial Examples:
1007
1204
  - `Illustration of audio transforms
1008
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
1205
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
1009
1206
  """
1010
1207
 
1011
1208
  @check_fade
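
Fade only reshapes the amplitude envelope, so its effect is easiest to see on a constant signal. A small eager-mode sketch with the same parameters as the example above; the exact envelope values depend on the implementation, but with a linear shape one would expect the first three samples to ramp up from 0 and the last two to ramp down to 0.

    import numpy as np
    import mindspore.dataset.audio as audio

    fade = audio.Fade(fade_in_len=3, fade_out_len=2, fade_shape=audio.FadeShape.LINEAR)

    waveform = np.ones([8])   # constant signal makes the envelope visible
    print(fade(waveform))     # roughly [0., 0.5, 1., 1., 1., 1., 1., 0.] if the fade is linear
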
@@ -1045,16 +1242,25 @@ class Filtfilt(AudioTensorOperation):
1045
1242
  >>> import mindspore.dataset as ds
1046
1243
  >>> import mindspore.dataset.audio as audio
1047
1244
  >>>
1048
- >>> waveform = np.array([[2.716064453125e-03, 6.34765625e-03], [9.246826171875e-03, 1.0894775390625e-02]])
1049
- >>> a_coeffs = [0.1, 0.2, 0.3]
1050
- >>> b_coeffs = [0.1, 0.2, 0.3]
1245
+ >>> # Use the transform in dataset pipeline mode
1246
+ >>> waveform = np.random.random([5, 16]) # 5 samples
1051
1247
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
1052
- >>> transforms = [audio.Filtfilt(a_coeffs, b_coeffs)]
1248
+ >>> transforms = [audio.Filtfilt(a_coeffs=[0.1, 0.2, 0.3], b_coeffs=[0.1, 0.2, 0.3])]
1053
1249
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
1250
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
1251
+ ... print(item["audio"].shape, item["audio"].dtype)
1252
+ ... break
1253
+ (16,) float64
1254
+ >>>
1255
+ >>> # Use the transform in eager mode
1256
+ >>> waveform = np.random.random([16]) # 1 sample
1257
+ >>> output = audio.Filtfilt(a_coeffs=[0.1, 0.2, 0.3], b_coeffs=[0.1, 0.2, 0.3])(waveform)
1258
+ >>> print(output.shape, output.dtype)
1259
+ (16,) float64
1054
1260
 
1055
1261
  Tutorial Examples:
1056
1262
  - `Illustration of audio transforms
1057
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
1263
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
1058
1264
  """
1059
1265
 
1060
1266
  @check_lfilter
@@ -1079,7 +1285,7 @@ class Flanger(AudioTensorOperation):
1079
1285
  """
1080
1286
  Apply a flanger effect to the audio.
1081
1287
 
1082
- Similar to `SoX <http://sox.sourceforge.net/sox.html>`_ implementation.
1288
+ Similar to `SoX <https://sourceforge.net/projects/sox/>`_ implementation.
1083
1289
 
1084
1290
  Args:
1085
1291
  sample_rate (int): Sampling rate of the waveform, e.g. 44100 (Hz).
@@ -1121,14 +1327,25 @@ class Flanger(AudioTensorOperation):
1121
1327
  >>> import mindspore.dataset as ds
1122
1328
  >>> import mindspore.dataset.audio as audio
1123
1329
  >>>
1124
- >>> waveform = np.array([[2.716064453125e-03, 6.34765625e-03], [9.246826171875e-03, 1.0894775390625e-02]])
1330
+ >>> # Use the transform in dataset pipeline mode
1331
+ >>> waveform = np.random.random([5, 4, 16]) # 5 samples
1125
1332
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
1126
1333
  >>> transforms = [audio.Flanger(44100)]
1127
1334
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
1335
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
1336
+ ... print(item["audio"].shape, item["audio"].dtype)
1337
+ ... break
1338
+ (4, 16) float64
1339
+ >>>
1340
+ >>> # Use the transform in eager mode
1341
+ >>> waveform = np.random.random([4, 16]) # 1 sample
1342
+ >>> output = audio.Flanger(44100)(waveform)
1343
+ >>> print(output.shape, output.dtype)
1344
+ (4, 16) float64
1128
1345
 
1129
1346
  Tutorial Examples:
1130
1347
  - `Illustration of audio transforms
1131
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
1348
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
1132
1349
  """
1133
1350
 
1134
1351
  @check_flanger
@@ -1187,14 +1404,25 @@ class FrequencyMasking(AudioTensorOperation):
1187
1404
  >>> import mindspore.dataset as ds
1188
1405
  >>> import mindspore.dataset.audio as audio
1189
1406
  >>>
1190
- >>> waveform = np.random.random([1, 3, 2])
1407
+ >>> # Use the transform in dataset pipeline mode
1408
+ >>> waveform = np.random.random([5, 16, 2]) # 5 samples
1191
1409
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
1192
- >>> transforms = [audio.FrequencyMasking(freq_mask_param=1)]
1410
+ >>> transforms = [audio.FrequencyMasking(iid_masks=True, freq_mask_param=1)]
1193
1411
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
1412
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
1413
+ ... print(item["audio"].shape, item["audio"].dtype)
1414
+ ... break
1415
+ (16, 2) float64
1416
+ >>>
1417
+ >>> # Use the transform in eager mode
1418
+ >>> waveform = np.random.random([16, 2]) # 1 sample
1419
+ >>> output = audio.FrequencyMasking(iid_masks=True, freq_mask_param=1)(waveform)
1420
+ >>> print(output.shape, output.dtype)
1421
+ (16, 2) float64
1194
1422
 
1195
1423
  Tutorial Examples:
1196
1424
  - `Illustration of audio transforms
1197
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
1425
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
1198
1426
 
1199
1427
  .. image:: frequency_masking_original.png
1200
1428
 
@@ -1232,14 +1460,25 @@ class Gain(AudioTensorOperation):
1232
1460
  >>> import mindspore.dataset as ds
1233
1461
  >>> import mindspore.dataset.audio as audio
1234
1462
  >>>
1235
- >>> waveform = np.array([[2.716064453125e-03, 6.34765625e-03], [9.246826171875e-03, 1.0894775390625e-02]])
1463
+ >>> # Use the transform in dataset pipeline mode
1464
+ >>> waveform = np.random.random([5, 8]) # 5 samples
1236
1465
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
1237
1466
  >>> transforms = [audio.Gain(1.2)]
1238
1467
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
1468
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
1469
+ ... print(item["audio"].shape, item["audio"].dtype)
1470
+ ... break
1471
+ (8,) float64
1472
+ >>>
1473
+ >>> # Use the transform in eager mode
1474
+ >>> waveform = np.random.random([8]) # 1 sample
1475
+ >>> output = audio.Gain(1.2)(waveform)
1476
+ >>> print(output.shape, output.dtype)
1477
+ (8,) float64
1239
1478
 
1240
1479
  Tutorial Examples:
1241
1480
  - `Illustration of audio transforms
1242
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
1481
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
1243
1482
  """
1244
1483
 
1245
1484
  @check_gain
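
Gain takes a level in decibels, so under the usual amplitude convention a gain of g dB corresponds to multiplying the waveform by 10**(g / 20) (about 1.148 for the 1.2 dB used above). A sketch that checks this numerically in eager mode; the 10**(g / 20) relationship is an assumption about the implementation, which is why the difference is printed rather than asserted.

    import numpy as np
    import mindspore.dataset.audio as audio

    gain_db = 1.2
    waveform = np.random.random([8])

    amplified = audio.Gain(gain_db)(waveform)
    expected = waveform * 10 ** (gain_db / 20)   # assumed dB-to-linear conversion

    print(np.abs(amplified - expected).max())    # expected ~0 if the assumption holds
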
@@ -1302,14 +1541,25 @@ class GriffinLim(AudioTensorOperation):
1302
1541
  >>> import mindspore.dataset as ds
1303
1542
  >>> import mindspore.dataset.audio as audio
1304
1543
  >>>
1305
- >>> waveform = np.random.random([201, 6])
1544
+ >>> # Use the transform in dataset pipeline mode
1545
+ >>> waveform = np.random.random([5, 201, 6]) # 5 samples
1306
1546
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
1307
1547
  >>> transforms = [audio.GriffinLim(n_fft=400)]
1308
1548
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
1549
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
1550
+ ... print(item["audio"].shape, item["audio"].dtype)
1551
+ ... break
1552
+ (1000,) float64
1553
+ >>>
1554
+ >>> # Use the transform in eager mode
1555
+ >>> waveform = np.random.random([201, 6]) # 1 sample
1556
+ >>> output = audio.GriffinLim(n_fft=400)(waveform)
1557
+ >>> print(output.shape, output.dtype)
1558
+ (1000,) float64
1309
1559
 
1310
1560
  Tutorial Examples:
1311
1561
  - `Illustration of audio transforms
1312
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
1562
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
1313
1563
  """
1314
1564
 
1315
1565
  @check_griffin_lim
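
The (201, 6) to (1000,) shapes in the GriffinLim example above are consistent with n_fft = 400 giving 400 // 2 + 1 = 201 frequency bins and a hop length of n_fft // 2 = 200, so 6 frames reconstruct (6 - 1) * 200 = 1000 samples. A sketch that spells out that arithmetic; the hop length being the default n_fft // 2 is an assumption, while the bin count and output length come from the example itself.

    import numpy as np
    import mindspore.dataset.audio as audio

    n_fft = 400
    n_frames = 6
    hop_length = n_fft // 2                     # assumed default hop length

    magnitudes = np.random.random([n_fft // 2 + 1, n_frames])   # (201, 6) magnitude spectrogram
    waveform = audio.GriffinLim(n_fft=n_fft)(magnitudes)

    print(magnitudes.shape, waveform.shape)     # (201, 6) (1000,)
    print((n_frames - 1) * hop_length)          # 1000, matching the reconstructed length
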
@@ -1336,7 +1586,7 @@ class HighpassBiquad(AudioTensorOperation):
1336
1586
  """
1337
1587
  Design biquad highpass filter and perform filtering.
1338
1588
 
1339
- Similar to `SoX <http://sox.sourceforge.net/sox.html>`_ implementation.
1589
+ Similar to `SoX <https://sourceforge.net/projects/sox/>`_ implementation.
1340
1590
 
1341
1591
  Args:
1342
1592
  sample_rate (int): Sampling rate of the waveform, e.g. 44100 (Hz), the value can't be 0.
@@ -1359,14 +1609,25 @@ class HighpassBiquad(AudioTensorOperation):
1359
1609
  >>> import mindspore.dataset as ds
1360
1610
  >>> import mindspore.dataset.audio as audio
1361
1611
  >>>
1362
- >>> waveform = np.array([[2.716064453125e-03, 6.34765625e-03], [9.246826171875e-03, 1.0894775390625e-02]])
1612
+ >>> # Use the transform in dataset pipeline mode
1613
+ >>> waveform = np.random.random([5, 16]) # 5 samples
1363
1614
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
1364
1615
  >>> transforms = [audio.HighpassBiquad(44100, 1500, 0.7)]
1365
1616
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
1617
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
1618
+ ... print(item["audio"].shape, item["audio"].dtype)
1619
+ ... break
1620
+ (16,) float64
1621
+ >>>
1622
+ >>> # Use the transform in eager mode
1623
+ >>> waveform = np.random.random([16]) # 1 sample
1624
+ >>> output = audio.HighpassBiquad(44100, 1500, 0.7)(waveform)
1625
+ >>> print(output.shape, output.dtype)
1626
+ (16,) float64
1366
1627
 
1367
1628
  Tutorial Examples:
1368
1629
  - `Illustration of audio transforms
1369
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
1630
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
1370
1631
  """
1371
1632
 
1372
1633
  @check_highpass_biquad
@@ -1429,14 +1690,25 @@ class InverseMelScale(AudioTensorOperation):
1429
1690
  >>> import mindspore.dataset as ds
1430
1691
  >>> import mindspore.dataset.audio as audio
1431
1692
  >>>
1432
- >>> waveform = np.random.randn(2, 2, 3, 2)
1693
+ >>> # Use the transform in dataset pipeline mode
1694
+ >>> waveform = np.random.randn(5, 8, 3, 2) # 5 samples
1433
1695
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
1434
1696
  >>> transforms = [audio.InverseMelScale(20, 3, 16000, 0, 8000, 10)]
1435
1697
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
1698
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
1699
+ ... print(item["audio"].shape, item["audio"].dtype)
1700
+ ... break
1701
+ (8, 20, 2) float64
1702
+ >>>
1703
+ >>> # Use the transform in eager mode
1704
+ >>> waveform = np.random.random([8, 3, 2]) # 1 sample
1705
+ >>> output = audio.InverseMelScale(20, 3, 16000, 0, 8000, 10)(waveform)
1706
+ >>> print(output.shape, output.dtype)
1707
+ (8, 20, 2) float64
1436
1708
 
1437
1709
  Tutorial Examples:
1438
1710
  - `Illustration of audio transforms
1439
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
1711
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
1440
1712
  """
1441
1713
 
1442
1714
  @check_inverse_mel_scale
@@ -1513,15 +1785,25 @@ class InverseSpectrogram(AudioTensorOperation):
1513
1785
  >>> import mindspore.dataset as ds
1514
1786
  >>> import mindspore.dataset.audio as audio
1515
1787
  >>>
1516
- >>> waveform = np.array([[[0.8236, 0.2049, 0.3335], [0.5933, 0.9911, 0.2482],
1517
- ... [0.3007, 0.9054, 0.7598], [0.5394, 0.2842, 0.5634], [0.6363, 0.2226, 0.2288]]])
1788
+ >>> # Use the transform in dataset pipeline mode
1789
+ >>> waveform = np.random.random([5, 400 // 2 + 1, 30, 2]) # 5 samples
1518
1790
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
1519
1791
  >>> transforms = [audio.InverseSpectrogram(1, 400, 400, 200)]
1520
1792
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
1793
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
1794
+ ... print(item["audio"].shape, item["audio"].dtype)
1795
+ ... break
1796
+ (1,) float64
1797
+ >>>
1798
+ >>> # Use the transform in eager mode
1799
+ >>> waveform = np.random.random([400 // 2 + 1, 30, 2]) # 1 sample
1800
+ >>> output = audio.InverseSpectrogram(1, 400, 400, 200)(waveform)
1801
+ >>> print(output.shape, output.dtype)
1802
+ (1,) float64
1521
1803
 
1522
1804
  Tutorial Examples:
1523
1805
  - `Illustration of audio transforms
1524
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
1806
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
1525
1807
  """
1526
1808
 
1527
1809
  @check_inverse_spectrogram
@@ -1601,14 +1883,25 @@ class LFCC(AudioTensorOperation):
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
  >>>
- >>> waveform = np.random.random([1, 1, 300])
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 10, 300])
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
  >>> transforms = [audio.LFCC()]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (10, 40, 2) float32
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([10, 300]) # 1 sample
+ >>> output = audio.LFCC()(waveform)
+ >>> print(output.shape, output.dtype)
+ (10, 40, 2) float32

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_lfcc
@@ -1673,16 +1966,25 @@ class LFilter(AudioTensorOperation):
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
  >>>
- >>> waveform = np.array([[2.716064453125e-03, 6.34765625e-03], [9.246826171875e-03, 1.0894775390625e-02]])
- >>> a_coeffs = [0.1, 0.2, 0.3]
- >>> b_coeffs = [0.1, 0.2, 0.3]
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 16]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
- >>> transforms = [audio.LFilter(a_coeffs, b_coeffs)]
+ >>> transforms = [audio.LFilter(a_coeffs=[0.1, 0.2, 0.3], b_coeffs=[0.3, 0.2, 0.1])]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (16,) float64
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([16]) # 1 sample
+ >>> output = audio.LFilter(a_coeffs=[0.1, 0.2, 0.3], b_coeffs=[0.3, 0.2, 0.1])(waveform)
+ >>> print(output.shape, output.dtype)
+ (16,) float64

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_lfilter
@@ -1706,7 +2008,7 @@ class LowpassBiquad(AudioTensorOperation):
  .. math::
  H(s) = \frac{1}{s^2 + \frac{s}{Q} + 1}

- Similar to `SoX <http://sox.sourceforge.net/sox.html>`_ implementation.
+ Similar to `SoX <https://sourceforge.net/projects/sox/>`_ implementation.

  Note:
  The shape of the audio waveform to be processed needs to be <..., time>.
@@ -1733,15 +2035,25 @@ class LowpassBiquad(AudioTensorOperation):
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
  >>>
- >>> waveform = np.array([[0.8236, 0.2049, 0.3335], [0.5933, 0.9911, 0.2482],
- ... [0.3007, 0.9054, 0.7598], [0.5394, 0.2842, 0.5634], [0.6363, 0.2226, 0.2288]])
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 10]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
  >>> transforms = [audio.LowpassBiquad(4000, 1500, 0.7)]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (10,) float64
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([10]) # 1 sample
+ >>> output = audio.LowpassBiquad(4000, 1500, 0.7)(waveform)
+ >>> print(output.shape, output.dtype)
+ (10,) float64

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_lowpass_biquad
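
LFCC, LFilter and LowpassBiquad above all operate on plain <..., time> waveforms, so their updated examples only differ in the constructor arguments. Because map() accepts a list of operations applied in sequence, such waveform effects can also be combined in one pipeline stage; the following is a hedged sketch assembled from transforms that appear elsewhere in this diff (LowpassBiquad and TrebleBiquad), not an excerpt from the package.

import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.audio as audio

waveforms = np.random.random([5, 20])                    # 5 mono waveforms, 20 samples each
dataset = ds.NumpySlicesDataset(data=waveforms, column_names=["audio"])

# A list of operations is applied in order: low-pass first, then treble tone control.
chain = [audio.LowpassBiquad(4000, 1500, 0.7),
         audio.TrebleBiquad(44100, 200.0)]
dataset = dataset.map(operations=chain, input_columns=["audio"])

for item in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
    print(item["audio"].shape, item["audio"].dtype)      # shape should stay (20,)
    break
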
@@ -1773,14 +2085,26 @@ class Magphase(AudioTensorOperation):
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
  >>>
- >>> waveform = np.random.random([2, 4, 2])
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 16, 2]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
  >>> transforms = [audio.Magphase()]
- >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"],
+ ... output_columns=["spect", "phase"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["spect"].shape, item["spect"].dtype)
+ ... break
+ (16,) float64
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([16, 2]) # 1 sample
+ >>> output = audio.Magphase()(waveform)
+ >>> print(output[0].shape, output[0].dtype)
+ (16,) float64

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_magphase
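
Magphase is the one transform in this stretch that returns two tensors, which is why its new pipeline example adds output_columns=["spect", "phase"] and the eager example indexes output[0]. Below is a hedged sketch of consuming both outputs in eager mode, assuming the call returns the (magnitude, phase) pair in that order and that the default power of 1.0 yields the plain magnitude sqrt(re^2 + im^2).

import numpy as np
import mindspore.dataset.audio as audio

# One sample: 16 bins, last dimension holds the real and imaginary parts.
complex_bins = np.random.random([16, 2])

magnitude, phase = audio.Magphase()(complex_bins)
print(magnitude.shape, phase.shape)                      # expected: (16,) (16,)

# Cross-check against the straightforward NumPy magnitude (assumption: power=1.0 default).
reference = np.sqrt(complex_bins[..., 0] ** 2 + complex_bins[..., 1] ** 2)
print(np.max(np.abs(magnitude - reference)))             # expected to be ~0
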
@@ -1815,14 +2139,25 @@ class MaskAlongAxis(AudioTensorOperation):
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
  >>>
- >>> waveform = np.random.random([1, 20, 20])
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 20, 20]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
  >>> transforms = [audio.MaskAlongAxis(0, 10, 0.5, 1)]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (20, 20) float64
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([20, 20]) # 1 sample
+ >>> output = audio.MaskAlongAxis(0, 10, 0.5, 1)(waveform)
+ >>> print(output.shape, output.dtype)
+ (20, 20) float64

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_mask_along_axis
@@ -1866,14 +2201,25 @@ class MaskAlongAxisIID(AudioTensorOperation):
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
  >>>
- >>> waveform= np.random.random([1, 20, 20])
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform= np.random.random([5, 20, 20]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
  >>> transforms = [audio.MaskAlongAxisIID(5, 0.5, 2)]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (20, 20) float64
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([20, 20]) # 1 sample
+ >>> output = audio.MaskAlongAxisIID(5, 0.5, 2)(waveform)
+ >>> print(output.shape, output.dtype)
+ (20, 20) float64

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_mask_along_axis_iid
@@ -1932,15 +2278,25 @@ class MelScale(AudioTensorOperation):
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
  >>>
- >>> waveform = np.array([[0.8236, 0.2049, 0.3335], [0.5933, 0.9911, 0.2482],
- ... [0.3007, 0.9054, 0.7598], [0.5394, 0.2842, 0.5634], [0.6363, 0.2226, 0.2288]])
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 201, 3]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
- >>> transforms = [audio.MelScale(4000, 1500, 0.7)]
+ >>> transforms = [audio.MelScale(200, 1500, 0.7)]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (200, 3) float64
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([201, 3]) # 1 sample
+ >>> output = audio.MelScale(200, 1500, 0.7)(waveform)
+ >>> print(output.shape, output.dtype)
+ (200, 3) float64

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_mel_scale
@@ -2029,19 +2385,33 @@ class MelSpectrogram(AudioTensorOperation):
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
  >>>
- >>> from mindspore.dataset.audio import WindowType, BorderType, NormType, MelType
  >>>
- >>> waveform = np.array([[[1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 0, 0, 1, 1, 2, 2, 3, 3, 4]]])
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 32]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
- >>> transforms = [audio.MelSpectrogram(sample_rate=16000, n_fft=16, win_length=16, hop_length=8, f_min=0.0, \
- ... f_max=5000.0, pad=0, n_mels=8, window=WindowType.HANN, power=2.0, \
- ... normalized=False, center=True, pad_mode=BorderType.REFLECT, \
- ... onesided=True, norm=NormType.SLANEY, mel_scale=MelType.HTK)]
+ >>> transforms = [audio.MelSpectrogram(sample_rate=16000, n_fft=16, win_length=16, hop_length=8, f_min=0.0,
+ ... f_max=5000.0, pad=0, n_mels=2, window=audio.WindowType.HANN, power=2.0,
+ ... normalized=False, center=True, pad_mode=audio.BorderType.REFLECT,
+ ... onesided=True, norm=audio.NormType.SLANEY, mel_scale=audio.MelType.HTK)]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (2, 5) float64
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([32]) # 1 sample
+ >>> output = audio.MelSpectrogram(sample_rate=16000, n_fft=16, win_length=16, hop_length=8, f_min=0.0,
+ ... f_max=5000.0, pad=0, n_mels=2, window=audio.WindowType.HANN, power=2.0,
+ ... normalized=False, center=True, pad_mode=audio.BorderType.REFLECT,
+ ... onesided=True, norm=audio.NormType.SLANEY,
+ ... mel_scale=audio.MelType.HTK)(waveform)
+ >>> print(output.shape, output.dtype)
+ (2, 5) float64

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_mel_spectrogram
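
Besides the input reshuffle, the MelSpectrogram hunk drops the separate enum imports and spells the enums through the already-imported audio namespace (audio.WindowType.HANN, audio.BorderType.REFLECT, audio.NormType.SLANEY, audio.MelType.HTK); the Resample and Vol hunks further down do the same. Both spellings refer to the same objects, as this small hedged sketch illustrates.

import mindspore.dataset.audio as audio
from mindspore.dataset.audio import WindowType   # the spelling used by the old examples

# The namespace-qualified form used in the new examples is the same enum member,
# so a single `audio` import is enough.
print(audio.WindowType.HANN is WindowType.HANN)  # True
print(audio.WindowType.HANN)
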
@@ -2120,15 +2490,25 @@ class MFCC(AudioTensorOperation):
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
  >>>
- >>> waveform = np.array([[0.8236, 0.2049, 0.3335], [0.5933, 0.9911, 0.2482],
- ... [0.3007, 0.9054, 0.7598], [0.5394, 0.2842, 0.5634], [0.6363, 0.2226, 0.2288]])
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 500]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
- >>> transforms = [audio.MFCC(4000, 1500, 2)]
+ >>> transforms = [audio.MFCC(4000, 128, 2)]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (128, 3) float32
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([500]) # 1 sample
+ >>> output = audio.MFCC(4000, 128, 2)(waveform)
+ >>> print(output.shape, output.dtype)
+ (128, 3) float32

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_mfcc
@@ -2189,14 +2569,25 @@ class MuLawDecoding(AudioTensorOperation):
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
  >>>
- >>> waveform = np.random.random([1, 3, 4])
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 3, 4]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
  >>> transforms = [audio.MuLawDecoding()]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (3, 4) float64
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([3, 4]) # 1 sample
+ >>> output = audio.MuLawDecoding()(waveform)
+ >>> print(output.shape, output.dtype)
+ (3, 4) float64

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_mu_law_coding
@@ -2227,14 +2618,25 @@ class MuLawEncoding(AudioTensorOperation):
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
  >>>
- >>> waveform = np.random.random([1, 3, 4])
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 3, 4]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
  >>> transforms = [audio.MuLawEncoding()]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (3, 4) int32
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([3, 4]) # 1 sample
+ >>> output = audio.MuLawEncoding()(waveform)
+ >>> print(output.shape, output.dtype)
+ (3, 4) int32

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_mu_law_coding
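
The expected outputs in the two mu-law hunks make the dtype contract explicit: MuLawEncoding produces int32 codes and MuLawDecoding returns floating-point samples. Below is a hedged round-trip sketch built only from the eager calls shown above (mu-law companding is quantized, so the reconstruction is close to, but not exactly, the input).

import numpy as np
import mindspore.dataset.audio as audio

waveform = np.random.random([3, 4]).astype(np.float32)   # values in [0, 1), within the [-1, 1] range

codes = audio.MuLawEncoding()(waveform)                   # quantized integer codes
restored = audio.MuLawDecoding()(codes)                   # back to floating point

print(codes.dtype, restored.dtype)                        # int32 and a float dtype, per the docstrings
print(np.max(np.abs(restored - waveform)))                # small but nonzero quantization error
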
@@ -2250,7 +2652,7 @@ class Overdrive(AudioTensorOperation):
  """
  Apply an overdrive effect to the audio waveform.

- Similar to `SoX <http://sox.sourceforge.net/sox.html>`_ implementation.
+ Similar to `SoX <https://sourceforge.net/projects/sox/>`_ implementation.

  Args:
  gain (float, optional): Desired gain at the boost (or attenuation) in dB, in range of [0, 100].
@@ -2273,14 +2675,25 @@ class Overdrive(AudioTensorOperation):
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
  >>>
- >>> waveform = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 10]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
  >>> transforms = [audio.Overdrive()]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (10,) float64
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([10]) # 1 sample
+ >>> output = audio.Overdrive()(waveform)
+ >>> print(output.shape, output.dtype)
+ (10,) float64

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_overdrive
@@ -2297,7 +2710,7 @@ class Phaser(AudioTensorOperation):
  """
  Apply a phasing effect to the audio.

- Similar to `SoX <http://sox.sourceforge.net/sox.html>`_ implementation.
+ Similar to `SoX <https://sourceforge.net/projects/sox/>`_ implementation.

  Args:
  sample_rate (int): Sampling rate of the waveform, e.g. 44100 (Hz).
@@ -2335,14 +2748,25 @@ class Phaser(AudioTensorOperation):
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
  >>>
- >>> waveform = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 12]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
  >>> transforms = [audio.Phaser(44100)]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (12,) float64
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([12]) # 1 sample
+ >>> output = audio.Phaser(44100)(waveform)
+ >>> print(output.shape, output.dtype)
+ (12,) float64

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_phaser
@@ -2384,15 +2808,25 @@ class PhaseVocoder(AudioTensorOperation):
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
  >>>
- >>> waveform = np.random.random([2, 44, 10, 2])
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 44, 10, 2]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
- >>> phase_advance = np.random.random([44, 1])
- >>> transforms = [audio.PhaseVocoder(rate=2, phase_advance=phase_advance)]
+ >>> transforms = [audio.PhaseVocoder(rate=2, phase_advance=np.random.random([44, 1]))]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (44, 5, 2) float64
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([44, 10, 2]) # 1 sample
+ >>> output = audio.PhaseVocoder(rate=2, phase_advance=np.random.random([44, 1]))(waveform)
+ >>> print(output.shape, output.dtype)
+ (44, 5, 2) float64

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_phase_vocoder
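
PhaseVocoder above and TimeStretch further down both consume a spectrogram whose trailing dimension of size 2 stores the real and imaginary parts, which is why the new examples use inputs shaped [..., freq, num_frame, 2]. If the starting point is a complex NumPy STFT, plain NumPy is enough to produce that layout; this is a hedged preparation sketch, not package code.

import numpy as np

# A complex spectrogram: 44 frequency bins x 10 frames.
complex_spec = np.random.random([44, 10]) + 1j * np.random.random([44, 10])

# Stack real and imaginary parts into a trailing axis of size 2 to match the
# <..., freq, num_frame, complex=2> layout used by the examples above.
spec_as_real = np.stack([complex_spec.real, complex_spec.imag], axis=-1)
print(spec_as_real.shape)   # (44, 10, 2)
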
@@ -2439,19 +2873,28 @@ class PitchShift(AudioTensorOperation):

  Examples:
  >>> import numpy as np
- >>>
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
- >>> from mindspore.dataset.audio import WindowType
  >>>
- >>> waveform = np.random.random([1, 1, 300])
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 8, 30]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
- >>> transforms = [audio.PitchShift(sample_rate=16000,n_steps=4)]
+ >>> transforms = [audio.PitchShift(sample_rate=16000, n_steps=4)]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (8, 30) float64
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([8, 30]) # 1 sample
+ >>> output = audio.PitchShift(sample_rate=16000, n_steps=4)(waveform)
+ >>> print(output.shape, output.dtype)
+ (8, 30) float64

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_pitch_shift
@@ -2512,18 +2955,30 @@ class Resample(AudioTensorOperation):
  >>> import numpy as np
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
- >>> from mindspore.dataset.audio import ResampleMethod
  >>>
- >>> waveform = np.random.random([1, 30])
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 16, 30]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
  >>> transforms = [audio.Resample(orig_freq=48000, new_freq=16000,
- ... resample_method=ResampleMethod.SINC_INTERPOLATION,
+ ... resample_method=audio.ResampleMethod.SINC_INTERPOLATION,
  ... lowpass_filter_width=6, rolloff=0.99, beta=None)]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (16, 10) float64
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([16, 30]) # 1 sample
+ >>> output = audio.Resample(orig_freq=48000, new_freq=16000,
+ ... resample_method=audio.ResampleMethod.SINC_INTERPOLATION,
+ ... lowpass_filter_width=6, rolloff=0.99, beta=None)(waveform)
+ >>> print(output.shape, output.dtype)
+ (16, 10) float64

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_resample
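
The new expected shape in the Resample example, (16, 10) from a (16, 30) input, matches the 48 kHz to 16 kHz ratio: every three input samples map to one output sample. A small check of that arithmetic (assuming the output length is the input length scaled by new_freq / orig_freq and rounded up):

import math

orig_freq, new_freq = 48000, 16000
num_samples = 30

expected_len = math.ceil(num_samples * new_freq / orig_freq)
print(expected_len)   # 10, the last dimension of the (16, 10) shape printed above
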
@@ -2547,7 +3002,7 @@ class RiaaBiquad(AudioTensorOperation):
  """
  Apply RIAA vinyl playback equalization.

- Similar to `SoX <http://sox.sourceforge.net/sox.html>`_ implementation.
+ Similar to `SoX <https://sourceforge.net/projects/sox/>`_ implementation.

  Args:
  sample_rate (int): sampling rate of the waveform, e.g. 44100 (Hz),
@@ -2565,14 +3020,25 @@ class RiaaBiquad(AudioTensorOperation):
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
  >>>
- >>> waveform = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64)
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 24]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
  >>> transforms = [audio.RiaaBiquad(44100)]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (24,) float64
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([24]) # 1 sample
+ >>> output = audio.RiaaBiquad(44100)(waveform)
+ >>> print(output.shape, output.dtype)
+ (24,) float64

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_riaa_biquad
@@ -2612,14 +3078,25 @@ class SlidingWindowCmn(AudioTensorOperation):
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
  >>>
- >>> waveform = np.array([[[1, 2, 3], [4, 5, 6]]], dtype=np.float64)
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 16, 3]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
  >>> transforms = [audio.SlidingWindowCmn()]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (16, 3) float64
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([16, 3]) # 1 sample
+ >>> output = audio.SlidingWindowCmn()(waveform)
+ >>> print(output.shape, output.dtype)
+ (16, 3) float64

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_sliding_window_cmn
@@ -2678,14 +3155,25 @@ class SpectralCentroid(TensorOperation):
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
  >>>
- >>> waveform = np.random.random([5, 10, 20])
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 10, 20]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
  >>> transforms = [audio.SpectralCentroid(44100)]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (10, 1, 1) float64
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([10, 20]) # 1 sample
+ >>> output = audio.SpectralCentroid(44100)(waveform)
+ >>> print(output.shape, output.dtype)
+ (10, 1, 1) float64

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_spectral_centroid
@@ -2751,14 +3239,25 @@ class Spectrogram(TensorOperation):
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
  >>>
- >>> waveform = np.random.random([5, 10, 20])
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 10, 20]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
  >>> transforms = [audio.Spectrogram()]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (10, 201, 1) float64
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([10, 20]) # 1 sample
+ >>> output = audio.Spectrogram()(waveform)
+ >>> print(output.shape, output.dtype)
+ (10, 201, 1) float64

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_spectrogram
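
The (10, 201, 1) shape expected in the new Spectrogram example follows from the transform's defaults: an n_fft of 400 gives 400 // 2 + 1 = 201 one-sided frequency bins, and a centered 20-sample input fits a single analysis frame. A short check of the bin count (assuming the default n_fft of 400 and one-sided output):

n_fft = 400                 # assumed default of audio.Spectrogram()
freq_bins = n_fft // 2 + 1  # one-sided spectrum
print(freq_bins)            # 201, the middle dimension of (10, 201, 1)
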
@@ -2818,14 +3317,25 @@ class TimeMasking(AudioTensorOperation):
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
  >>>
- >>> waveform = np.random.random([4, 3, 2])
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 16, 2]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
  >>> transforms = [audio.TimeMasking(time_mask_param=1)]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (16, 2) float64
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([16, 2]) # 1 sample
+ >>> output = audio.TimeMasking(time_mask_param=1)(waveform)
+ >>> print(output.shape, output.dtype)
+ (16, 2) float64

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_

  .. image:: time_masking_original.png

@@ -2876,14 +3386,25 @@ class TimeStretch(AudioTensorOperation):
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
  >>>
- >>> waveform = np.random.random([44, 10, 2])
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 16, 8, 2]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
  >>> transforms = [audio.TimeStretch()]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (1, 16, 8, 2) float64
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([16, 8, 2]) # 1 sample
+ >>> output = audio.TimeStretch()(waveform)
+ >>> print(output.shape, output.dtype)
+ (1, 16, 8, 2) float64

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_

  .. image:: time_stretch_rate1.5.png

@@ -2910,7 +3431,7 @@ class TrebleBiquad(AudioTensorOperation):
  """
  Design a treble tone-control effect.

- Similar to `SoX <http://sox.sourceforge.net/sox.html>`_ implementation.
+ Similar to `SoX <https://sourceforge.net/projects/sox/>`_ implementation.

  Args:
  sample_rate (int): Sampling rate (in Hz), which can't be zero.
@@ -2936,14 +3457,25 @@ class TrebleBiquad(AudioTensorOperation):
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
  >>>
- >>> waveform = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float64)
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 20]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
  >>> transforms = [audio.TrebleBiquad(44100, 200.0)]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (20,) float64
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([20]) # 1 sample
+ >>> output = audio.TrebleBiquad(44100, 200.0)(waveform)
+ >>> print(output.shape, output.dtype)
+ (20,) float64

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_treble_biquad
@@ -2964,7 +3496,7 @@ class Vad(AudioTensorOperation):

  Attempt to trim silence and quiet background sounds from the ends of recordings of speech.

- Similar to `SoX <http://sox.sourceforge.net/sox.html>`_ implementation.
+ Similar to `SoX <https://sourceforge.net/projects/sox/>`_ implementation.

  Args:
  sample_rate (int): Sampling rate of audio signal.
@@ -3042,14 +3574,25 @@ class Vad(AudioTensorOperation):
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
  >>>
- >>> waveform = np.random.random([2, 1000])
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 1000]) # 5 samples
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
  >>> transforms = [audio.Vad(sample_rate=600)]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (660,) float64
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([1000]) # 1 sample
+ >>> output = audio.Vad(sample_rate=600)(waveform)
+ >>> print(output.shape, output.dtype)
+ (660,) float64

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_vad
@@ -3115,16 +3658,26 @@ class Vol(AudioTensorOperation):
  >>> import numpy as np
  >>> import mindspore.dataset as ds
  >>> import mindspore.dataset.audio as audio
- >>> from mindspore.dataset.audio import GainType
  >>>
- >>> waveform = np.random.random([20, 30])
+ >>> # Use the transform in dataset pipeline mode
+ >>> waveform = np.random.random([5, 30]) # 5 sample
  >>> numpy_slices_dataset = ds.NumpySlicesDataset(data=waveform, column_names=["audio"])
- >>> transforms = [audio.Vol(gain=10, gain_type=GainType.DB)]
+ >>> transforms = [audio.Vol(gain=10, gain_type=audio.GainType.DB)]
  >>> numpy_slices_dataset = numpy_slices_dataset.map(operations=transforms, input_columns=["audio"])
+ >>> for item in numpy_slices_dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
+ ... print(item["audio"].shape, item["audio"].dtype)
+ ... break
+ (30,) float64
+ >>>
+ >>> # Use the transform in eager mode
+ >>> waveform = np.random.random([30]) # 1 sample
+ >>> output = audio.Vol(gain=10, gain_type=audio.GainType.DB)(waveform)
+ >>> print(output.shape, output.dtype)
+ (30,) float64

  Tutorial Examples:
  - `Illustration of audio transforms
- <https://www.mindspore.cn/docs/en/r2.2/api_python/samples/dataset/audio_gallery.html>`_
+ <https://www.mindspore.cn/docs/en/master/api_python/samples/dataset/audio_gallery.html>`_
  """

  @check_vol