mindspore-2.4.0-cp311-cp311-macosx_10_15_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of mindspore might be problematic.

Files changed (1387)
  1. mindspore/.commit_id +1 -0
  2. mindspore/__init__.py +53 -0
  3. mindspore/_c_dataengine.cpython-311-darwin.so +0 -0
  4. mindspore/_c_expression.cpython-311-darwin.so +0 -0
  5. mindspore/_c_mindrecord.cpython-311-darwin.so +0 -0
  6. mindspore/_check_jit_forbidden_api.py +106 -0
  7. mindspore/_checkparam.py +1419 -0
  8. mindspore/_extends/__init__.py +23 -0
  9. mindspore/_extends/builtin_operations.py +224 -0
  10. mindspore/_extends/graph_kernel/__init__.py +17 -0
  11. mindspore/_extends/graph_kernel/model/__init__.py +19 -0
  12. mindspore/_extends/graph_kernel/model/graph_parallel.py +311 -0
  13. mindspore/_extends/graph_kernel/model/graph_split.py +1348 -0
  14. mindspore/_extends/graph_kernel/model/model.py +553 -0
  15. mindspore/_extends/graph_kernel/model/model_builder.py +216 -0
  16. mindspore/_extends/graph_kernel/parallel_estimate.py +60 -0
  17. mindspore/_extends/graph_kernel/splitter.py +140 -0
  18. mindspore/_extends/graph_kernel/utils.py +28 -0
  19. mindspore/_extends/parallel_compile/__init__.py +19 -0
  20. mindspore/_extends/parallel_compile/akg_compiler/__init__.py +19 -0
  21. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +269 -0
  22. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +529 -0
  23. mindspore/_extends/parallel_compile/akg_compiler/compiler.py +56 -0
  24. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
  25. mindspore/_extends/parallel_compile/akg_compiler/get_file_path.py +36 -0
  26. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +556 -0
  27. mindspore/_extends/parallel_compile/akg_compiler/util.py +159 -0
  28. mindspore/_extends/parse/__init__.py +49 -0
  29. mindspore/_extends/parse/compile_config.py +299 -0
  30. mindspore/_extends/parse/namespace.py +136 -0
  31. mindspore/_extends/parse/parser.py +1448 -0
  32. mindspore/_extends/parse/resources.py +213 -0
  33. mindspore/_extends/parse/standard_method.py +4475 -0
  34. mindspore/_extends/parse/trope.py +97 -0
  35. mindspore/_extends/pijit/__init__.py +23 -0
  36. mindspore/_extends/pijit/pijit_func_white_list.py +669 -0
  37. mindspore/_extends/remote/__init__.py +19 -0
  38. mindspore/_extends/remote/kernel_build_server.py +199 -0
  39. mindspore/_extends/remote/kernel_build_server_akg.py +55 -0
  40. mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
  41. mindspore/_extends/remote/kernel_build_server_ascend.py +75 -0
  42. mindspore/_extends/utils.py +68 -0
  43. mindspore/_install_custom.py +43 -0
  44. mindspore/_profiler.py +30 -0
  45. mindspore/amp.py +433 -0
  46. mindspore/boost/__init__.py +42 -0
  47. mindspore/boost/adasum.py +319 -0
  48. mindspore/boost/base.py +535 -0
  49. mindspore/boost/boost.py +400 -0
  50. mindspore/boost/boost_cell_wrapper.py +790 -0
  51. mindspore/boost/dim_reduce.py +323 -0
  52. mindspore/boost/grad_accumulation.py +79 -0
  53. mindspore/boost/grad_freeze.py +382 -0
  54. mindspore/boost/group_loss_scale_manager.py +166 -0
  55. mindspore/boost/less_batch_normalization.py +174 -0
  56. mindspore/common/__init__.py +86 -0
  57. mindspore/common/_auto_dynamic.py +68 -0
  58. mindspore/common/_decorator.py +50 -0
  59. mindspore/common/_jit_fallback_utils.py +110 -0
  60. mindspore/common/_monad.py +25 -0
  61. mindspore/common/_pijit_context.py +190 -0
  62. mindspore/common/_register_for_adapter.py +74 -0
  63. mindspore/common/_register_for_recompute.py +48 -0
  64. mindspore/common/_register_for_tensor.py +46 -0
  65. mindspore/common/_stub_tensor.py +210 -0
  66. mindspore/common/_tensor_overload.py +139 -0
  67. mindspore/common/_utils.py +122 -0
  68. mindspore/common/api.py +2064 -0
  69. mindspore/common/auto_dynamic_shape.py +507 -0
  70. mindspore/common/dtype.py +422 -0
  71. mindspore/common/dump.py +130 -0
  72. mindspore/common/file_system.py +48 -0
  73. mindspore/common/generator.py +254 -0
  74. mindspore/common/hook_handle.py +143 -0
  75. mindspore/common/initializer.py +880 -0
  76. mindspore/common/jit_config.py +98 -0
  77. mindspore/common/lazy_inline.py +240 -0
  78. mindspore/common/mindir_util.py +111 -0
  79. mindspore/common/mutable.py +234 -0
  80. mindspore/common/no_inline.py +54 -0
  81. mindspore/common/np_dtype.py +25 -0
  82. mindspore/common/parameter.py +1081 -0
  83. mindspore/common/recompute.py +292 -0
  84. mindspore/common/seed.py +260 -0
  85. mindspore/common/sparse_tensor.py +1175 -0
  86. mindspore/common/symbol.py +122 -0
  87. mindspore/common/tensor.py +5039 -0
  88. mindspore/communication/__init__.py +37 -0
  89. mindspore/communication/_comm_helper.py +501 -0
  90. mindspore/communication/_hccl_management.py +297 -0
  91. mindspore/communication/comm_func.py +1395 -0
  92. mindspore/communication/management.py +673 -0
  93. mindspore/config/op_info.config +533 -0
  94. mindspore/context.py +2077 -0
  95. mindspore/dataset/__init__.py +90 -0
  96. mindspore/dataset/audio/__init__.py +61 -0
  97. mindspore/dataset/audio/transforms.py +3690 -0
  98. mindspore/dataset/audio/utils.py +386 -0
  99. mindspore/dataset/audio/validators.py +1172 -0
  100. mindspore/dataset/callback/__init__.py +20 -0
  101. mindspore/dataset/callback/ds_callback.py +368 -0
  102. mindspore/dataset/callback/validators.py +32 -0
  103. mindspore/dataset/core/__init__.py +13 -0
  104. mindspore/dataset/core/config.py +1095 -0
  105. mindspore/dataset/core/datatypes.py +101 -0
  106. mindspore/dataset/core/py_util_helpers.py +65 -0
  107. mindspore/dataset/core/validator_helpers.py +781 -0
  108. mindspore/dataset/debug/__init__.py +21 -0
  109. mindspore/dataset/debug/debug_hook.py +97 -0
  110. mindspore/dataset/debug/pre_defined_hook.py +67 -0
  111. mindspore/dataset/engine/__init__.py +124 -0
  112. mindspore/dataset/engine/cache_admin.py +47 -0
  113. mindspore/dataset/engine/cache_client.py +129 -0
  114. mindspore/dataset/engine/datasets.py +4582 -0
  115. mindspore/dataset/engine/datasets_audio.py +911 -0
  116. mindspore/dataset/engine/datasets_standard_format.py +543 -0
  117. mindspore/dataset/engine/datasets_text.py +2161 -0
  118. mindspore/dataset/engine/datasets_user_defined.py +1184 -0
  119. mindspore/dataset/engine/datasets_vision.py +4816 -0
  120. mindspore/dataset/engine/iterators.py +371 -0
  121. mindspore/dataset/engine/obs/__init__.py +23 -0
  122. mindspore/dataset/engine/obs/config_loader.py +68 -0
  123. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +508 -0
  124. mindspore/dataset/engine/obs/util.py +482 -0
  125. mindspore/dataset/engine/offload.py +596 -0
  126. mindspore/dataset/engine/queue.py +304 -0
  127. mindspore/dataset/engine/samplers.py +895 -0
  128. mindspore/dataset/engine/serializer_deserializer.py +159 -0
  129. mindspore/dataset/engine/validators.py +2895 -0
  130. mindspore/dataset/text/__init__.py +51 -0
  131. mindspore/dataset/text/transforms.py +1703 -0
  132. mindspore/dataset/text/utils.py +715 -0
  133. mindspore/dataset/text/validators.py +642 -0
  134. mindspore/dataset/transforms/__init__.py +45 -0
  135. mindspore/dataset/transforms/c_transforms.py +638 -0
  136. mindspore/dataset/transforms/py_transforms.py +393 -0
  137. mindspore/dataset/transforms/py_transforms_util.py +255 -0
  138. mindspore/dataset/transforms/transforms.py +1260 -0
  139. mindspore/dataset/transforms/validators.py +410 -0
  140. mindspore/dataset/utils/__init__.py +19 -0
  141. mindspore/dataset/utils/browse_dataset.py +190 -0
  142. mindspore/dataset/utils/line_reader.py +126 -0
  143. mindspore/dataset/vision/__init__.py +65 -0
  144. mindspore/dataset/vision/c_transforms.py +2641 -0
  145. mindspore/dataset/vision/py_transforms.py +2120 -0
  146. mindspore/dataset/vision/py_transforms_util.py +1660 -0
  147. mindspore/dataset/vision/transforms.py +7295 -0
  148. mindspore/dataset/vision/utils.py +863 -0
  149. mindspore/dataset/vision/validators.py +1483 -0
  150. mindspore/default_config.py +2 -0
  151. mindspore/experimental/__init__.py +20 -0
  152. mindspore/experimental/es/__init__.py +22 -0
  153. mindspore/experimental/es/embedding_service.py +883 -0
  154. mindspore/experimental/es/embedding_service_layer.py +581 -0
  155. mindspore/experimental/llm_boost/__init__.py +21 -0
  156. mindspore/experimental/llm_boost/atb/__init__.py +23 -0
  157. mindspore/experimental/llm_boost/atb/boost_base.py +211 -0
  158. mindspore/experimental/llm_boost/atb/llama_boost.py +115 -0
  159. mindspore/experimental/llm_boost/atb/qwen_boost.py +101 -0
  160. mindspore/experimental/llm_boost/register.py +129 -0
  161. mindspore/experimental/llm_boost/utils.py +31 -0
  162. mindspore/experimental/map_parameter.py +309 -0
  163. mindspore/experimental/optim/__init__.py +40 -0
  164. mindspore/experimental/optim/adadelta.py +161 -0
  165. mindspore/experimental/optim/adagrad.py +168 -0
  166. mindspore/experimental/optim/adam.py +193 -0
  167. mindspore/experimental/optim/adamax.py +170 -0
  168. mindspore/experimental/optim/adamw.py +290 -0
  169. mindspore/experimental/optim/asgd.py +153 -0
  170. mindspore/experimental/optim/lr_scheduler.py +1371 -0
  171. mindspore/experimental/optim/nadam.py +157 -0
  172. mindspore/experimental/optim/optimizer.py +262 -0
  173. mindspore/experimental/optim/radam.py +194 -0
  174. mindspore/experimental/optim/rmsprop.py +154 -0
  175. mindspore/experimental/optim/rprop.py +164 -0
  176. mindspore/experimental/optim/sgd.py +156 -0
  177. mindspore/hal/__init__.py +40 -0
  178. mindspore/hal/_ascend.py +57 -0
  179. mindspore/hal/_base.py +57 -0
  180. mindspore/hal/_cpu.py +56 -0
  181. mindspore/hal/_gpu.py +57 -0
  182. mindspore/hal/contiguous_tensors_handle.py +175 -0
  183. mindspore/hal/device.py +356 -0
  184. mindspore/hal/event.py +179 -0
  185. mindspore/hal/memory.py +326 -0
  186. mindspore/hal/stream.py +357 -0
  187. mindspore/include/OWNERS +7 -0
  188. mindspore/include/api/allocator.h +97 -0
  189. mindspore/include/api/callback/callback.h +93 -0
  190. mindspore/include/api/callback/ckpt_saver.h +41 -0
  191. mindspore/include/api/callback/loss_monitor.h +33 -0
  192. mindspore/include/api/callback/lr_scheduler.h +51 -0
  193. mindspore/include/api/callback/time_monitor.h +34 -0
  194. mindspore/include/api/callback/train_accuracy.h +37 -0
  195. mindspore/include/api/cell.h +90 -0
  196. mindspore/include/api/cfg.h +82 -0
  197. mindspore/include/api/context.h +602 -0
  198. mindspore/include/api/data_type.h +47 -0
  199. mindspore/include/api/delegate.h +178 -0
  200. mindspore/include/api/delegate_api.h +75 -0
  201. mindspore/include/api/dual_abi_helper.h +208 -0
  202. mindspore/include/api/format.h +28 -0
  203. mindspore/include/api/graph.h +46 -0
  204. mindspore/include/api/kernel.h +58 -0
  205. mindspore/include/api/kernel_api.h +168 -0
  206. mindspore/include/api/metrics/accuracy.h +36 -0
  207. mindspore/include/api/metrics/metrics.h +41 -0
  208. mindspore/include/api/model.h +438 -0
  209. mindspore/include/api/model_group.h +91 -0
  210. mindspore/include/api/model_parallel_runner.h +168 -0
  211. mindspore/include/api/serialization.h +185 -0
  212. mindspore/include/api/status.h +192 -0
  213. mindspore/include/api/types.h +431 -0
  214. mindspore/include/api/visible.h +41 -0
  215. mindspore/include/c_api/context_c.h +179 -0
  216. mindspore/include/c_api/data_type_c.h +52 -0
  217. mindspore/include/c_api/format_c.h +46 -0
  218. mindspore/include/c_api/model_c.h +347 -0
  219. mindspore/include/c_api/status_c.h +79 -0
  220. mindspore/include/c_api/tensor_c.h +146 -0
  221. mindspore/include/c_api/types_c.h +67 -0
  222. mindspore/include/dataset/config.h +163 -0
  223. mindspore/include/dataset/constants.h +363 -0
  224. mindspore/include/dataset/execute.h +196 -0
  225. mindspore/include/dataset/text.h +1092 -0
  226. mindspore/include/dataset/transforms.h +638 -0
  227. mindspore/include/dataset/vision.h +2129 -0
  228. mindspore/include/dataset/vision_ascend.h +206 -0
  229. mindspore/include/dataset/vision_lite.h +625 -0
  230. mindspore/lib/libavcodec.59.dylib +0 -0
  231. mindspore/lib/libavdevice.59.dylib +0 -0
  232. mindspore/lib/libavfilter.8.dylib +0 -0
  233. mindspore/lib/libavformat.59.dylib +0 -0
  234. mindspore/lib/libavutil.57.dylib +0 -0
  235. mindspore/lib/libdnnl.2.dylib +0 -0
  236. mindspore/lib/libicudata.69.dylib +0 -0
  237. mindspore/lib/libicui18n.69.dylib +0 -0
  238. mindspore/lib/libicuuc.69.dylib +0 -0
  239. mindspore/lib/libmindspore_address_sorting.15.dylib +0 -0
  240. mindspore/lib/libmindspore_backend.dylib +0 -0
  241. mindspore/lib/libmindspore_common.dylib +0 -0
  242. mindspore/lib/libmindspore_core.dylib +0 -0
  243. mindspore/lib/libmindspore_glog.0.dylib +0 -0
  244. mindspore/lib/libmindspore_gpr.15.dylib +0 -0
  245. mindspore/lib/libmindspore_grpc++.1.dylib +0 -0
  246. mindspore/lib/libmindspore_grpc.15.dylib +0 -0
  247. mindspore/lib/libmindspore_np_dtype.dylib +0 -0
  248. mindspore/lib/libmindspore_ops.dylib +0 -0
  249. mindspore/lib/libmindspore_upb.15.dylib +0 -0
  250. mindspore/lib/libnnacl.dylib +0 -0
  251. mindspore/lib/libopencv_core.4.5.dylib +0 -0
  252. mindspore/lib/libopencv_imgcodecs.4.5.dylib +0 -0
  253. mindspore/lib/libopencv_imgproc.4.5.dylib +0 -0
  254. mindspore/lib/libps_cache.dylib +0 -0
  255. mindspore/lib/libswresample.4.dylib +0 -0
  256. mindspore/lib/libswscale.6.dylib +0 -0
  257. mindspore/lib/libtinyxml2.8.dylib +0 -0
  258. mindspore/log.py +633 -0
  259. mindspore/mindrecord/__init__.py +43 -0
  260. mindspore/mindrecord/common/__init__.py +17 -0
  261. mindspore/mindrecord/common/constant.py +20 -0
  262. mindspore/mindrecord/common/enums.py +44 -0
  263. mindspore/mindrecord/common/exceptions.py +311 -0
  264. mindspore/mindrecord/config.py +809 -0
  265. mindspore/mindrecord/filereader.py +174 -0
  266. mindspore/mindrecord/filewriter.py +722 -0
  267. mindspore/mindrecord/mindpage.py +210 -0
  268. mindspore/mindrecord/shardheader.py +141 -0
  269. mindspore/mindrecord/shardindexgenerator.py +74 -0
  270. mindspore/mindrecord/shardreader.py +117 -0
  271. mindspore/mindrecord/shardsegment.py +128 -0
  272. mindspore/mindrecord/shardutils.py +185 -0
  273. mindspore/mindrecord/shardwriter.py +237 -0
  274. mindspore/mindrecord/tools/__init__.py +17 -0
  275. mindspore/mindrecord/tools/cifar10.py +140 -0
  276. mindspore/mindrecord/tools/cifar100.py +153 -0
  277. mindspore/mindrecord/tools/cifar100_to_mr.py +185 -0
  278. mindspore/mindrecord/tools/cifar10_to_mr.py +177 -0
  279. mindspore/mindrecord/tools/csv_to_mr.py +200 -0
  280. mindspore/mindrecord/tools/imagenet_to_mr.py +206 -0
  281. mindspore/mindrecord/tools/mnist_to_mr.py +259 -0
  282. mindspore/mindrecord/tools/tfrecord_to_mr.py +360 -0
  283. mindspore/mint/__init__.py +1586 -0
  284. mindspore/mint/distributed/__init__.py +31 -0
  285. mindspore/mint/distributed/distributed.py +254 -0
  286. mindspore/mint/linalg/__init__.py +22 -0
  287. mindspore/mint/nn/__init__.py +757 -0
  288. mindspore/mint/nn/functional.py +679 -0
  289. mindspore/mint/nn/layer/__init__.py +39 -0
  290. mindspore/mint/nn/layer/activation.py +133 -0
  291. mindspore/mint/nn/layer/normalization.py +477 -0
  292. mindspore/mint/nn/layer/pooling.py +110 -0
  293. mindspore/mint/optim/__init__.py +24 -0
  294. mindspore/mint/optim/adamw.py +206 -0
  295. mindspore/mint/special/__init__.py +63 -0
  296. mindspore/multiprocessing/__init__.py +73 -0
  297. mindspore/nn/__init__.py +47 -0
  298. mindspore/nn/cell.py +2787 -0
  299. mindspore/nn/dynamic_lr.py +482 -0
  300. mindspore/nn/grad/__init__.py +21 -0
  301. mindspore/nn/grad/cell_grad.py +196 -0
  302. mindspore/nn/layer/__init__.py +63 -0
  303. mindspore/nn/layer/activation.py +1822 -0
  304. mindspore/nn/layer/basic.py +1629 -0
  305. mindspore/nn/layer/channel_shuffle.py +90 -0
  306. mindspore/nn/layer/combined.py +248 -0
  307. mindspore/nn/layer/container.py +734 -0
  308. mindspore/nn/layer/conv.py +1505 -0
  309. mindspore/nn/layer/dense.py +204 -0
  310. mindspore/nn/layer/embedding.py +869 -0
  311. mindspore/nn/layer/image.py +661 -0
  312. mindspore/nn/layer/math.py +1069 -0
  313. mindspore/nn/layer/normalization.py +1273 -0
  314. mindspore/nn/layer/padding.py +880 -0
  315. mindspore/nn/layer/pooling.py +2302 -0
  316. mindspore/nn/layer/rnn_cells.py +388 -0
  317. mindspore/nn/layer/rnns.py +849 -0
  318. mindspore/nn/layer/thor_layer.py +963 -0
  319. mindspore/nn/layer/timedistributed.py +155 -0
  320. mindspore/nn/layer/transformer.py +823 -0
  321. mindspore/nn/learning_rate_schedule.py +512 -0
  322. mindspore/nn/loss/__init__.py +36 -0
  323. mindspore/nn/loss/loss.py +2924 -0
  324. mindspore/nn/metrics.py +53 -0
  325. mindspore/nn/optim/__init__.py +45 -0
  326. mindspore/nn/optim/_dist_optimizer_registry.py +111 -0
  327. mindspore/nn/optim/ada_grad.py +217 -0
  328. mindspore/nn/optim/adadelta.py +206 -0
  329. mindspore/nn/optim/adafactor.py +448 -0
  330. mindspore/nn/optim/adam.py +1297 -0
  331. mindspore/nn/optim/adamax.py +220 -0
  332. mindspore/nn/optim/adasum.py +548 -0
  333. mindspore/nn/optim/asgd.py +216 -0
  334. mindspore/nn/optim/ftrl.py +401 -0
  335. mindspore/nn/optim/lamb.py +296 -0
  336. mindspore/nn/optim/lars.py +202 -0
  337. mindspore/nn/optim/lazyadam.py +533 -0
  338. mindspore/nn/optim/momentum.py +239 -0
  339. mindspore/nn/optim/optimizer.py +1034 -0
  340. mindspore/nn/optim/proximal_ada_grad.py +242 -0
  341. mindspore/nn/optim/rmsprop.py +264 -0
  342. mindspore/nn/optim/rprop.py +251 -0
  343. mindspore/nn/optim/sgd.py +237 -0
  344. mindspore/nn/optim/tft_wrapper.py +127 -0
  345. mindspore/nn/optim/thor.py +1310 -0
  346. mindspore/nn/probability/__init__.py +22 -0
  347. mindspore/nn/probability/bijector/__init__.py +35 -0
  348. mindspore/nn/probability/bijector/bijector.py +337 -0
  349. mindspore/nn/probability/bijector/exp.py +65 -0
  350. mindspore/nn/probability/bijector/gumbel_cdf.py +144 -0
  351. mindspore/nn/probability/bijector/invert.py +126 -0
  352. mindspore/nn/probability/bijector/power_transform.py +196 -0
  353. mindspore/nn/probability/bijector/scalar_affine.py +167 -0
  354. mindspore/nn/probability/bijector/softplus.py +189 -0
  355. mindspore/nn/probability/bnn_layers/__init__.py +29 -0
  356. mindspore/nn/probability/bnn_layers/_util.py +46 -0
  357. mindspore/nn/probability/bnn_layers/bnn_cell_wrapper.py +112 -0
  358. mindspore/nn/probability/bnn_layers/conv_variational.py +267 -0
  359. mindspore/nn/probability/bnn_layers/dense_variational.py +302 -0
  360. mindspore/nn/probability/bnn_layers/layer_distribution.py +123 -0
  361. mindspore/nn/probability/distribution/__init__.py +56 -0
  362. mindspore/nn/probability/distribution/_utils/__init__.py +34 -0
  363. mindspore/nn/probability/distribution/_utils/custom_ops.py +96 -0
  364. mindspore/nn/probability/distribution/_utils/utils.py +362 -0
  365. mindspore/nn/probability/distribution/bernoulli.py +334 -0
  366. mindspore/nn/probability/distribution/beta.py +391 -0
  367. mindspore/nn/probability/distribution/categorical.py +435 -0
  368. mindspore/nn/probability/distribution/cauchy.py +383 -0
  369. mindspore/nn/probability/distribution/distribution.py +827 -0
  370. mindspore/nn/probability/distribution/exponential.py +350 -0
  371. mindspore/nn/probability/distribution/gamma.py +391 -0
  372. mindspore/nn/probability/distribution/geometric.py +335 -0
  373. mindspore/nn/probability/distribution/gumbel.py +257 -0
  374. mindspore/nn/probability/distribution/half_normal.py +133 -0
  375. mindspore/nn/probability/distribution/laplace.py +128 -0
  376. mindspore/nn/probability/distribution/log_normal.py +272 -0
  377. mindspore/nn/probability/distribution/logistic.py +379 -0
  378. mindspore/nn/probability/distribution/normal.py +336 -0
  379. mindspore/nn/probability/distribution/poisson.py +288 -0
  380. mindspore/nn/probability/distribution/student_t.py +149 -0
  381. mindspore/nn/probability/distribution/transformed_distribution.py +235 -0
  382. mindspore/nn/probability/distribution/uniform.py +375 -0
  383. mindspore/nn/reinforcement/__init__.py +24 -0
  384. mindspore/nn/reinforcement/_batch_read_write.py +142 -0
  385. mindspore/nn/reinforcement/_tensors_queue.py +152 -0
  386. mindspore/nn/reinforcement/tensor_array.py +145 -0
  387. mindspore/nn/sparse/__init__.py +23 -0
  388. mindspore/nn/sparse/sparse.py +147 -0
  389. mindspore/nn/wrap/__init__.py +49 -0
  390. mindspore/nn/wrap/cell_wrapper.py +968 -0
  391. mindspore/nn/wrap/grad_reducer.py +608 -0
  392. mindspore/nn/wrap/loss_scale.py +694 -0
  393. mindspore/numpy/__init__.py +121 -0
  394. mindspore/numpy/array_creations.py +2731 -0
  395. mindspore/numpy/array_ops.py +2629 -0
  396. mindspore/numpy/dtypes.py +185 -0
  397. mindspore/numpy/fft.py +966 -0
  398. mindspore/numpy/logic_ops.py +936 -0
  399. mindspore/numpy/math_ops.py +5911 -0
  400. mindspore/numpy/utils.py +214 -0
  401. mindspore/numpy/utils_const.py +565 -0
  402. mindspore/ops/__init__.py +56 -0
  403. mindspore/ops/_constants.py +30 -0
  404. mindspore/ops/_grad_experimental/__init__.py +31 -0
  405. mindspore/ops/_grad_experimental/grad_array_ops.py +830 -0
  406. mindspore/ops/_grad_experimental/grad_base.py +143 -0
  407. mindspore/ops/_grad_experimental/grad_comm_ops.py +714 -0
  408. mindspore/ops/_grad_experimental/grad_debug_ops.py +31 -0
  409. mindspore/ops/_grad_experimental/grad_implementations.py +203 -0
  410. mindspore/ops/_grad_experimental/grad_inner_ops.py +79 -0
  411. mindspore/ops/_grad_experimental/grad_math_ops.py +802 -0
  412. mindspore/ops/_grad_experimental/grad_nn_ops.py +231 -0
  413. mindspore/ops/_grad_experimental/grad_quant_ops.py +238 -0
  414. mindspore/ops/_grad_experimental/grad_sparse.py +342 -0
  415. mindspore/ops/_grad_experimental/grad_sparse_ops.py +399 -0
  416. mindspore/ops/_grad_experimental/taylor_rule.py +220 -0
  417. mindspore/ops/_op_impl/__init__.py +23 -0
  418. mindspore/ops/_op_impl/_custom_op/__init__.py +39 -0
  419. mindspore/ops/_op_impl/_custom_op/_basic.py +158 -0
  420. mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +279 -0
  421. mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +156 -0
  422. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +109 -0
  423. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +125 -0
  424. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +105 -0
  425. mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +124 -0
  426. mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +116 -0
  427. mindspore/ops/_op_impl/_custom_op/correction_mul.py +89 -0
  428. mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +196 -0
  429. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +366 -0
  430. mindspore/ops/_op_impl/_custom_op/dsd_impl.py +162 -0
  431. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +136 -0
  432. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +206 -0
  433. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +88 -0
  434. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +128 -0
  435. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +199 -0
  436. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +88 -0
  437. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +156 -0
  438. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +184 -0
  439. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +143 -0
  440. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +169 -0
  441. mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +548 -0
  442. mindspore/ops/_op_impl/_custom_op/img2col_impl.py +881 -0
  443. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +278 -0
  444. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +200 -0
  445. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +334 -0
  446. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +255 -0
  447. mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +222 -0
  448. mindspore/ops/_op_impl/_custom_op/matmul_dds_grad_impl.py +644 -0
  449. mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +488 -0
  450. mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +87 -0
  451. mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +129 -0
  452. mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +121 -0
  453. mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +352 -0
  454. mindspore/ops/_op_impl/aicpu/__init__.py +441 -0
  455. mindspore/ops/_op_impl/aicpu/abs.py +36 -0
  456. mindspore/ops/_op_impl/aicpu/acos.py +32 -0
  457. mindspore/ops/_op_impl/aicpu/acos_grad.py +33 -0
  458. mindspore/ops/_op_impl/aicpu/acosh.py +34 -0
  459. mindspore/ops/_op_impl/aicpu/acosh_grad.py +35 -0
  460. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d.py +34 -0
  461. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
  462. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d.py +39 -0
  463. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d_grad.py +39 -0
  464. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d.py +37 -0
  465. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d_grad.py +37 -0
  466. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d.py +42 -0
  467. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d_grad.py +152 -0
  468. mindspore/ops/_op_impl/aicpu/add.py +43 -0
  469. mindspore/ops/_op_impl/aicpu/add_n.py +41 -0
  470. mindspore/ops/_op_impl/aicpu/add_v2.py +40 -0
  471. mindspore/ops/_op_impl/aicpu/addcdiv.py +41 -0
  472. mindspore/ops/_op_impl/aicpu/addcmul.py +47 -0
  473. mindspore/ops/_op_impl/aicpu/adjust_contrastv2.py +32 -0
  474. mindspore/ops/_op_impl/aicpu/adjust_hue.py +31 -0
  475. mindspore/ops/_op_impl/aicpu/adjust_saturation.py +32 -0
  476. mindspore/ops/_op_impl/aicpu/affine_grid.py +33 -0
  477. mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
  478. mindspore/ops/_op_impl/aicpu/angle.py +31 -0
  479. mindspore/ops/_op_impl/aicpu/arg_max.py +75 -0
  480. mindspore/ops/_op_impl/aicpu/arg_min.py +75 -0
  481. mindspore/ops/_op_impl/aicpu/argmax_with_value.py +43 -0
  482. mindspore/ops/_op_impl/aicpu/argmin_with_value.py +43 -0
  483. mindspore/ops/_op_impl/aicpu/asin.py +32 -0
  484. mindspore/ops/_op_impl/aicpu/asin_grad.py +33 -0
  485. mindspore/ops/_op_impl/aicpu/asinh.py +34 -0
  486. mindspore/ops/_op_impl/aicpu/asinh_grad.py +35 -0
  487. mindspore/ops/_op_impl/aicpu/atanh.py +34 -0
  488. mindspore/ops/_op_impl/aicpu/avgpool_grad_v1.py +37 -0
  489. mindspore/ops/_op_impl/aicpu/avgpool_v1.py +36 -0
  490. mindspore/ops/_op_impl/aicpu/bartlett_window.py +36 -0
  491. mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -0
  492. mindspore/ops/_op_impl/aicpu/batch_norm_grad_grad.py +49 -0
  493. mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
  494. mindspore/ops/_op_impl/aicpu/bessel_i0.py +31 -0
  495. mindspore/ops/_op_impl/aicpu/betainc.py +31 -0
  496. mindspore/ops/_op_impl/aicpu/bias_add.py +44 -0
  497. mindspore/ops/_op_impl/aicpu/bias_add_grad.py +42 -0
  498. mindspore/ops/_op_impl/aicpu/bincount.py +33 -0
  499. mindspore/ops/_op_impl/aicpu/blackman_window.py +36 -0
  500. mindspore/ops/_op_impl/aicpu/broadcast_to.py +58 -0
  501. mindspore/ops/_op_impl/aicpu/bucketize.py +34 -0
  502. mindspore/ops/_op_impl/aicpu/cache_swap_table.py +102 -0
  503. mindspore/ops/_op_impl/aicpu/cast.py +225 -0
  504. mindspore/ops/_op_impl/aicpu/cauchy.py +33 -0
  505. mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
  506. mindspore/ops/_op_impl/aicpu/check_numerics.py +33 -0
  507. mindspore/ops/_op_impl/aicpu/cholesky.py +32 -0
  508. mindspore/ops/_op_impl/aicpu/cholesky_inverse.py +31 -0
  509. mindspore/ops/_op_impl/aicpu/cholesky_solve.py +33 -0
  510. mindspore/ops/_op_impl/aicpu/choleskygrad.py +32 -0
  511. mindspore/ops/_op_impl/aicpu/coalesce.py +37 -0
  512. mindspore/ops/_op_impl/aicpu/col2im.py +38 -0
  513. mindspore/ops/_op_impl/aicpu/combined_non_max_suppression.py +42 -0
  514. mindspore/ops/_op_impl/aicpu/compare_and_bitpack.py +37 -0
  515. mindspore/ops/_op_impl/aicpu/complex.py +32 -0
  516. mindspore/ops/_op_impl/aicpu/complex_abs.py +31 -0
  517. mindspore/ops/_op_impl/aicpu/compute_accidental_hits.py +44 -0
  518. mindspore/ops/_op_impl/aicpu/concat.py +57 -0
  519. mindspore/ops/_op_impl/aicpu/concat_offset.py +42 -0
  520. mindspore/ops/_op_impl/aicpu/concat_offset_v1.py +31 -0
  521. mindspore/ops/_op_impl/aicpu/conj.py +42 -0
  522. mindspore/ops/_op_impl/aicpu/conjugate_transpose.py +58 -0
  523. mindspore/ops/_op_impl/aicpu/cos.py +34 -0
  524. mindspore/ops/_op_impl/aicpu/cosh.py +34 -0
  525. mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
  526. mindspore/ops/_op_impl/aicpu/crop_and_resize.py +69 -0
  527. mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_boxes.py +68 -0
  528. mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_image.py +38 -0
  529. mindspore/ops/_op_impl/aicpu/cross.py +42 -0
  530. mindspore/ops/_op_impl/aicpu/csr_sparse_matrix_to_dense.py +48 -0
  531. mindspore/ops/_op_impl/aicpu/csr_sparse_matrix_to_sparse_tensor.py +51 -0
  532. mindspore/ops/_op_impl/aicpu/ctc_greedy_decoder.py +35 -0
  533. mindspore/ops/_op_impl/aicpu/ctc_loss_v2.py +43 -0
  534. mindspore/ops/_op_impl/aicpu/ctc_loss_v2_grad.py +45 -0
  535. mindspore/ops/_op_impl/aicpu/ctcloss.py +38 -0
  536. mindspore/ops/_op_impl/aicpu/cummax.py +41 -0
  537. mindspore/ops/_op_impl/aicpu/cumprod.py +58 -0
  538. mindspore/ops/_op_impl/aicpu/cumsum.py +58 -0
  539. mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +36 -0
  540. mindspore/ops/_op_impl/aicpu/data_format_vec_permute.py +32 -0
  541. mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
  542. mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
  543. mindspore/ops/_op_impl/aicpu/dense_to_csr_sparse_matrix.py +49 -0
  544. mindspore/ops/_op_impl/aicpu/dense_to_dense_set_operation.py +45 -0
  545. mindspore/ops/_op_impl/aicpu/dense_to_sparse_set_operation.py +48 -0
  546. mindspore/ops/_op_impl/aicpu/depth_to_space.py +44 -0
  547. mindspore/ops/_op_impl/aicpu/diag.py +36 -0
  548. mindspore/ops/_op_impl/aicpu/diag_part.py +36 -0
  549. mindspore/ops/_op_impl/aicpu/diagonal.py +35 -0
  550. mindspore/ops/_op_impl/aicpu/digamma.py +31 -0
  551. mindspore/ops/_op_impl/aicpu/div.py +41 -0
  552. mindspore/ops/_op_impl/aicpu/div_no_nan.py +35 -0
  553. mindspore/ops/_op_impl/aicpu/dropout2d.py +42 -0
  554. mindspore/ops/_op_impl/aicpu/dropout3d.py +42 -0
  555. mindspore/ops/_op_impl/aicpu/dropout_genmask.py +41 -0
  556. mindspore/ops/_op_impl/aicpu/dropout_genmask_v3.py +32 -0
  557. mindspore/ops/_op_impl/aicpu/dynamic_stitch.py +42 -0
  558. mindspore/ops/_op_impl/aicpu/edit_distance.py +56 -0
  559. mindspore/ops/_op_impl/aicpu/eig.py +35 -0
  560. mindspore/ops/_op_impl/aicpu/embedding_lookup.py +102 -0
  561. mindspore/ops/_op_impl/aicpu/end_of_sequence.py +30 -0
  562. mindspore/ops/_op_impl/aicpu/environ_create.py +28 -0
  563. mindspore/ops/_op_impl/aicpu/environ_destroy_all.py +28 -0
  564. mindspore/ops/_op_impl/aicpu/environ_get.py +41 -0
  565. mindspore/ops/_op_impl/aicpu/environ_set.py +40 -0
  566. mindspore/ops/_op_impl/aicpu/eps.py +32 -0
  567. mindspore/ops/_op_impl/aicpu/equal.py +41 -0
  568. mindspore/ops/_op_impl/aicpu/exp.py +37 -0
  569. mindspore/ops/_op_impl/aicpu/expand.py +45 -0
  570. mindspore/ops/_op_impl/aicpu/expand_dims.py +42 -0
  571. mindspore/ops/_op_impl/aicpu/expm1.py +34 -0
  572. mindspore/ops/_op_impl/aicpu/extract_glimpse.py +35 -0
  573. mindspore/ops/_op_impl/aicpu/eye.py +44 -0
  574. mindspore/ops/_op_impl/aicpu/fft_with_size.py +47 -0
  575. mindspore/ops/_op_impl/aicpu/fill_diagonal.py +39 -0
  576. mindspore/ops/_op_impl/aicpu/fill_v2.py +58 -0
  577. mindspore/ops/_op_impl/aicpu/flatten.py +43 -0
  578. mindspore/ops/_op_impl/aicpu/floor_div.py +38 -0
  579. mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
  580. mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
  581. mindspore/ops/_op_impl/aicpu/fractional_avg_pool.py +41 -0
  582. mindspore/ops/_op_impl/aicpu/fractional_avg_pool_grad.py +41 -0
  583. mindspore/ops/_op_impl/aicpu/fractional_max_pool.py +41 -0
  584. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_grad_with_fixed_ksize.py +43 -0
  585. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +65 -0
  586. mindspore/ops/_op_impl/aicpu/fractional_max_pool_grad.py +42 -0
  587. mindspore/ops/_op_impl/aicpu/fractional_max_pool_grad_with_fixed_ksize.py +42 -0
  588. mindspore/ops/_op_impl/aicpu/fractional_max_pool_with_fixed_ksize.py +49 -0
  589. mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
  590. mindspore/ops/_op_impl/aicpu/fused_sparse_adam.py +46 -0
  591. mindspore/ops/_op_impl/aicpu/fused_sparse_ftrl.py +41 -0
  592. mindspore/ops/_op_impl/aicpu/fused_sparse_lazy_adam.py +46 -0
  593. mindspore/ops/_op_impl/aicpu/fused_sparse_proximal_adagrad.py +39 -0
  594. mindspore/ops/_op_impl/aicpu/gamma.py +38 -0
  595. mindspore/ops/_op_impl/aicpu/gather.py +46 -0
  596. mindspore/ops/_op_impl/aicpu/gather_d.py +79 -0
  597. mindspore/ops/_op_impl/aicpu/gather_d_grad_v2.py +79 -0
  598. mindspore/ops/_op_impl/aicpu/gather_grad.py +54 -0
  599. mindspore/ops/_op_impl/aicpu/gather_nd.py +56 -0
  600. mindspore/ops/_op_impl/aicpu/gcd.py +32 -0
  601. mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +38 -0
  602. mindspore/ops/_op_impl/aicpu/geqrf.py +32 -0
  603. mindspore/ops/_op_impl/aicpu/get_next.py +39 -0
  604. mindspore/ops/_op_impl/aicpu/glu.py +33 -0
  605. mindspore/ops/_op_impl/aicpu/glu_grad.py +34 -0
  606. mindspore/ops/_op_impl/aicpu/greater.py +41 -0
  607. mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
  608. mindspore/ops/_op_impl/aicpu/grid_sampler_2d.py +35 -0
  609. mindspore/ops/_op_impl/aicpu/grid_sampler_2d_grad.py +38 -0
  610. mindspore/ops/_op_impl/aicpu/grid_sampler_3d.py +34 -0
  611. mindspore/ops/_op_impl/aicpu/grid_sampler_3d_grad.py +38 -0
  612. mindspore/ops/_op_impl/aicpu/hamming_window.py +57 -0
  613. mindspore/ops/_op_impl/aicpu/hard_sigmoid.py +32 -0
  614. mindspore/ops/_op_impl/aicpu/hard_sigmoid_grad.py +33 -0
  615. mindspore/ops/_op_impl/aicpu/heaviside.py +40 -0
  616. mindspore/ops/_op_impl/aicpu/histogram.py +35 -0
  617. mindspore/ops/_op_impl/aicpu/hsv_to_rgb.py +32 -0
  618. mindspore/ops/_op_impl/aicpu/hypot.py +32 -0
  619. mindspore/ops/_op_impl/aicpu/identity.py +42 -0
  620. mindspore/ops/_op_impl/aicpu/identity_n.py +41 -0
  621. mindspore/ops/_op_impl/aicpu/igamma.py +30 -0
  622. mindspore/ops/_op_impl/aicpu/igammac.py +30 -0
  623. mindspore/ops/_op_impl/aicpu/igammagrada.py +30 -0
  624. mindspore/ops/_op_impl/aicpu/im2col.py +43 -0
  625. mindspore/ops/_op_impl/aicpu/imag.py +31 -0
  626. mindspore/ops/_op_impl/aicpu/index_fill.py +54 -0
  627. mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
  628. mindspore/ops/_op_impl/aicpu/init_data_set_queue.py +27 -0
  629. mindspore/ops/_op_impl/aicpu/inplace_index_add.py +39 -0
  630. mindspore/ops/_op_impl/aicpu/instance_norm_v2.py +41 -0
  631. mindspore/ops/_op_impl/aicpu/instance_norm_v2_grad.py +44 -0
  632. mindspore/ops/_op_impl/aicpu/is_finite.py +40 -0
  633. mindspore/ops/_op_impl/aicpu/is_inf.py +31 -0
  634. mindspore/ops/_op_impl/aicpu/is_nan.py +31 -0
  635. mindspore/ops/_op_impl/aicpu/kldivloss.py +34 -0
  636. mindspore/ops/_op_impl/aicpu/kldivlossgrad.py +35 -0
  637. mindspore/ops/_op_impl/aicpu/layer_norm_grad_grad.py +47 -0
  638. mindspore/ops/_op_impl/aicpu/lcm.py +32 -0
  639. mindspore/ops/_op_impl/aicpu/left_shift.py +38 -0
  640. mindspore/ops/_op_impl/aicpu/less.py +41 -0
  641. mindspore/ops/_op_impl/aicpu/less_equal.py +41 -0
  642. mindspore/ops/_op_impl/aicpu/lgamma.py +33 -0
  643. mindspore/ops/_op_impl/aicpu/linear_sum_assignment.py +57 -0
  644. mindspore/ops/_op_impl/aicpu/linspace.py +33 -0
  645. mindspore/ops/_op_impl/aicpu/list_diff.py +50 -0
  646. mindspore/ops/_op_impl/aicpu/log.py +37 -0
  647. mindspore/ops/_op_impl/aicpu/log1p.py +34 -0
  648. mindspore/ops/_op_impl/aicpu/log_matrix_determinant.py +31 -0
  649. mindspore/ops/_op_impl/aicpu/log_normal_reverse.py +33 -0
  650. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +37 -0
  651. mindspore/ops/_op_impl/aicpu/logical_xor.py +30 -0
  652. mindspore/ops/_op_impl/aicpu/logit.py +33 -0
  653. mindspore/ops/_op_impl/aicpu/logit_grad.py +34 -0
  654. mindspore/ops/_op_impl/aicpu/logspace.py +36 -0
  655. mindspore/ops/_op_impl/aicpu/lower_bound.py +47 -0
  656. mindspore/ops/_op_impl/aicpu/lstsq.py +34 -0
  657. mindspore/ops/_op_impl/aicpu/lu.py +39 -0
  658. mindspore/ops/_op_impl/aicpu/lu_solve.py +32 -0
  659. mindspore/ops/_op_impl/aicpu/lu_unpack.py +114 -0
  660. mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +49 -0
  661. mindspore/ops/_op_impl/aicpu/masked_fill.py +42 -0
  662. mindspore/ops/_op_impl/aicpu/masked_scatter.py +40 -0
  663. mindspore/ops/_op_impl/aicpu/masked_select.py +31 -0
  664. mindspore/ops/_op_impl/aicpu/masked_select_grad.py +35 -0
  665. mindspore/ops/_op_impl/aicpu/matmul.py +39 -0
  666. mindspore/ops/_op_impl/aicpu/matrix_band_part.py +59 -0
  667. mindspore/ops/_op_impl/aicpu/matrix_determinant.py +30 -0
  668. mindspore/ops/_op_impl/aicpu/matrix_diag_part_v3.py +54 -0
  669. mindspore/ops/_op_impl/aicpu/matrix_diag_v3.py +56 -0
  670. mindspore/ops/_op_impl/aicpu/matrix_exp.py +34 -0
  671. mindspore/ops/_op_impl/aicpu/matrix_inverse.py +31 -0
  672. mindspore/ops/_op_impl/aicpu/matrix_logarithm.py +31 -0
  673. mindspore/ops/_op_impl/aicpu/matrix_power.py +37 -0
  674. mindspore/ops/_op_impl/aicpu/matrix_set_diag_v3.py +54 -0
  675. mindspore/ops/_op_impl/aicpu/matrix_solve.py +35 -0
  676. mindspore/ops/_op_impl/aicpu/matrix_solve_ls.py +36 -0
  677. mindspore/ops/_op_impl/aicpu/matrix_triangular_solve.py +36 -0
  678. mindspore/ops/_op_impl/aicpu/max_pool3d_grad_with_argmax.py +60 -0
  679. mindspore/ops/_op_impl/aicpu/max_pool3d_with_argmax.py +59 -0
  680. mindspore/ops/_op_impl/aicpu/max_unpool2d.py +57 -0
  681. mindspore/ops/_op_impl/aicpu/max_unpool2d_grad.py +58 -0
  682. mindspore/ops/_op_impl/aicpu/max_unpool3d.py +57 -0
  683. mindspore/ops/_op_impl/aicpu/max_unpool3d_grad.py +58 -0
  684. mindspore/ops/_op_impl/aicpu/maximum_grad_grad.py +40 -0
  685. mindspore/ops/_op_impl/aicpu/maxpool_grad_v1.py +46 -0
  686. mindspore/ops/_op_impl/aicpu/maxpool_v1.py +42 -0
  687. mindspore/ops/_op_impl/aicpu/median.py +39 -0
  688. mindspore/ops/_op_impl/aicpu/median_grad.py +45 -0
  689. mindspore/ops/_op_impl/aicpu/meshgrid.py +41 -0
  690. mindspore/ops/_op_impl/aicpu/minimum_grad_grad.py +40 -0
  691. mindspore/ops/_op_impl/aicpu/mirror_pad.py +50 -0
  692. mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +48 -0
  693. mindspore/ops/_op_impl/aicpu/mul.py +43 -0
  694. mindspore/ops/_op_impl/aicpu/mul_no_nan.py +42 -0
  695. mindspore/ops/_op_impl/aicpu/multi_margin_loss.py +37 -0
  696. mindspore/ops/_op_impl/aicpu/multi_margin_loss_grad.py +41 -0
  697. mindspore/ops/_op_impl/aicpu/multilabel_margin_loss_grad.py +37 -0
  698. mindspore/ops/_op_impl/aicpu/multinomial.py +47 -0
  699. mindspore/ops/_op_impl/aicpu/multinomial_with_replacement.py +35 -0
  700. mindspore/ops/_op_impl/aicpu/mvlgamma.py +32 -0
  701. mindspore/ops/_op_impl/aicpu/mvlgamma_grad.py +33 -0
  702. mindspore/ops/_op_impl/aicpu/nan_to_num.py +34 -0
  703. mindspore/ops/_op_impl/aicpu/neg.py +36 -0
  704. mindspore/ops/_op_impl/aicpu/nextafter.py +32 -0
  705. mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
  706. mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
  707. mindspore/ops/_op_impl/aicpu/no_repeat_ngram.py +34 -0
  708. mindspore/ops/_op_impl/aicpu/non_deterministic_ints.py +33 -0
  709. mindspore/ops/_op_impl/aicpu/non_max_suppression.py +36 -0
  710. mindspore/ops/_op_impl/aicpu/non_max_suppression_with_overlaps.py +35 -0
  711. mindspore/ops/_op_impl/aicpu/non_zero.py +43 -0
  712. mindspore/ops/_op_impl/aicpu/not_equal.py +39 -0
  713. mindspore/ops/_op_impl/aicpu/nth_element.py +39 -0
  714. mindspore/ops/_op_impl/aicpu/nuclear_norm.py +33 -0
  715. mindspore/ops/_op_impl/aicpu/one_hot.py +116 -0
  716. mindspore/ops/_op_impl/aicpu/ones_like.py +39 -0
  717. mindspore/ops/_op_impl/aicpu/orgqr.py +34 -0
  718. mindspore/ops/_op_impl/aicpu/pad_and_shift.py +33 -0
  719. mindspore/ops/_op_impl/aicpu/pad_v3.py +61 -0
  720. mindspore/ops/_op_impl/aicpu/pad_v3_grad.py +59 -0
  721. mindspore/ops/_op_impl/aicpu/padding.py +41 -0
  722. mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +54 -0
  723. mindspore/ops/_op_impl/aicpu/pdist_grad.py +33 -0
  724. mindspore/ops/_op_impl/aicpu/poisson.py +37 -0
  725. mindspore/ops/_op_impl/aicpu/polar.py +32 -0
  726. mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
  727. mindspore/ops/_op_impl/aicpu/pow.py +39 -0
  728. mindspore/ops/_op_impl/aicpu/print_tensor.py +39 -0
  729. mindspore/ops/_op_impl/aicpu/priority_replay_buffer.py +113 -0
  730. mindspore/ops/_op_impl/aicpu/qr.py +36 -0
  731. mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
  732. mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
  733. mindspore/ops/_op_impl/aicpu/ragged_range.py +49 -0
  734. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
  735. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_tensor.py +74 -0
  736. mindspore/ops/_op_impl/aicpu/random_categorical.py +68 -0
  737. mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +36 -0
  738. mindspore/ops/_op_impl/aicpu/random_gamma.py +38 -0
  739. mindspore/ops/_op_impl/aicpu/random_poisson.py +134 -0
  740. mindspore/ops/_op_impl/aicpu/random_shuffle.py +47 -0
  741. mindspore/ops/_op_impl/aicpu/randperm.py +38 -0
  742. mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
  743. mindspore/ops/_op_impl/aicpu/range.py +36 -0
  744. mindspore/ops/_op_impl/aicpu/range_v2.py +35 -0
  745. mindspore/ops/_op_impl/aicpu/real.py +31 -0
  746. mindspore/ops/_op_impl/aicpu/real_div.py +40 -0
  747. mindspore/ops/_op_impl/aicpu/reciprocal.py +34 -0
  748. mindspore/ops/_op_impl/aicpu/reciprocal_grad.py +35 -0
  749. mindspore/ops/_op_impl/aicpu/reduce_mean.py +57 -0
  750. mindspore/ops/_op_impl/aicpu/reduce_prod.py +57 -0
  751. mindspore/ops/_op_impl/aicpu/reduce_sum.py +57 -0
  752. mindspore/ops/_op_impl/aicpu/relu_grad_v3.py +41 -0
  753. mindspore/ops/_op_impl/aicpu/relu_v3.py +38 -0
  754. mindspore/ops/_op_impl/aicpu/reservoir_replay_buffer.py +96 -0
  755. mindspore/ops/_op_impl/aicpu/reshape.py +42 -0
  756. mindspore/ops/_op_impl/aicpu/resize_area.py +40 -0
  757. mindspore/ops/_op_impl/aicpu/resize_bicubic.py +20 -0
  758. mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +19 -0
  759. mindspore/ops/_op_impl/aicpu/resize_bilinear.py +32 -0
  760. mindspore/ops/_op_impl/aicpu/resize_bilinear_grad.py +32 -0
  761. mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2.py +36 -0
  762. mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2_grad.py +35 -0
  763. mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
  764. mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
  765. mindspore/ops/_op_impl/aicpu/reverse_sequence.py +55 -0
  766. mindspore/ops/_op_impl/aicpu/reversev2.py +54 -0
  767. mindspore/ops/_op_impl/aicpu/rgb_to_hsv.py +32 -0
  768. mindspore/ops/_op_impl/aicpu/right_shift.py +38 -0
  769. mindspore/ops/_op_impl/aicpu/rnnt_loss.py +35 -0
  770. mindspore/ops/_op_impl/aicpu/round.py +34 -0
  771. mindspore/ops/_op_impl/aicpu/rsqrt.py +33 -0
  772. mindspore/ops/_op_impl/aicpu/rsqrt_grad.py +36 -0
  773. mindspore/ops/_op_impl/aicpu/sample_distorted_bounding_box_v2.py +49 -0
  774. mindspore/ops/_op_impl/aicpu/scale_and_translate.py +52 -0
  775. mindspore/ops/_op_impl/aicpu/scale_and_translate_grad.py +36 -0
  776. mindspore/ops/_op_impl/aicpu/scatter.py +79 -0
  777. mindspore/ops/_op_impl/aicpu/scatter_add_with_axis.py +53 -0
  778. mindspore/ops/_op_impl/aicpu/scatter_elements.py +39 -0
  779. mindspore/ops/_op_impl/aicpu/scatter_nd.py +59 -0
  780. mindspore/ops/_op_impl/aicpu/scatter_nd_max.py +54 -0
  781. mindspore/ops/_op_impl/aicpu/scatter_nd_min.py +54 -0
  782. mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +59 -0
  783. mindspore/ops/_op_impl/aicpu/search_sorted.py +44 -0
  784. mindspore/ops/_op_impl/aicpu/segment_max.py +52 -0
  785. mindspore/ops/_op_impl/aicpu/segment_mean.py +56 -0
  786. mindspore/ops/_op_impl/aicpu/segment_min.py +52 -0
  787. mindspore/ops/_op_impl/aicpu/segment_prod.py +56 -0
  788. mindspore/ops/_op_impl/aicpu/segment_sum.py +56 -0
  789. mindspore/ops/_op_impl/aicpu/select.py +45 -0
  790. mindspore/ops/_op_impl/aicpu/self_adjoint_eig.py +34 -0
  791. mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
  792. mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
  793. mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
  794. mindspore/ops/_op_impl/aicpu/sequence_concat.py +40 -0
  795. mindspore/ops/_op_impl/aicpu/sequence_stack.py +40 -0
  796. mindspore/ops/_op_impl/aicpu/set_size.py +38 -0
  797. mindspore/ops/_op_impl/aicpu/sign.py +36 -0
  798. mindspore/ops/_op_impl/aicpu/sin.py +34 -0
  799. mindspore/ops/_op_impl/aicpu/sinc.py +43 -0
  800. mindspore/ops/_op_impl/aicpu/sinh.py +34 -0
  801. mindspore/ops/_op_impl/aicpu/slice.py +59 -0
  802. mindspore/ops/_op_impl/aicpu/slice_grad.py +76 -0
  803. mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
  804. mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
  805. mindspore/ops/_op_impl/aicpu/sort.py +39 -0
  806. mindspore/ops/_op_impl/aicpu/space_to_depth.py +44 -0
  807. mindspore/ops/_op_impl/aicpu/sparse_addmm.py +87 -0
  808. mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +80 -0
  809. mindspore/ops/_op_impl/aicpu/sparse_apply_centered_rms_prop.py +105 -0
  810. mindspore/ops/_op_impl/aicpu/sparse_apply_momentum.py +80 -0
  811. mindspore/ops/_op_impl/aicpu/sparse_apply_proximal_gradient_descent.py +79 -0
  812. mindspore/ops/_op_impl/aicpu/sparse_concat.py +59 -0
  813. mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
  814. mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_add.py +58 -0
  815. mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_div.py +58 -0
  816. mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_mul.py +58 -0
  817. mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows.py +63 -0
  818. mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows_grad.py +45 -0
  819. mindspore/ops/_op_impl/aicpu/sparse_matrix_mat_mul.py +56 -0
  820. mindspore/ops/_op_impl/aicpu/sparse_matrix_nnz.py +81 -0
  821. mindspore/ops/_op_impl/aicpu/sparse_matrix_transpose.py +116 -0
  822. mindspore/ops/_op_impl/aicpu/sparse_reorder.py +56 -0
  823. mindspore/ops/_op_impl/aicpu/sparse_reshape.py +34 -0
  824. mindspore/ops/_op_impl/aicpu/sparse_segment_mean_grad.py +36 -0
  825. mindspore/ops/_op_impl/aicpu/sparse_segment_mean_with_num_segments.py +44 -0
  826. mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n.py +43 -0
  827. mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n_grad.py +38 -0
  828. mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n_with_num_segments.py +44 -0
  829. mindspore/ops/_op_impl/aicpu/sparse_segment_sum.py +49 -0
  830. mindspore/ops/_op_impl/aicpu/sparse_segment_sum_with_num_segments.py +68 -0
  831. mindspore/ops/_op_impl/aicpu/sparse_slice.py +63 -0
  832. mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +61 -0
  833. mindspore/ops/_op_impl/aicpu/sparse_softmax.py +33 -0
  834. mindspore/ops/_op_impl/aicpu/sparse_softmax_cross_entropy_with_logits_v2.py +35 -0
  835. mindspore/ops/_op_impl/aicpu/sparse_sparse_maximum.py +53 -0
  836. mindspore/ops/_op_impl/aicpu/sparse_sparse_minimum.py +53 -0
  837. mindspore/ops/_op_impl/aicpu/sparse_tensor_dense_add.py +84 -0
  838. mindspore/ops/_op_impl/aicpu/sparse_tensor_dense_mat_mul.py +190 -0
  839. mindspore/ops/_op_impl/aicpu/sparse_tensor_to_csr_sparse_matrix.py +51 -0
  840. mindspore/ops/_op_impl/aicpu/sparse_to_dense_v2.py +73 -0
  841. mindspore/ops/_op_impl/aicpu/split.py +45 -0
  842. mindspore/ops/_op_impl/aicpu/sqrt.py +34 -0
  843. mindspore/ops/_op_impl/aicpu/sqrt_grad.py +35 -0
  844. mindspore/ops/_op_impl/aicpu/square.py +35 -0
  845. mindspore/ops/_op_impl/aicpu/squared_difference.py +37 -0
  846. mindspore/ops/_op_impl/aicpu/squeeze.py +42 -0
  847. mindspore/ops/_op_impl/aicpu/sspaddmm.py +97 -0
  848. mindspore/ops/_op_impl/aicpu/stack.py +45 -0
  849. mindspore/ops/_op_impl/aicpu/stack_push_pop.py +87 -0
  850. mindspore/ops/_op_impl/aicpu/standard_laplace.py +34 -0
  851. mindspore/ops/_op_impl/aicpu/standard_normal.py +34 -0
  852. mindspore/ops/_op_impl/aicpu/stateless_dropout_genmask.py +37 -0
  853. mindspore/ops/_op_impl/aicpu/stft.py +70 -0
  854. mindspore/ops/_op_impl/aicpu/strided_slice.py +43 -0
  855. mindspore/ops/_op_impl/aicpu/strided_slice_grad.py +50 -0
  856. mindspore/ops/_op_impl/aicpu/sub.py +41 -0
  857. mindspore/ops/_op_impl/aicpu/sub_and_filter.py +36 -0
  858. mindspore/ops/_op_impl/aicpu/tan.py +34 -0
  859. mindspore/ops/_op_impl/aicpu/tanh.py +34 -0
  860. mindspore/ops/_op_impl/aicpu/tanh_grad.py +35 -0
  861. mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
  862. mindspore/ops/_op_impl/aicpu/tile.py +56 -0
  863. mindspore/ops/_op_impl/aicpu/topk.py +34 -0
  864. mindspore/ops/_op_impl/aicpu/trace.py +40 -0
  865. mindspore/ops/_op_impl/aicpu/tracegrad.py +41 -0
  866. mindspore/ops/_op_impl/aicpu/trans_data.py +35 -0
  867. mindspore/ops/_op_impl/aicpu/transpose.py +58 -0
  868. mindspore/ops/_op_impl/aicpu/tridiagonal_matmul.py +42 -0
  869. mindspore/ops/_op_impl/aicpu/tridiagonal_solve.py +35 -0
  870. mindspore/ops/_op_impl/aicpu/tril.py +42 -0
  871. mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
  872. mindspore/ops/_op_impl/aicpu/triplet_margin_loss.py +62 -0
  873. mindspore/ops/_op_impl/aicpu/triu.py +43 -0
  874. mindspore/ops/_op_impl/aicpu/triu_indices.py +34 -0
  875. mindspore/ops/_op_impl/aicpu/truncated_normal.py +39 -0
  876. mindspore/ops/_op_impl/aicpu/uniform.py +36 -0
  877. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +41 -0
  878. mindspore/ops/_op_impl/aicpu/uniform_int.py +36 -0
  879. mindspore/ops/_op_impl/aicpu/uniform_real.py +33 -0
  880. mindspore/ops/_op_impl/aicpu/unique.py +31 -0
  881. mindspore/ops/_op_impl/aicpu/unique_consecutive.py +47 -0
  882. mindspore/ops/_op_impl/aicpu/unique_with_pad.py +32 -0
  883. mindspore/ops/_op_impl/aicpu/unravel_index.py +32 -0
  884. mindspore/ops/_op_impl/aicpu/unsorted_segment_prod.py +53 -0
  885. mindspore/ops/_op_impl/aicpu/unsorted_segment_sum.py +57 -0
  886. mindspore/ops/_op_impl/aicpu/unstack.py +45 -0
  887. mindspore/ops/_op_impl/aicpu/update_cache.py +44 -0
  888. mindspore/ops/_op_impl/aicpu/upper_bound.py +47 -0
  889. mindspore/ops/_op_impl/aicpu/upsample_nearest_3d.py +42 -0
  890. mindspore/ops/_op_impl/aicpu/upsample_nearest_3d_grad.py +49 -0
  891. mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d.py +40 -0
  892. mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d_grad.py +50 -0
  893. mindspore/ops/_op_impl/aicpu/xdivy.py +35 -0
  894. mindspore/ops/_op_impl/aicpu/xlogy.py +33 -0
  895. mindspore/ops/_op_impl/aicpu/zeros_like.py +42 -0
  896. mindspore/ops/_op_impl/aicpu/zeta.py +31 -0
  897. mindspore/ops/_op_impl/akg/__init__.py +19 -0
  898. mindspore/ops/_op_impl/akg/ascend/__init__.py +48 -0
  899. mindspore/ops/_op_impl/akg/ascend/abs.py +35 -0
  900. mindspore/ops/_op_impl/akg/ascend/add.py +42 -0
  901. mindspore/ops/_op_impl/akg/ascend/add_n.py +37 -0
  902. mindspore/ops/_op_impl/akg/ascend/batchmatmul.py +33 -0
  903. mindspore/ops/_op_impl/akg/ascend/cast.py +46 -0
  904. mindspore/ops/_op_impl/akg/ascend/equal.py +35 -0
  905. mindspore/ops/_op_impl/akg/ascend/exp.py +35 -0
  906. mindspore/ops/_op_impl/akg/ascend/expand_dims.py +33 -0
  907. mindspore/ops/_op_impl/akg/ascend/greater.py +34 -0
  908. mindspore/ops/_op_impl/akg/ascend/greater_equal.py +35 -0
  909. mindspore/ops/_op_impl/akg/ascend/less.py +31 -0
  910. mindspore/ops/_op_impl/akg/ascend/less_equal.py +35 -0
  911. mindspore/ops/_op_impl/akg/ascend/load_im2col.py +33 -0
  912. mindspore/ops/_op_impl/akg/ascend/log.py +34 -0
  913. mindspore/ops/_op_impl/akg/ascend/maximum.py +36 -0
  914. mindspore/ops/_op_impl/akg/ascend/minimum.py +39 -0
  915. mindspore/ops/_op_impl/akg/ascend/mul.py +41 -0
  916. mindspore/ops/_op_impl/akg/ascend/neg.py +37 -0
  917. mindspore/ops/_op_impl/akg/ascend/pow.py +35 -0
  918. mindspore/ops/_op_impl/akg/ascend/prod_force_se_a.py +33 -0
  919. mindspore/ops/_op_impl/akg/ascend/real_div.py +36 -0
  920. mindspore/ops/_op_impl/akg/ascend/reciprocal.py +32 -0
  921. mindspore/ops/_op_impl/akg/ascend/reduce_max.py +32 -0
  922. mindspore/ops/_op_impl/akg/ascend/reduce_min.py +32 -0
  923. mindspore/ops/_op_impl/akg/ascend/reduce_sum.py +37 -0
  924. mindspore/ops/_op_impl/akg/ascend/rsqrt.py +35 -0
  925. mindspore/ops/_op_impl/akg/ascend/select.py +37 -0
  926. mindspore/ops/_op_impl/akg/ascend/sqrt.py +35 -0
  927. mindspore/ops/_op_impl/akg/ascend/square.py +35 -0
  928. mindspore/ops/_op_impl/akg/ascend/sub.py +42 -0
  929. mindspore/ops/_op_impl/akg/cpu/__init__.py +23 -0
  930. mindspore/ops/_op_impl/akg/cpu/coo2csr.py +29 -0
  931. mindspore/ops/_op_impl/akg/cpu/csr2coo.py +29 -0
  932. mindspore/ops/_op_impl/akg/cpu/csr_gather.py +33 -0
  933. mindspore/ops/_op_impl/akg/cpu/csr_mm.py +34 -0
  934. mindspore/ops/_op_impl/akg/cpu/csr_mul.py +33 -0
  935. mindspore/ops/_op_impl/akg/cpu/csr_mv.py +33 -0
  936. mindspore/ops/_op_impl/akg/cpu/csr_reduce_sum.py +31 -0
  937. mindspore/ops/_op_impl/akg/gpu/__init__.py +24 -0
  938. mindspore/ops/_op_impl/akg/gpu/coo2csr.py +29 -0
  939. mindspore/ops/_op_impl/akg/gpu/csr2coo.py +29 -0
  940. mindspore/ops/_op_impl/akg/gpu/csr_div.py +36 -0
  941. mindspore/ops/_op_impl/akg/gpu/csr_gather.py +33 -0
  942. mindspore/ops/_op_impl/akg/gpu/csr_mm.py +37 -0
  943. mindspore/ops/_op_impl/akg/gpu/csr_mul.py +36 -0
  944. mindspore/ops/_op_impl/akg/gpu/csr_mv.py +36 -0
  945. mindspore/ops/_op_impl/akg/gpu/csr_reduce_sum.py +33 -0
  946. mindspore/ops/_op_impl/cpu/__init__.py +78 -0
  947. mindspore/ops/_op_impl/cpu/adam.py +49 -0
  948. mindspore/ops/_op_impl/cpu/adam_weight_decay.py +47 -0
  949. mindspore/ops/_op_impl/cpu/arg_max.py +30 -0
  950. mindspore/ops/_op_impl/cpu/arg_max_with_value.py +31 -0
  951. mindspore/ops/_op_impl/cpu/arg_min_with_value.py +31 -0
  952. mindspore/ops/_op_impl/cpu/buffer_append.py +28 -0
  953. mindspore/ops/_op_impl/cpu/buffer_get.py +28 -0
  954. mindspore/ops/_op_impl/cpu/buffer_sample.py +28 -0
  955. mindspore/ops/_op_impl/cpu/cast.py +171 -0
  956. mindspore/ops/_op_impl/cpu/concat_offset.py +38 -0
  957. mindspore/ops/_op_impl/cpu/conv2d.py +30 -0
  958. mindspore/ops/_op_impl/cpu/conv3d.py +30 -0
  959. mindspore/ops/_op_impl/cpu/div.py +32 -0
  960. mindspore/ops/_op_impl/cpu/dropout.py +31 -0
  961. mindspore/ops/_op_impl/cpu/dropout_grad.py +30 -0
  962. mindspore/ops/_op_impl/cpu/dynamic_shape.py +42 -0
  963. mindspore/ops/_op_impl/cpu/dynamic_stitch.py +41 -0
  964. mindspore/ops/_op_impl/cpu/equal_count.py +30 -0
  965. mindspore/ops/_op_impl/cpu/gather_d.py +49 -0
  966. mindspore/ops/_op_impl/cpu/gather_d_grad.py +38 -0
  967. mindspore/ops/_op_impl/cpu/gather_d_grad_v2.py +40 -0
  968. mindspore/ops/_op_impl/cpu/gather_v2.py +40 -0
  969. mindspore/ops/_op_impl/cpu/hsigmoid.py +33 -0
  970. mindspore/ops/_op_impl/cpu/hsigmoid_grad.py +34 -0
  971. mindspore/ops/_op_impl/cpu/hswish.py +32 -0
  972. mindspore/ops/_op_impl/cpu/hswish_grad.py +33 -0
  973. mindspore/ops/_op_impl/cpu/identity_n.py +40 -0
  974. mindspore/ops/_op_impl/cpu/is_finite.py +39 -0
  975. mindspore/ops/_op_impl/cpu/l2loss.py +30 -0
  976. mindspore/ops/_op_impl/cpu/layer_norm.py +36 -0
  977. mindspore/ops/_op_impl/cpu/layer_norm_grad.py +38 -0
  978. mindspore/ops/_op_impl/cpu/maximum.py +35 -0
  979. mindspore/ops/_op_impl/cpu/maximum_grad.py +47 -0
  980. mindspore/ops/_op_impl/cpu/minimum.py +40 -0
  981. mindspore/ops/_op_impl/cpu/minimum_grad.py +51 -0
  982. mindspore/ops/_op_impl/cpu/mirror_pad.py +36 -0
  983. mindspore/ops/_op_impl/cpu/mirror_pad_grad.py +36 -0
  984. mindspore/ops/_op_impl/cpu/mul.py +32 -0
  985. mindspore/ops/_op_impl/cpu/one_hot.py +31 -0
  986. mindspore/ops/_op_impl/cpu/pad.py +32 -0
  987. mindspore/ops/_op_impl/cpu/pow.py +32 -0
  988. mindspore/ops/_op_impl/cpu/priority_replay_buffer.py +42 -0
  989. mindspore/ops/_op_impl/cpu/pyexecute.py +29 -0
  990. mindspore/ops/_op_impl/cpu/pyfunc.py +29 -0
  991. mindspore/ops/_op_impl/cpu/range.py +34 -0
  992. mindspore/ops/_op_impl/cpu/real_div.py +33 -0
  993. mindspore/ops/_op_impl/cpu/reduce_all.py +29 -0
  994. mindspore/ops/_op_impl/cpu/reduce_any.py +29 -0
  995. mindspore/ops/_op_impl/cpu/reduce_max.py +32 -0
  996. mindspore/ops/_op_impl/cpu/reduce_mean.py +40 -0
  997. mindspore/ops/_op_impl/cpu/reduce_min.py +32 -0
  998. mindspore/ops/_op_impl/cpu/reduce_prod.py +40 -0
  999. mindspore/ops/_op_impl/cpu/reduce_std.py +31 -0
  1000. mindspore/ops/_op_impl/cpu/reduce_sum.py +41 -0
  1001. mindspore/ops/_op_impl/cpu/space_to_batch_nd.py +38 -0
  1002. mindspore/ops/_op_impl/cpu/sparse_slice.py +62 -0
  1003. mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +60 -0
  1004. mindspore/ops/_op_impl/cpu/split.py +34 -0
  1005. mindspore/ops/_op_impl/cpu/sspaddmm.py +95 -0
  1006. mindspore/ops/_op_impl/cpu/stack.py +38 -0
  1007. mindspore/ops/_op_impl/cpu/sub.py +32 -0
  1008. mindspore/ops/_op_impl/cpu/tensor_copy_slices.py +41 -0
  1009. mindspore/ops/_op_impl/cpu/tile.py +37 -0
  1010. mindspore/ops/_op_impl/cpu/top_k.py +31 -0
  1011. mindspore/ops/_op_impl/cpu/transpose.py +39 -0
  1012. mindspore/ops/_primitive_cache.py +90 -0
  1013. mindspore/ops/_register_for_op.py +73 -0
  1014. mindspore/ops/_utils/__init__.py +20 -0
  1015. mindspore/ops/_utils/utils.py +147 -0
  1016. mindspore/ops/_vmap/__init__.py +25 -0
  1017. mindspore/ops/_vmap/vmap_array_ops.py +2149 -0
  1018. mindspore/ops/_vmap/vmap_base.py +533 -0
  1019. mindspore/ops/_vmap/vmap_convolution_ops.py +441 -0
  1020. mindspore/ops/_vmap/vmap_debug_ops.py +50 -0
  1021. mindspore/ops/_vmap/vmap_grad_math_ops.py +274 -0
  1022. mindspore/ops/_vmap/vmap_grad_nn_ops.py +806 -0
  1023. mindspore/ops/_vmap/vmap_image_ops.py +194 -0
  1024. mindspore/ops/_vmap/vmap_math_ops.py +993 -0
  1025. mindspore/ops/_vmap/vmap_nn_ops.py +2250 -0
  1026. mindspore/ops/_vmap/vmap_other_ops.py +105 -0
  1027. mindspore/ops/_vmap/vmap_random_ops.py +122 -0
  1028. mindspore/ops/_vmap/vmap_sparse_ops.py +89 -0
  1029. mindspore/ops/auto_generate/__init__.py +31 -0
  1030. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +309 -0
  1031. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +252 -0
  1032. mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
  1033. mindspore/ops/auto_generate/gen_extend_func.py +1701 -0
  1034. mindspore/ops/auto_generate/gen_ops_def.py +8482 -0
  1035. mindspore/ops/auto_generate/gen_ops_prim.py +16704 -0
  1036. mindspore/ops/auto_generate/pyboost_inner_prim.py +549 -0
  1037. mindspore/ops/composite/__init__.py +71 -0
  1038. mindspore/ops/composite/base.py +1318 -0
  1039. mindspore/ops/composite/env_ops.py +41 -0
  1040. mindspore/ops/composite/math_ops.py +125 -0
  1041. mindspore/ops/composite/multitype_ops/__init__.py +77 -0
  1042. mindspore/ops/composite/multitype_ops/_compile_utils.py +1459 -0
  1043. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +897 -0
  1044. mindspore/ops/composite/multitype_ops/add_impl.py +606 -0
  1045. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +56 -0
  1046. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +56 -0
  1047. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +56 -0
  1048. mindspore/ops/composite/multitype_ops/div_impl.py +189 -0
  1049. mindspore/ops/composite/multitype_ops/equal_impl.py +335 -0
  1050. mindspore/ops/composite/multitype_ops/floordiv_impl.py +88 -0
  1051. mindspore/ops/composite/multitype_ops/getitem_impl.py +400 -0
  1052. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +109 -0
  1053. mindspore/ops/composite/multitype_ops/greater_impl.py +110 -0
  1054. mindspore/ops/composite/multitype_ops/in_impl.py +196 -0
  1055. mindspore/ops/composite/multitype_ops/left_shift_impl.py +37 -0
  1056. mindspore/ops/composite/multitype_ops/less_equal_impl.py +111 -0
  1057. mindspore/ops/composite/multitype_ops/less_impl.py +112 -0
  1058. mindspore/ops/composite/multitype_ops/logic_not_impl.py +113 -0
  1059. mindspore/ops/composite/multitype_ops/logical_and_impl.py +60 -0
  1060. mindspore/ops/composite/multitype_ops/logical_or_impl.py +61 -0
  1061. mindspore/ops/composite/multitype_ops/mod_impl.py +86 -0
  1062. mindspore/ops/composite/multitype_ops/mul_impl.py +294 -0
  1063. mindspore/ops/composite/multitype_ops/negative_impl.py +79 -0
  1064. mindspore/ops/composite/multitype_ops/not_equal_impl.py +290 -0
  1065. mindspore/ops/composite/multitype_ops/not_in_impl.py +196 -0
  1066. mindspore/ops/composite/multitype_ops/ones_like_impl.py +96 -0
  1067. mindspore/ops/composite/multitype_ops/pow_impl.py +87 -0
  1068. mindspore/ops/composite/multitype_ops/right_shift_impl.py +37 -0
  1069. mindspore/ops/composite/multitype_ops/setitem_impl.py +884 -0
  1070. mindspore/ops/composite/multitype_ops/sub_impl.py +116 -0
  1071. mindspore/ops/composite/multitype_ops/uadd_impl.py +29 -0
  1072. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +228 -0
  1073. mindspore/ops/deprecated.py +315 -0
  1074. mindspore/ops/function/__init__.py +782 -0
  1075. mindspore/ops/function/array_func.py +7226 -0
  1076. mindspore/ops/function/clip_func.py +384 -0
  1077. mindspore/ops/function/debug_func.py +181 -0
  1078. mindspore/ops/function/fft_func.py +44 -0
  1079. mindspore/ops/function/grad/__init__.py +34 -0
  1080. mindspore/ops/function/grad/grad_func.py +1425 -0
  1081. mindspore/ops/function/image_func.py +292 -0
  1082. mindspore/ops/function/linalg_func.py +416 -0
  1083. mindspore/ops/function/math_func.py +12228 -0
  1084. mindspore/ops/function/nn_func.py +8609 -0
  1085. mindspore/ops/function/other_func.py +115 -0
  1086. mindspore/ops/function/parameter_func.py +134 -0
  1087. mindspore/ops/function/random_func.py +1715 -0
  1088. mindspore/ops/function/reshard_func.py +104 -0
  1089. mindspore/ops/function/sparse_func.py +884 -0
  1090. mindspore/ops/function/sparse_unary_func.py +2422 -0
  1091. mindspore/ops/function/spectral_func.py +150 -0
  1092. mindspore/ops/function/vmap_func.py +117 -0
  1093. mindspore/ops/functional.py +464 -0
  1094. mindspore/ops/op_info_register.py +1572 -0
  1095. mindspore/ops/operations/__init__.py +722 -0
  1096. mindspore/ops/operations/_csr_ops.py +403 -0
  1097. mindspore/ops/operations/_custom_grad.py +181 -0
  1098. mindspore/ops/operations/_embedding_cache_ops.py +307 -0
  1099. mindspore/ops/operations/_grad_ops.py +2978 -0
  1100. mindspore/ops/operations/_infer_ops.py +19 -0
  1101. mindspore/ops/operations/_inner_ops.py +2544 -0
  1102. mindspore/ops/operations/_map_tensor_ops.py +112 -0
  1103. mindspore/ops/operations/_ms_kernel.py +601 -0
  1104. mindspore/ops/operations/_ocr_ops.py +379 -0
  1105. mindspore/ops/operations/_opaque_predicate_registry.py +41 -0
  1106. mindspore/ops/operations/_pyfunc_registry.py +58 -0
  1107. mindspore/ops/operations/_quant_ops.py +1844 -0
  1108. mindspore/ops/operations/_rl_inner_ops.py +1231 -0
  1109. mindspore/ops/operations/_scalar_ops.py +106 -0
  1110. mindspore/ops/operations/_sequence_ops.py +1155 -0
  1111. mindspore/ops/operations/_sparse_grad_ops.py +56 -0
  1112. mindspore/ops/operations/_tensor_array.py +359 -0
  1113. mindspore/ops/operations/_thor_ops.py +807 -0
  1114. mindspore/ops/operations/array_ops.py +6124 -0
  1115. mindspore/ops/operations/comm_ops.py +1985 -0
  1116. mindspore/ops/operations/control_ops.py +127 -0
  1117. mindspore/ops/operations/custom_ops.py +1129 -0
  1118. mindspore/ops/operations/debug_ops.py +678 -0
  1119. mindspore/ops/operations/image_ops.py +1041 -0
  1120. mindspore/ops/operations/inner_ops.py +697 -0
  1121. mindspore/ops/operations/linalg_ops.py +95 -0
  1122. mindspore/ops/operations/manually_defined/__init__.py +24 -0
  1123. mindspore/ops/operations/manually_defined/_inner.py +73 -0
  1124. mindspore/ops/operations/manually_defined/ops_def.py +2271 -0
  1125. mindspore/ops/operations/math_ops.py +5095 -0
  1126. mindspore/ops/operations/nn_ops.py +9575 -0
  1127. mindspore/ops/operations/other_ops.py +874 -0
  1128. mindspore/ops/operations/random_ops.py +1288 -0
  1129. mindspore/ops/operations/reshard_ops.py +53 -0
  1130. mindspore/ops/operations/rl_ops.py +288 -0
  1131. mindspore/ops/operations/sparse_ops.py +2753 -0
  1132. mindspore/ops/operations/spectral_ops.py +111 -0
  1133. mindspore/ops/primitive.py +1046 -0
  1134. mindspore/ops/signature.py +54 -0
  1135. mindspore/ops/vm_impl_registry.py +91 -0
  1136. mindspore/ops_generate/__init__.py +27 -0
  1137. mindspore/ops_generate/arg_dtype_cast.py +252 -0
  1138. mindspore/ops_generate/arg_handler.py +197 -0
  1139. mindspore/ops_generate/gen_aclnn_implement.py +263 -0
  1140. mindspore/ops_generate/gen_constants.py +36 -0
  1141. mindspore/ops_generate/gen_ops.py +1099 -0
  1142. mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
  1143. mindspore/ops_generate/gen_pyboost_func.py +1052 -0
  1144. mindspore/ops_generate/gen_utils.py +209 -0
  1145. mindspore/ops_generate/op_proto.py +145 -0
  1146. mindspore/ops_generate/pyboost_utils.py +367 -0
  1147. mindspore/ops_generate/template.py +261 -0
  1148. mindspore/parallel/__init__.py +30 -0
  1149. mindspore/parallel/_auto_parallel_context.py +1486 -0
  1150. mindspore/parallel/_cell_wrapper.py +174 -0
  1151. mindspore/parallel/_cost_model_context.py +700 -0
  1152. mindspore/parallel/_dp_allreduce_fusion.py +159 -0
  1153. mindspore/parallel/_offload_context.py +275 -0
  1154. mindspore/parallel/_parallel_serialization.py +561 -0
  1155. mindspore/parallel/_ps_context.py +242 -0
  1156. mindspore/parallel/_recovery_context.py +110 -0
  1157. mindspore/parallel/_tensor.py +730 -0
  1158. mindspore/parallel/_transformer/__init__.py +35 -0
  1159. mindspore/parallel/_transformer/layers.py +765 -0
  1160. mindspore/parallel/_transformer/loss.py +251 -0
  1161. mindspore/parallel/_transformer/moe.py +693 -0
  1162. mindspore/parallel/_transformer/op_parallel_config.py +222 -0
  1163. mindspore/parallel/_transformer/transformer.py +3119 -0
  1164. mindspore/parallel/_utils.py +612 -0
  1165. mindspore/parallel/algo_parameter_config.py +400 -0
  1166. mindspore/parallel/checkpoint_transform.py +650 -0
  1167. mindspore/parallel/cluster/__init__.py +15 -0
  1168. mindspore/parallel/cluster/process_entity/__init__.py +18 -0
  1169. mindspore/parallel/cluster/process_entity/_api.py +352 -0
  1170. mindspore/parallel/cluster/process_entity/_utils.py +101 -0
  1171. mindspore/parallel/cluster/run.py +136 -0
  1172. mindspore/parallel/mpi/__init__.py +14 -0
  1173. mindspore/parallel/mpi/_mpi_config.py +116 -0
  1174. mindspore/parallel/parameter_broadcast.py +151 -0
  1175. mindspore/parallel/shard.py +481 -0
  1176. mindspore/parallel/transform_safetensors.py +993 -0
  1177. mindspore/profiler/__init__.py +28 -0
  1178. mindspore/profiler/common/__init__.py +14 -0
  1179. mindspore/profiler/common/constant.py +29 -0
  1180. mindspore/profiler/common/exceptions/__init__.py +14 -0
  1181. mindspore/profiler/common/exceptions/error_code.py +83 -0
  1182. mindspore/profiler/common/exceptions/exceptions.py +286 -0
  1183. mindspore/profiler/common/process_pool.py +41 -0
  1184. mindspore/profiler/common/registry.py +47 -0
  1185. mindspore/profiler/common/singleton.py +28 -0
  1186. mindspore/profiler/common/struct_type.py +118 -0
  1187. mindspore/profiler/common/util.py +472 -0
  1188. mindspore/profiler/common/validator/__init__.py +14 -0
  1189. mindspore/profiler/common/validator/validate_path.py +84 -0
  1190. mindspore/profiler/dynamic_profiler.py +694 -0
  1191. mindspore/profiler/envprofiling.py +254 -0
  1192. mindspore/profiler/parser/__init__.py +14 -0
  1193. mindspore/profiler/parser/aicpu_data_parser.py +272 -0
  1194. mindspore/profiler/parser/ascend_analysis/__init__.py +14 -0
  1195. mindspore/profiler/parser/ascend_analysis/constant.py +71 -0
  1196. mindspore/profiler/parser/ascend_analysis/file_manager.py +180 -0
  1197. mindspore/profiler/parser/ascend_analysis/function_event.py +185 -0
  1198. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +136 -0
  1199. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +131 -0
  1200. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +104 -0
  1201. mindspore/profiler/parser/ascend_analysis/path_manager.py +313 -0
  1202. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +123 -0
  1203. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
  1204. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +75 -0
  1205. mindspore/profiler/parser/ascend_cluster_generator.py +116 -0
  1206. mindspore/profiler/parser/ascend_communicate_generator.py +314 -0
  1207. mindspore/profiler/parser/ascend_flops_generator.py +116 -0
  1208. mindspore/profiler/parser/ascend_fpbp_generator.py +82 -0
  1209. mindspore/profiler/parser/ascend_hccl_generator.py +271 -0
  1210. mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
  1211. mindspore/profiler/parser/ascend_memory_generator.py +185 -0
  1212. mindspore/profiler/parser/ascend_msprof_exporter.py +282 -0
  1213. mindspore/profiler/parser/ascend_msprof_generator.py +187 -0
  1214. mindspore/profiler/parser/ascend_op_generator.py +334 -0
  1215. mindspore/profiler/parser/ascend_steptrace_generator.py +94 -0
  1216. mindspore/profiler/parser/ascend_timeline_generator.py +545 -0
  1217. mindspore/profiler/parser/base_timeline_generator.py +483 -0
  1218. mindspore/profiler/parser/container.py +229 -0
  1219. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +697 -0
  1220. mindspore/profiler/parser/flops_parser.py +531 -0
  1221. mindspore/profiler/parser/framework_enum.py +111 -0
  1222. mindspore/profiler/parser/framework_parser.py +464 -0
  1223. mindspore/profiler/parser/framework_struct.py +61 -0
  1224. mindspore/profiler/parser/gpu_analysis/__init__.py +14 -0
  1225. mindspore/profiler/parser/gpu_analysis/function_event.py +44 -0
  1226. mindspore/profiler/parser/gpu_analysis/fwk_file_parser.py +89 -0
  1227. mindspore/profiler/parser/gpu_analysis/profiler_info_parser.py +72 -0
  1228. mindspore/profiler/parser/hccl_parser.py +573 -0
  1229. mindspore/profiler/parser/hwts_log_parser.py +122 -0
  1230. mindspore/profiler/parser/integrator.py +526 -0
  1231. mindspore/profiler/parser/memory_usage_parser.py +277 -0
  1232. mindspore/profiler/parser/minddata_analyzer.py +800 -0
  1233. mindspore/profiler/parser/minddata_parser.py +186 -0
  1234. mindspore/profiler/parser/minddata_pipeline_parser.py +299 -0
  1235. mindspore/profiler/parser/op_intermediate_parser.py +149 -0
  1236. mindspore/profiler/parser/optime_parser.py +250 -0
  1237. mindspore/profiler/parser/profiler_info.py +213 -0
  1238. mindspore/profiler/parser/step_trace_parser.py +666 -0
  1239. mindspore/profiler/profiler.py +153 -0
  1240. mindspore/profiler/profiling.py +1922 -0
  1241. mindspore/rewrite/__init__.py +28 -0
  1242. mindspore/rewrite/api/__init__.py +17 -0
  1243. mindspore/rewrite/api/node.py +519 -0
  1244. mindspore/rewrite/api/node_type.py +53 -0
  1245. mindspore/rewrite/api/pattern_engine.py +490 -0
  1246. mindspore/rewrite/api/scoped_value.py +181 -0
  1247. mindspore/rewrite/api/symbol_tree.py +497 -0
  1248. mindspore/rewrite/ast_helpers/__init__.py +25 -0
  1249. mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
  1250. mindspore/rewrite/ast_helpers/ast_finder.py +404 -0
  1251. mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
  1252. mindspore/rewrite/ast_helpers/ast_modifier.py +605 -0
  1253. mindspore/rewrite/ast_helpers/ast_replacer.py +79 -0
  1254. mindspore/rewrite/common/__init__.py +19 -0
  1255. mindspore/rewrite/common/config.py +24 -0
  1256. mindspore/rewrite/common/error_log.py +39 -0
  1257. mindspore/rewrite/common/event.py +28 -0
  1258. mindspore/rewrite/common/namer.py +271 -0
  1259. mindspore/rewrite/common/namespace.py +118 -0
  1260. mindspore/rewrite/common/observable.py +44 -0
  1261. mindspore/rewrite/common/observer.py +54 -0
  1262. mindspore/rewrite/node/__init__.py +22 -0
  1263. mindspore/rewrite/node/call_function.py +95 -0
  1264. mindspore/rewrite/node/cell_container.py +139 -0
  1265. mindspore/rewrite/node/control_flow.py +113 -0
  1266. mindspore/rewrite/node/node.py +1428 -0
  1267. mindspore/rewrite/node/node_manager.py +283 -0
  1268. mindspore/rewrite/node/node_topological_manager.py +223 -0
  1269. mindspore/rewrite/parsers/__init__.py +29 -0
  1270. mindspore/rewrite/parsers/arguments_parser.py +63 -0
  1271. mindspore/rewrite/parsers/assign_parser.py +852 -0
  1272. mindspore/rewrite/parsers/attribute_parser.py +57 -0
  1273. mindspore/rewrite/parsers/class_def_parser.py +289 -0
  1274. mindspore/rewrite/parsers/constant_parser.py +104 -0
  1275. mindspore/rewrite/parsers/container_parser.py +88 -0
  1276. mindspore/rewrite/parsers/expr_parser.py +55 -0
  1277. mindspore/rewrite/parsers/for_parser.py +61 -0
  1278. mindspore/rewrite/parsers/function_def_parser.py +84 -0
  1279. mindspore/rewrite/parsers/if_parser.py +85 -0
  1280. mindspore/rewrite/parsers/module_parser.py +117 -0
  1281. mindspore/rewrite/parsers/parser.py +43 -0
  1282. mindspore/rewrite/parsers/parser_register.py +86 -0
  1283. mindspore/rewrite/parsers/return_parser.py +37 -0
  1284. mindspore/rewrite/parsers/while_parser.py +59 -0
  1285. mindspore/rewrite/sparsify/__init__.py +0 -0
  1286. mindspore/rewrite/sparsify/sparse_transformer.py +457 -0
  1287. mindspore/rewrite/sparsify/sparsify.py +112 -0
  1288. mindspore/rewrite/sparsify/utils.py +179 -0
  1289. mindspore/rewrite/symbol_tree/__init__.py +20 -0
  1290. mindspore/rewrite/symbol_tree/symbol_tree.py +1819 -0
  1291. mindspore/rewrite/symbol_tree/symbol_tree_builder.py +76 -0
  1292. mindspore/rewrite/symbol_tree/symbol_tree_dumper.py +142 -0
  1293. mindspore/run_check/__init__.py +20 -0
  1294. mindspore/run_check/_check_version.py +507 -0
  1295. mindspore/run_check/run_check.py +66 -0
  1296. mindspore/safeguard/__init__.py +18 -0
  1297. mindspore/safeguard/rewrite_obfuscation.py +875 -0
  1298. mindspore/scipy/__init__.py +18 -0
  1299. mindspore/scipy/fft.py +264 -0
  1300. mindspore/scipy/linalg.py +919 -0
  1301. mindspore/scipy/ops.py +165 -0
  1302. mindspore/scipy/ops_grad.py +115 -0
  1303. mindspore/scipy/ops_wrapper.py +74 -0
  1304. mindspore/scipy/optimize/__init__.py +20 -0
  1305. mindspore/scipy/optimize/_bfgs.py +230 -0
  1306. mindspore/scipy/optimize/_lagrange.py +201 -0
  1307. mindspore/scipy/optimize/_lbfgs.py +146 -0
  1308. mindspore/scipy/optimize/gradient_optimization_algorithm.py +168 -0
  1309. mindspore/scipy/optimize/line_search.py +370 -0
  1310. mindspore/scipy/optimize/linear_sum_assignment.py +78 -0
  1311. mindspore/scipy/optimize/minimize.py +200 -0
  1312. mindspore/scipy/utils.py +156 -0
  1313. mindspore/scipy/utils_const.py +246 -0
  1314. mindspore/train/__init__.py +48 -0
  1315. mindspore/train/_utils.py +465 -0
  1316. mindspore/train/amp.py +935 -0
  1317. mindspore/train/anf_ir_pb2.py +1517 -0
  1318. mindspore/train/callback/__init__.py +44 -0
  1319. mindspore/train/callback/_backup_and_restore.py +117 -0
  1320. mindspore/train/callback/_callback.py +613 -0
  1321. mindspore/train/callback/_checkpoint.py +814 -0
  1322. mindspore/train/callback/_cluster_monitor.py +201 -0
  1323. mindspore/train/callback/_dataset_graph.py +150 -0
  1324. mindspore/train/callback/_early_stop.py +239 -0
  1325. mindspore/train/callback/_flops_collector.py +239 -0
  1326. mindspore/train/callback/_history.py +92 -0
  1327. mindspore/train/callback/_lambda_callback.py +80 -0
  1328. mindspore/train/callback/_landscape.py +1049 -0
  1329. mindspore/train/callback/_loss_monitor.py +107 -0
  1330. mindspore/train/callback/_lr_scheduler_callback.py +76 -0
  1331. mindspore/train/callback/_on_request_exit.py +298 -0
  1332. mindspore/train/callback/_reduce_lr_on_plateau.py +226 -0
  1333. mindspore/train/callback/_summary_collector.py +1184 -0
  1334. mindspore/train/callback/_tft_register.py +352 -0
  1335. mindspore/train/callback/_time_monitor.py +141 -0
  1336. mindspore/train/checkpoint_pb2.py +233 -0
  1337. mindspore/train/data_sink.py +219 -0
  1338. mindspore/train/dataset_helper.py +692 -0
  1339. mindspore/train/lineage_pb2.py +1260 -0
  1340. mindspore/train/loss_scale_manager.py +213 -0
  1341. mindspore/train/memory_profiling_pb2.py +298 -0
  1342. mindspore/train/metrics/__init__.py +175 -0
  1343. mindspore/train/metrics/accuracy.py +133 -0
  1344. mindspore/train/metrics/auc.py +129 -0
  1345. mindspore/train/metrics/bleu_score.py +170 -0
  1346. mindspore/train/metrics/confusion_matrix.py +700 -0
  1347. mindspore/train/metrics/cosine_similarity.py +109 -0
  1348. mindspore/train/metrics/dice.py +116 -0
  1349. mindspore/train/metrics/error.py +175 -0
  1350. mindspore/train/metrics/fbeta.py +167 -0
  1351. mindspore/train/metrics/hausdorff_distance.py +333 -0
  1352. mindspore/train/metrics/loss.py +97 -0
  1353. mindspore/train/metrics/mean_surface_distance.py +189 -0
  1354. mindspore/train/metrics/metric.py +373 -0
  1355. mindspore/train/metrics/occlusion_sensitivity.py +225 -0
  1356. mindspore/train/metrics/perplexity.py +133 -0
  1357. mindspore/train/metrics/precision.py +160 -0
  1358. mindspore/train/metrics/recall.py +159 -0
  1359. mindspore/train/metrics/roc.py +223 -0
  1360. mindspore/train/metrics/root_mean_square_surface_distance.py +191 -0
  1361. mindspore/train/metrics/topk.py +167 -0
  1362. mindspore/train/mind_ir_pb2.py +1908 -0
  1363. mindspore/train/model.py +2252 -0
  1364. mindspore/train/node_strategy_pb2.py +653 -0
  1365. mindspore/train/print_pb2.py +184 -0
  1366. mindspore/train/profiling_parallel_pb2.py +151 -0
  1367. mindspore/train/serialization.py +3325 -0
  1368. mindspore/train/summary/__init__.py +23 -0
  1369. mindspore/train/summary/_lineage_adapter.py +41 -0
  1370. mindspore/train/summary/_summary_adapter.py +496 -0
  1371. mindspore/train/summary/_writer_pool.py +207 -0
  1372. mindspore/train/summary/enums.py +56 -0
  1373. mindspore/train/summary/summary_record.py +581 -0
  1374. mindspore/train/summary/writer.py +167 -0
  1375. mindspore/train/summary_pb2.py +1165 -0
  1376. mindspore/train/train_thor/__init__.py +20 -0
  1377. mindspore/train/train_thor/convert_utils.py +268 -0
  1378. mindspore/train/train_thor/dataset_helper.py +192 -0
  1379. mindspore/train/train_thor/model_thor.py +257 -0
  1380. mindspore/utils/__init__.py +21 -0
  1381. mindspore/utils/utils.py +60 -0
  1382. mindspore/version.py +1 -0
  1383. mindspore-2.4.0.dist-info/METADATA +352 -0
  1384. mindspore-2.4.0.dist-info/RECORD +1387 -0
  1385. mindspore-2.4.0.dist-info/WHEEL +5 -0
  1386. mindspore-2.4.0.dist-info/entry_points.txt +3 -0
  1387. mindspore-2.4.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,2129 @@
1
+ /**
2
+ * Copyright 2020-2024 Huawei Technologies Co., Ltd
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_VISION_H_
18
+ #define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_VISION_H_
19
+
20
+ #include <map>
21
+ #include <memory>
22
+ #include <string>
23
+ #include <tuple>
24
+ #include <utility>
25
+ #include <vector>
26
+
27
+ #include "include/api/dual_abi_helper.h"
28
+ #include "include/api/status.h"
29
+ #include "include/dataset/constants.h"
30
+ #include "include/dataset/transforms.h"
31
+ #include "include/dataset/vision_lite.h"
32
+
33
+ namespace mindspore {
34
+ namespace dataset {
35
+ class TensorOperation;
36
+
37
+ // Transform operations for performing computer vision.
38
+ namespace vision {
39
+ /// \brief Apply brightness adjustment on input image.
40
+ class DATASET_API AdjustBrightness final : public TensorTransform {
41
+ public:
42
+ /// \brief Constructor.
43
+ /// \param[in] brightness_factor Adjusts image brightness, a non-negative real number.
44
+ /// \par Example
45
+ /// \code
46
+ /// /* Define operations */
47
+ /// auto decode_op = vision::Decode();
48
+ /// auto adjust_brightness_op = vision::AdjustBrightness(2.0);
49
+ ///
50
+ /// /* dataset is an instance of Dataset object */
51
+ /// dataset = dataset->Map({decode_op, adjust_brightness_op}, // operations
52
+ /// {"image"}); // input columns
53
+ /// \endcode
54
+ explicit AdjustBrightness(float brightness_factor);
55
+
56
+ /// \brief Destructor.
57
+ ~AdjustBrightness() override = default;
58
+
59
+ protected:
60
+ /// \brief Function to convert TensorTransform object into a TensorOperation object.
61
+ /// \return Shared pointer to TensorOperation object.
62
+ std::shared_ptr<TensorOperation> Parse() override;
63
+
64
+ private:
65
+ struct Data;
66
+ std::shared_ptr<Data> data_;
67
+ };
68
+
69
+ /// \brief Apply contrast adjustment on input image.
70
+ class DATASET_API AdjustContrast final : public TensorTransform {
71
+ public:
72
+ /// \brief Constructor.
73
+ /// \param[in] contrast_factor Adjusts image contrast, a non-negative real number.
74
+ /// \par Example
75
+ /// \code
76
+ /// /* Define operations */
77
+ /// auto decode_op = vision::Decode();
78
+ /// auto adjust_contrast_op = vision::AdjustContrast(10.0);
79
+ ///
80
+ /// /* dataset is an instance of Dataset object */
81
+ /// dataset = dataset->Map({decode_op, adjust_contrast_op}, // operations
82
+ /// {"image"}); // input columns
83
+ /// \endcode
84
+ explicit AdjustContrast(float contrast_factor);
85
+
86
+ /// \brief Destructor.
87
+ ~AdjustContrast() override = default;
88
+
89
+ protected:
90
+ /// \brief Function to convert TensorTransform object into a TensorOperation object.
91
+ /// \return Shared pointer to TensorOperation object.
92
+ std::shared_ptr<TensorOperation> Parse() override;
93
+
94
+ private:
95
+ struct Data;
96
+ std::shared_ptr<Data> data_;
97
+ };
98
+
99
+ /// \brief AdjustGamma TensorTransform.
100
+ /// \note Apply gamma correction on input image.
101
+ class DATASET_API AdjustGamma final : public TensorTransform {
102
+ public:
103
+ /// \brief Constructor.
104
+ /// \param[in] gamma Non-negative real number, which makes the output image pixel value
105
+ /// exponential in relation to the input image pixel value.
106
+ /// \param[in] gain The constant multiplier. Default: 1.0.
107
+ /// \par Example
108
+ /// \code
109
+ /// /* Define operations */
110
+ /// auto decode_op = vision::Decode();
111
+ /// auto adjust_gamma_op = vision::AdjustGamma(10.0);
112
+ ///
113
+ /// /* dataset is an instance of Dataset object */
114
+ /// dataset = dataset->Map({decode_op, adjust_gamma_op}, // operations
115
+ /// {"image"}); // input columns
116
+ /// \endcode
117
+ explicit AdjustGamma(float gamma, float gain = 1.0);
118
+
119
+ /// \brief Destructor.
120
+ ~AdjustGamma() override = default;
121
+
122
+ protected:
123
+ /// \brief Function to convert TensorTransform object into a TensorOperation object.
124
+ /// \return Shared pointer to TensorOperation object.
125
+ std::shared_ptr<TensorOperation> Parse() override;
126
+
127
+ private:
128
+ struct Data;
129
+ std::shared_ptr<Data> data_;
130
+ };
131
+
132
+ /// \brief Apply hue adjustment on the input image.
133
+ class DATASET_API AdjustHue final : public TensorTransform {
134
+ public:
135
+ /// \brief Constructor.
136
+ /// \param[in] hue_factor How much to shift the hue channel, must be in the interval [-0.5, 0.5].
137
+ /// \par Example
138
+ /// \code
139
+ /// /* Define operations */
140
+ /// auto decode_op = vision::Decode();
141
+ /// auto adjust_hue_op = vision::AdjustHue(0.2);
142
+ ///
143
+ /// /* dataset is an instance of Dataset object */
144
+ /// dataset = dataset->Map({decode_op, adjust_hue_op}, // operations
145
+ /// {"image"}); // input columns
146
+ /// \endcode
147
+ explicit AdjustHue(float hue_factor);
148
+
149
+ /// \brief Destructor.
150
+ ~AdjustHue() override = default;
151
+
152
+ protected:
153
+ /// \brief Function to convert TensorTransform object into a TensorOperation object.
154
+ /// \return Shared pointer to TensorOperation object.
155
+ std::shared_ptr<TensorOperation> Parse() override;
156
+
157
+ private:
158
+ struct Data;
159
+ std::shared_ptr<Data> data_;
160
+ };
161
+
162
+ /// \brief Adjust the color saturation of the input image.
163
+ class DATASET_API AdjustSaturation final : public TensorTransform {
164
+ public:
165
+ /// \brief Constructor.
166
+ /// \param[in] saturation_factor Adjusts image saturation, a non-negative real number.
167
+ /// \par Example
168
+ /// \code
169
+ /// /* Define operations */
170
+ /// auto decode_op = vision::Decode();
171
+ /// auto adjust_saturation_op = vision::AdjustSaturation(2.0);
172
+ ///
173
+ /// /* dataset is an instance of Dataset object */
174
+ /// dataset = dataset->Map({decode_op, adjust_saturation_op}, // operations
175
+ /// {"image"}); // input columns
176
+ /// \endcode
177
+ explicit AdjustSaturation(float saturation_factor);
178
+
179
+ /// \brief Destructor.
180
+ ~AdjustSaturation() override = default;
181
+
182
+ protected:
183
+ /// \brief Function to convert TensorTransform object into a TensorOperation object.
184
+ /// \return Shared pointer to TensorOperation object.
185
+ std::shared_ptr<TensorOperation> Parse() override;
186
+
187
+ private:
188
+ struct Data;
189
+ std::shared_ptr<Data> data_;
190
+ };
191
+
192
+ /// \brief Apply sharpness adjustment on the input image. The input image is expected to be in [H, W, C] or [H, W] format.
193
+ class DATASET_API AdjustSharpness final : public TensorTransform {
194
+ public:
195
+ /// \brief Constructor.
196
+ /// \param[in] sharpness_factor How much to adjust the sharpness. Can be any non-negative real number.
197
+ /// 0 gives a blurred image, 1 gives the original image, while 2 increases the sharpness by a factor of 2.
198
+ /// \par Example
199
+ /// \code
200
+ /// /* Define operations */
201
+ /// auto decode_op = vision::Decode();
202
+ /// auto adjust_sharpness_op = vision::AdjustSharpness(2.0);
203
+ ///
204
+ /// /* dataset is an instance of Dataset object */
205
+ /// dataset = dataset->Map({decode_op, adjust_sharpness_op}, // operations
206
+ /// {"image"}); // input columns
207
+ /// \endcode
208
+ explicit AdjustSharpness(float sharpness_factor);
209
+
210
+ /// \brief Destructor.
211
+ ~AdjustSharpness() override = default;
212
+
213
+ protected:
214
+ /// \brief Function to convert TensorTransform object into a TensorOperation object.
215
+ /// \return Shared pointer to TensorOperation object.
216
+ std::shared_ptr<TensorOperation> Parse() override;
217
+
218
+ private:
219
+ struct Data;
220
+ std::shared_ptr<Data> data_;
221
+ };
222
+
223
+ /// \brief Apply AutoAugment data augmentation method.
224
+ class DATASET_API AutoAugment final : public TensorTransform {
225
+ public:
226
+ /// \brief Constructor.
227
+ /// \param[in] policy An enum for the data auto augmentation policy (default=AutoAugmentPolicy::kImageNet).
228
+ /// - AutoAugmentPolicy::kImageNet, AutoAugment policy learned on the ImageNet dataset.
229
+ /// - AutoAugmentPolicy::kCifar10, AutoAugment policy learned on the Cifar10 dataset.
230
+ /// - AutoAugmentPolicy::kSVHN, AutoAugment policy learned on the SVHN dataset.
231
+ /// \param[in] interpolation An enum for the mode of interpolation (default=InterpolationMode::kNearestNeighbour).
232
+ /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation.
233
+ /// - InterpolationMode::kLinear, Interpolation method is bilinear interpolation.
234
+ /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation.
235
+ /// - InterpolationMode::kArea, Interpolation method is pixel area interpolation.
236
+ /// \param[in] fill_value A vector representing the pixel intensity of the borders (default={0, 0, 0}).
237
+ /// \par Example
238
+ /// \code
239
+ /// /* Define operations */
240
+ /// auto decode_op = vision::Decode();
241
+ /// auto auto_augment_op = vision::AutoAugment(AutoAugmentPolicy::kImageNet,
242
+ /// InterpolationMode::kNearestNeighbour, {0, 0, 0});
243
+ /// /* dataset is an instance of Dataset object */
244
+ /// dataset = dataset->Map({decode_op, auto_augment_op}, // operations
245
+ /// {"image"}); // input columns
246
+ /// \endcode
247
+ explicit AutoAugment(AutoAugmentPolicy policy = AutoAugmentPolicy::kImageNet,
248
+ InterpolationMode interpolation = InterpolationMode::kNearestNeighbour,
249
+ const std::vector<uint8_t> &fill_value = {0, 0, 0});
250
+
251
+ /// \brief Destructor.
252
+ ~AutoAugment() override = default;
253
+
254
+ protected:
255
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
256
+ /// \return Shared pointer to TensorOperation object.
257
+ std::shared_ptr<TensorOperation> Parse() override;
258
+
259
+ private:
260
+ struct Data;
261
+ std::shared_ptr<Data> data_;
262
+ };
263
+
264
+ /// \brief Apply automatic contrast on the input image.
265
+ class DATASET_API AutoContrast final : public TensorTransform {
266
+ public:
267
+ /// \brief Constructor.
268
+ /// \param[in] cutoff Percent of pixels to cut off from the histogram; the valid range of the cutoff value is 0 to 50.
269
+ /// \param[in] ignore Pixel values to ignore.
270
+ /// \par Example
271
+ /// \code
272
+ /// /* Define operations */
273
+ /// auto decode_op = vision::Decode();
274
+ /// auto autocontrast_op = vision::AutoContrast(10.0, {10, 20});
275
+ ///
276
+ /// /* dataset is an instance of Dataset object */
277
+ /// dataset = dataset->Map({decode_op, autocontrast_op}, // operations
278
+ /// {"image"}); // input columns
279
+ /// \endcode
280
+ explicit AutoContrast(float cutoff = 0.0, const std::vector<uint32_t> &ignore = {});
281
+
282
+ /// \brief Destructor.
283
+ ~AutoContrast() override = default;
284
+
285
+ protected:
286
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
287
+ /// \return Shared pointer to TensorOperation object.
288
+ std::shared_ptr<TensorOperation> Parse() override;
289
+
290
+ private:
291
+ struct Data;
292
+ std::shared_ptr<Data> data_;
293
+ };
294
+
295
+ /// \brief BoundingBoxAugment TensorTransform.
296
+ /// \note Apply a given image transform on a random selection of bounding box regions of a given image.
297
+ class DATASET_API BoundingBoxAugment final : public TensorTransform {
298
+ public:
299
+ /// \brief Constructor.
300
+ /// \param[in] transform Raw pointer to the TensorTransform operation.
301
+ /// \param[in] ratio Ratio of bounding boxes to apply augmentation on. Range: [0, 1] (default=0.3).
302
+ /// \par Example
303
+ /// \code
304
+ /// /* Define operations */
305
+ /// TensorTransform *rotate_op = new vision::RandomRotation({-180, 180});
306
+ /// auto bbox_aug_op = vision::BoundingBoxAugment(rotate_op, 0.5);
307
+ ///
308
+ /// /* dataset is an instance of Dataset object */
309
+ /// dataset = dataset->Map({bbox_aug_op}, // operations
310
+ /// {"image", "bbox"}); // input columns
311
+ /// \endcode
312
+ explicit BoundingBoxAugment(TensorTransform *transform, float ratio = 0.3);
313
+
314
+ /// \brief Constructor.
315
+ /// \param[in] transform Smart pointer to the TensorTransform operation.
316
+ /// \param[in] ratio Ratio of bounding boxes to which augmentation is applied. Range: [0, 1] (default=0.3).
317
+ /// \par Example
318
+ /// \code
319
+ /// /* Define operations */
320
+ /// std::shared_ptr<TensorTransform> flip_op = std::make_shared<vision::RandomHorizontalFlip>(0.5);
321
+ /// std::shared_ptr<TensorTransform> bbox_aug_op = std::make_shared<vision::BoundingBoxAugment>(flip_op, 0.1);
322
+ ///
323
+ /// /* dataset is an instance of Dataset object */
324
+ /// dataset = dataset->Map({bbox_aug_op}, // operations
325
+ /// {"image", "bbox"}); // input columns
326
+ /// \endcode
327
+ explicit BoundingBoxAugment(const std::shared_ptr<TensorTransform> &transform, float ratio = 0.3);
328
+
329
+ /// \brief Constructor.
330
+ /// \param[in] transform Object pointer to the TensorTransform operation.
331
+ /// \param[in] ratio Ratio of bounding boxes to which augmentation is applied. Range: [0, 1] (default=0.3).
332
+ /// \par Example
333
+ /// \code
334
+ /// /* Define operations */
335
+ /// vision::RandomColor random_color_op = vision::RandomColor(0.5, 1.0);
336
+ /// vision::BoundingBoxAugment bbox_aug_op = vision::BoundingBoxAugment(random_color_op, 0.8);
337
+ ///
338
+ /// /* dataset is an instance of Dataset object */
339
+ /// dataset = dataset->Map({bbox_aug_op}, // operations
340
+ /// {"image", "bbox"}); // input columns
341
+ /// \endcode
342
+ explicit BoundingBoxAugment(const std::reference_wrapper<TensorTransform> &transform, float ratio = 0.3);
343
+
344
+ /// \brief Destructor.
345
+ ~BoundingBoxAugment() override = default;
346
+
347
+ protected:
348
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
349
+ /// \return Shared pointer to TensorOperation object.
350
+ std::shared_ptr<TensorOperation> Parse() override;
351
+
352
+ private:
353
+ struct Data;
354
+ std::shared_ptr<Data> data_;
355
+ };
356
+
357
+ /// \brief Change the color space of the image.
358
+ class DATASET_API ConvertColor final : public TensorTransform {
359
+ public:
360
+ /// \brief Constructor.
361
+ /// \param[in] convert_mode The mode of image channel conversion.
362
+ /// \par Example
363
+ /// \code
364
+ /// /* dataset is an instance of Dataset object */
365
+ /// dataset = dataset->Map({std::make_shared<vision::Decode>(),
366
+ /// std::make_shared<vision::ConvertColor>(ConvertMode::COLOR_BGR2RGB)}, // operations
367
+ /// {"image"}); // input columns
368
+ /// \endcode
369
+ explicit ConvertColor(ConvertMode convert_mode);
370
+
371
+ /// \brief Destructor.
372
+ ~ConvertColor() override = default;
373
+
374
+ protected:
375
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
376
+ /// \return Shared pointer to TensorOperation object.
377
+ std::shared_ptr<TensorOperation> Parse() override;
378
+
379
+ private:
380
+ struct Data;
381
+ std::shared_ptr<Data> data_;
382
+ };
383
+
384
+ /// \brief Mask a random section of each image with the corresponding part of another randomly
385
+ /// selected image in that batch.
386
+ class DATASET_API CutMixBatch final : public TensorTransform {
387
+ public:
388
+ /// \brief Constructor.
389
+ /// \param[in] image_batch_format The format of the batch.
390
+ /// \param[in] alpha The hyperparameter of beta distribution (default = 1.0).
391
+ /// \param[in] prob The probability by which CutMix is applied to each image (default = 1.0).
392
+ /// \par Example
393
+ /// \code
394
+ /// /* dataset is an instance of Dataset object */
395
+ /// dataset = dataset->Batch(5);
396
+ /// dataset = dataset->Map({std::make_shared<vision::CutMixBatch>(ImageBatchFormat::kNHWC)}, // operations
397
+ /// {"image", "label"}); // input columns
398
+ /// \endcode
399
+ explicit CutMixBatch(ImageBatchFormat image_batch_format, float alpha = 1.0, float prob = 1.0);
400
+
401
+ /// \brief Destructor.
402
+ ~CutMixBatch() override = default;
403
+
404
+ protected:
405
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
406
+ /// \return Shared pointer to TensorOperation object.
407
+ std::shared_ptr<TensorOperation> Parse() override;
408
+
409
+ private:
410
+ struct Data;
411
+ std::shared_ptr<Data> data_;
412
+ };
413
+
414
+ /// \brief Randomly cut (mask) out a given number of square patches from the input image.
415
+ class DATASET_API CutOut final : public TensorTransform {
416
+ public:
417
+ /// \brief Constructor.
418
+ /// \param[in] length Integer representing the side length of each square patch.
419
+ /// \param[in] num_patches Integer representing the number of patches to be cut out of an image.
420
+ /// \param[in] is_hwc A boolean to indicate whether the input image is in HWC format (true) or CHW
421
+ /// format (false) (default = true).
422
+ /// \par Example
423
+ /// \code
424
+ /// /* dataset is an instance of Dataset object */
425
+ /// dataset = dataset->Map({std::make_shared<vision::Decode>(),
426
+ /// std::make_shared<vision::CutOut>(1, 4, true)}, // operations
427
+ /// {"image"}); // input columns
428
+ /// \endcode
429
+ explicit CutOut(int32_t length, int32_t num_patches = 1, bool is_hwc = true);
430
+
431
+ /// \brief Destructor.
432
+ ~CutOut() override = default;
433
+
434
+ protected:
435
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
436
+ /// \return Shared pointer to TensorOperation object.
437
+ std::shared_ptr<TensorOperation> Parse() override;
438
+
439
+ private:
440
+ struct Data;
441
+ std::shared_ptr<Data> data_;
442
+ };
443
+
444
+ #ifdef ENABLE_FFMPEG
445
+ /// \brief Decode the input video.
446
+ class DATASET_API DecodeVideo final : public TensorTransform {
447
+ public:
448
+ /// \brief Constructor. It will decode a vector containing a raw video tensor into a vector containing two tensors.
449
+ /// The raw video tensor in the input vector should be a 1D array of UINT8.
450
+ /// The first tensor in the output vector is a visual tensor with shape <T, H, W, C> and type DE_UINT8, in RGB
451
+ /// pixel order. The second tensor in the output vector is an audio tensor with shape <C, L>.
452
+ /// \par Example
453
+ /// \code
454
+ /// /* Read video file into tensor */
455
+ /// mindspore::MSTensor video;
456
+ /// ASSERT_OK(mindspore::dataset::vision::ReadFile("/path/to/video/file", &video));
457
+ /// std::vector<mindspore::MSTensor> input_tensor;
458
+ /// std::vector<mindspore::MSTensor> output_tensor;
459
+ /// input_tensor.push_back(video);
460
+ /// auto decode_video = vision::DecodeVideo();
461
+ /// auto transform = Execute(decode_video);
462
+ /// Status rc = transform(input_tensor, &output_tensor);
463
+ /// \endcode
464
+ DecodeVideo();
465
+
466
+ /// \brief Destructor.
467
+ ~DecodeVideo() = default;
468
+
469
+ protected:
470
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
471
+ /// \return Shared pointer to TensorOperation object.
472
+ std::shared_ptr<TensorOperation> Parse() override;
473
+ };
474
+ #endif
475
+
476
+ /// \brief Encode the image as JPEG data.
477
+ /// \param[in] image The image to be encoded.
478
+ /// \param[out] output The Tensor data.
479
+ /// \param[in] quality The quality for the output tensor, in range of [1, 100]. Default: 75.
480
+ /// \return The status code.
481
+ Status DATASET_API EncodeJpeg(const mindspore::MSTensor &image, mindspore::MSTensor *output, int quality = 75);
482
+
483
+ /// \brief Encode the image as PNG data.
484
+ /// \param[in] image The image to be encoded.
485
+ /// \param[out] output The Tensor data.
486
+ /// \param[in] compression_level The compression_level for encoding, in range of [0, 9]. Default: 6.
487
+ /// \return The status code.
488
+ Status DATASET_API EncodePng(const mindspore::MSTensor &image, mindspore::MSTensor *output, int compression_level = 6);
489
+
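Unlike the transform classes above, EncodeJpeg and EncodePng are free functions and the header carries no inline example for them. A minimal sketch of how they might be called, assuming image already holds a decoded HWC uint8 MSTensor (variable names and the quality/compression values are illustrative, not taken from the header):

    mindspore::MSTensor image;        // a decoded HWC uint8 image tensor
    mindspore::MSTensor jpeg_bytes;
    mindspore::MSTensor png_bytes;
    // quality must be in [1, 100]; compression_level must be in [0, 9]
    Status rc = mindspore::dataset::vision::EncodeJpeg(image, &jpeg_bytes, 90);
    rc = mindspore::dataset::vision::EncodePng(image, &png_bytes, 9);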
490
+ /// \brief Apply histogram equalization on the input image.
491
+ class DATASET_API Equalize final : public TensorTransform {
492
+ public:
493
+ /// \brief Constructor.
494
+ /// \par Example
495
+ /// \code
496
+ /// /* dataset is an instance of Dataset object */
497
+ /// dataset = dataset->Map({std::make_shared<vision::Decode>(),
498
+ /// std::make_shared<vision::Equalize>()}, // operations
499
+ /// {"image"}); // input columns
500
+ /// \endcode
501
+ Equalize();
502
+
503
+ /// \brief Destructor.
504
+ ~Equalize() override = default;
505
+
506
+ protected:
507
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
508
+ /// \return Shared pointer to TensorOperation object.
509
+ std::shared_ptr<TensorOperation> Parse() override;
510
+ };
511
+
512
+ /// \brief Erase a region of the input image with the given value.
513
+ class DATASET_API Erase final : public TensorTransform {
514
+ public:
515
+ /// \brief Constructor.
516
+ /// \param[in] top Vertical ordinate of the upper left corner of erased region.
517
+ /// \param[in] left Horizontal ordinate of the upper left corner of erased region.
518
+ /// \param[in] height Height of erased region.
519
+ /// \param[in] width Width of erased region.
520
+ /// \param[in] value Pixel value used to pad the erased area.
521
+ /// If a single integer is provided, it will be used for all RGB channels.
522
+ /// If a sequence of length 3 is provided, it will be used for R, G, B channels respectively. Default: 0.
523
+ /// \param[in] inplace Whether to erase inplace. Default: False.
524
+ /// \par Example
525
+ /// \code
526
+ /// /* dataset is an instance of Dataset object */
527
+ /// dataset = dataset->Map({std::make_shared<vision::Decode>(),
528
+ /// std::make_shared<vision::Erase>(10, 10, 10, 10)}, // operations
529
+ /// {"image"}); // input columns
530
+ /// \endcode
531
+ Erase(int32_t top, int32_t left, int32_t height, int32_t width, const std::vector<float> &value = {0., 0., 0.},
532
+ bool inplace = false);
533
+
534
+ /// \brief Destructor.
535
+ ~Erase() override = default;
536
+
537
+ protected:
538
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
539
+ /// \return Shared pointer to TensorOperation object.
540
+ std::shared_ptr<TensorOperation> Parse() override;
541
+
542
+ private:
543
+ struct Data;
544
+ std::shared_ptr<Data> data_;
545
+ };
546
+
547
+ /// \brief Get the number of input image channels.
548
+ /// \param[in] image Tensor of the image.
549
+ /// \param[out] channels Channels of the image.
550
+ /// \return The status code.
551
+ Status DATASET_API GetImageNumChannels(const mindspore::MSTensor &image, dsize_t *channels);
552
+
553
+ /// \brief Get the size of input image.
554
+ /// \param[in] image Tensor of the image.
555
+ /// \param[out] size Size of the image as [height, width].
556
+ /// \return The status code.
557
+ Status DATASET_API GetImageSize(const mindspore::MSTensor &image, std::vector<dsize_t> *size);
558
+
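The two query helpers above likewise have no inline example. A minimal sketch, again assuming a decoded image tensor is available (variable names are illustrative):

    mindspore::MSTensor image;              // a decoded image tensor
    dsize_t channels = 0;
    std::vector<dsize_t> size;              // will hold {height, width}
    Status rc = mindspore::dataset::vision::GetImageNumChannels(image, &channels);
    rc = mindspore::dataset::vision::GetImageSize(image, &size);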
559
+ /// \brief Flip the input image horizontally.
560
+ class DATASET_API HorizontalFlip final : public TensorTransform {
561
+ public:
562
+ /// \brief Constructor.
563
+ /// \par Example
564
+ /// \code
565
+ /// /* dataset is an instance of Dataset object */
566
+ /// dataset = dataset->Map({std::make_shared<vision::Decode>(),
567
+ /// std::make_shared<vision::HorizontalFlip>()}, // operations
568
+ /// {"image"}); // input columns
569
+ /// \endcode
570
+ HorizontalFlip();
571
+
572
+ /// \brief Destructor.
573
+ ~HorizontalFlip() override = default;
574
+
575
+ protected:
576
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
577
+ /// \return Shared pointer to TensorOperation object.
578
+ std::shared_ptr<TensorOperation> Parse() override;
579
+ };
580
+
581
+ /// \brief Apply invert on the input image in RGB mode.
582
+ class DATASET_API Invert final : public TensorTransform {
583
+ public:
584
+ /// \brief Constructor.
585
+ /// \par Example
586
+ /// \code
587
+ /// /* dataset is an instance of Dataset object */
588
+ /// dataset = dataset->Map({std::make_shared<vision::Decode>(),
589
+ /// std::make_shared<vision::Invert>()}, // operations
590
+ /// {"image"}); // input columns
591
+ /// \endcode
592
+ Invert();
593
+
594
+ /// \brief Destructor.
595
+ ~Invert() override = default;
596
+
597
+ protected:
598
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
599
+ /// \return Shared pointer to TensorOperation object.
600
+ std::shared_ptr<TensorOperation> Parse() override;
601
+ };
602
+
603
+ /// \brief Apply MixUp transformation on an input batch of images and labels. The labels must be in
604
+ /// one-hot format and Batch must be called before calling this function.
605
+ class DATASET_API MixUpBatch final : public TensorTransform {
606
+ public:
607
+ /// \brief Constructor.
608
+ /// \param[in] alpha The hyperparameter of the beta distribution (default = 1.0).
609
+ /// \par Example
610
+ /// \code
611
+ /// /* dataset is an instance of Dataset object */
612
+ /// dataset = dataset->Batch(5);
613
+ /// dataset = dataset->Map({std::make_shared<vision::MixUpBatch>()}, // operations
614
+ /// {"image"}); // input columns
615
+ /// \endcode
616
+ explicit MixUpBatch(float alpha = 1.0);
617
+
618
+ /// \brief Destructor.
619
+ ~MixUpBatch() override = default;
620
+
621
+ protected:
622
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
623
+ /// \return Shared pointer to TensorOperation object.
624
+ std::shared_ptr<TensorOperation> Parse() override;
625
+
626
+ private:
627
+ struct Data;
628
+ std::shared_ptr<Data> data_;
629
+ };
630
+
631
+ /// \brief Normalize the input image with respect to mean and standard deviation and pad an extra
632
+ /// channel with value zero.
633
+ class DATASET_API NormalizePad final : public TensorTransform {
634
+ public:
635
+ /// \brief Constructor.
636
+ /// \param[in] mean A vector of mean values for each channel, with respect to channel order.
637
+ /// The mean values must be in range [0.0, 255.0].
638
+ /// \param[in] std A vector of standard deviations for each channel, with respect to channel order.
639
+ /// The standard deviation values must be in range (0.0, 255.0].
640
+ /// \param[in] dtype The output datatype of Tensor.
641
+ /// It must be "float32" or "float16" (default = "float32").
642
+ /// \param[in] is_hwc A boolean to indicate whether the input image is in HWC format (true) or CHW
643
+ /// format (false) (default = true).
644
+ /// \par Example
645
+ /// \code
646
+ /// /* Define operations */
647
+ /// auto decode_op = vision::Decode();
648
+ /// auto normalize_pad_op = vision::NormalizePad({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0});
649
+ ///
650
+ /// /* dataset is an instance of Dataset object */
651
+ /// dataset = dataset->Map({decode_op, normalize_pad_op}, // operations
652
+ /// {"image"}); // input columns
653
+ /// \endcode
654
+ NormalizePad(const std::vector<float> &mean, const std::vector<float> &std, const std::string &dtype = "float32",
655
+ bool is_hwc = true)
656
+ : NormalizePad(mean, std, StringToChar(dtype), is_hwc) {}
657
+
658
+ NormalizePad(const std::vector<float> &mean, const std::vector<float> &std, const std::vector<char> &dtype,
659
+ bool is_hwc = true);
660
+
661
+ /// \brief Destructor.
662
+ ~NormalizePad() override = default;
663
+
664
+ protected:
665
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
666
+ /// \return Shared pointer to TensorOperation object.
667
+ std::shared_ptr<TensorOperation> Parse() override;
668
+
669
+ private:
670
+ struct Data;
671
+ std::shared_ptr<Data> data_;
672
+ };
673
+
674
+ /// \brief Pad the image to a fixed size.
675
+ class DATASET_API PadToSize final : public TensorTransform {
676
+ public:
677
+ /// \brief Constructor.
678
+ /// \param[in] size A two element vector representing the target size to pad, in order of [height, width].
679
+ /// \param[in] offset A two element vector representing the lengths to pad on the top and left,
680
+ /// in order of [top, left]. Default: {}, which means to pad symmetrically, keeping the original image in the center.
681
+ /// \param[in] fill_value A vector representing the pixel intensity of the borders. Only valid if the
682
+ /// padding_mode is BorderType.kConstant. If 1 value is provided, it is used for all RGB channels.
683
+ /// If 3 values are provided, they are used to fill the R, G, B channels respectively. Default: {0}.
684
+ /// \param[in] padding_mode The method of padding, which can be one of BorderType.kConstant, BorderType.kEdge,
685
+ /// BorderType.kReflect or BorderType.kSymmetric. Default: BorderType.kConstant.
686
+ /// - BorderType.kConstant, pads with a constant value.
687
+ /// - BorderType.kEdge, pads with the last value at the edge of the image.
688
+ /// - BorderType.kReflect, pads with reflection of the image omitting the last value on the edge.
689
+ /// - BorderType.kSymmetric, pads with reflection of the image repeating the last value on the edge.
690
+ /// \par Example
691
+ /// \code
692
+ /// /* Define operations */
693
+ /// auto decode_op = vision::Decode();
694
+ /// auto pad_to_size_op = vision::PadToSize({256, 256}, {10, 20}, {255, 255, 255});
695
+ ///
696
+ /// /* dataset is an instance of Dataset object */
697
+ /// dataset = dataset->Map({decode_op, pad_to_size_op}, // operations
698
+ /// {"image"}); // input columns
699
+ /// \endcode
700
+ explicit PadToSize(const std::vector<int32_t> &size, const std::vector<int32_t> &offset = {},
701
+ const std::vector<uint8_t> &fill_value = {0}, BorderType padding_mode = BorderType::kConstant);
702
+
703
+ /// \brief Destructor.
704
+ ~PadToSize() override = default;
705
+
706
+ protected:
707
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
708
+ /// \return Shared pointer to TensorOperation object.
709
+ std::shared_ptr<TensorOperation> Parse() override;
710
+
711
+ private:
712
+ struct Data;
713
+ std::shared_ptr<Data> data_;
714
+ };
715
+
716
+ /// \brief Perform perspective transform on the image.
717
+ class DATASET_API Perspective final : public TensorTransform {
718
+ public:
719
+ /// \brief Constructor.
720
+ /// \param[in] start_points List containing four lists of two integers corresponding to four
721
+ /// corners [top-left, top-right, bottom-right, bottom-left] of the original image.
722
+ /// \param[in] end_points List containing four lists of two integers corresponding to four
723
+ /// corners [top-left, top-right, bottom-right, bottom-left] of the transformed image.
724
+ /// \param[in] interpolation An enum for the mode of interpolation. Default: InterpolationMode::kLinear.
725
+ /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation.
726
+ /// - InterpolationMode::kLinear, Interpolation method is bilinear interpolation.
727
+ /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation.
728
+ /// - InterpolationMode::kArea, Interpolation method is pixel area interpolation.
729
+ /// - InterpolationMode::kCubicPil, Interpolation method is bicubic interpolation as implemented in Pillow.
730
+ /// \par Example
731
+ /// \code
732
+ /// /* Define operations */
733
+ /// auto decode_op = vision::Decode();
734
+ /// std::vector<std::vector<int32_t>> start_points = {{0, 0}, {1, 0}, {1, 1}, {0, 1}};
735
+ /// std::vector<std::vector<int32_t>> end_points = {{0, 2}, {2, 0}, {2, 2}, {0, 2}};
736
+ /// auto perspective_op = vision::Perspective(start_points, end_points, InterpolationMode::kLinear);
737
+ ///
738
+ /// /* dataset is an instance of Dataset object */
739
+ /// dataset = dataset->Map({decode_op, perspective_op}, // operations
740
+ /// {"image"}); // input columns
741
+ /// \endcode
742
+ Perspective(const std::vector<std::vector<int32_t>> &start_points,
743
+ const std::vector<std::vector<int32_t>> &end_points, InterpolationMode interpolation);
744
+
745
+ /// \brief Destructor.
746
+ ~Perspective() override = default;
747
+
748
+ protected:
749
+ /// \brief Function to convert TensorTransform object into a TensorOperation object.
750
+ /// \return Shared pointer to TensorOperation object.
751
+ std::shared_ptr<TensorOperation> Parse() override;
752
+
753
+ private:
754
+ struct Data;
755
+ std::shared_ptr<Data> data_;
756
+ };
757
+
758
+ /// \brief Posterize an image by reducing the number of bits for each color channel.
759
+ class DATASET_API Posterize final : public TensorTransform {
760
+ public:
761
+ /// \brief Constructor.
762
+ /// \param[in] bits The number of bits to keep for each channel,
763
+ /// should be in range of [0, 8].
764
+ /// \par Example
765
+ /// \code
766
+ /// /* Define operations */
767
+ /// auto decode_op = vision::Decode();
768
+ /// auto posterize_op = vision::Posterize(8);
769
+ ///
770
+ /// /* dataset is an instance of Dataset object */
771
+ /// dataset = dataset->Map({decode_op, posterize_op}, // operations
772
+ /// {"image"}); // input columns
773
+ /// \endcode
774
+ explicit Posterize(uint8_t bits);
775
+
776
+ /// \brief Destructor.
777
+ ~Posterize() override = default;
778
+
779
+ protected:
780
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
781
+ /// \return Shared pointer to TensorOperation object.
782
+ std::shared_ptr<TensorOperation> Parse() override;
783
+
784
+ private:
785
+ struct Data;
786
+ std::shared_ptr<Data> data_;
787
+ };
788
+
789
+ /// \brief Apply RandAugment data augmentation method.
790
+ class DATASET_API RandAugment final : public TensorTransform {
791
+ public:
792
+ /// \brief Constructor.
793
+ /// \param[in] num_ops Number of augmentation transformations to apply sequentially. Default: 2.
794
+ /// \param[in] magnitude Magnitude for all the transformations. Default: 9.
795
+ /// \param[in] num_magnitude_bins The number of different magnitude values. Default: 31.
796
+ /// \param[in] interpolation An enum for the mode of interpolation. Default: InterpolationMode::kNearestNeighbour.
797
+ /// - InterpolationMode::kLinear, Interpolation method is bilinear interpolation.
798
+ /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation.
799
+ /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation.
800
+ /// \param[in] fill_value A vector representing the pixel intensity of the borders. Default: {0, 0, 0}.
801
+ /// \par Example
802
+ /// \code
803
+ /// /* Define operations */
804
+ /// auto decode_op = vision::Decode();
805
+ /// auto rand_augment_op = vision::RandAugment();
806
+ /// /* dataset is an instance of Dataset object */
807
+ /// dataset = dataset->Map({decode_op, rand_augment_op}, // operations
808
+ /// {"image"}); // input columns
809
+ /// \endcode
810
+ explicit RandAugment(int32_t num_ops = 2, int32_t magnitude = 9, int32_t num_magnitude_bins = 31,
811
+ InterpolationMode interpolation = InterpolationMode::kNearestNeighbour,
812
+ const std::vector<uint8_t> &fill_value = {0, 0, 0});
813
+
814
+ /// \brief Destructor.
815
+ ~RandAugment() override = default;
816
+
817
+ protected:
818
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
819
+ /// \return Shared pointer to TensorOperation object.
820
+ std::shared_ptr<TensorOperation> Parse() override;
821
+
822
+ private:
823
+ struct Data;
824
+ std::shared_ptr<Data> data_;
825
+ };
826
+
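The example above uses the defaults; a sketch of a fully specified RandAugment policy follows (the particular argument values are illustrative only):

    auto decode_op = vision::Decode();
    // Apply 3 random ops per image at magnitude 15 out of 31 bins, resample with
    // bilinear interpolation and fill exposed borders with mid-gray.
    auto rand_augment_op = vision::RandAugment(3, 15, 31, InterpolationMode::kLinear, {128, 128, 128});
    dataset = dataset->Map({decode_op, rand_augment_op}, {"image"});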
827
+ /// \brief Automatically adjust the contrast of the image with a given probability.
828
+ class DATASET_API RandomAutoContrast final : public TensorTransform {
829
+ public:
830
+ /// \brief Constructor.
831
+ /// \param[in] cutoff Percent of the lightest and darkest pixels to be cut off from
832
+ /// the histogram of the input image. The value must be in range of [0.0, 50.0) (default=0.0).
833
+ /// \param[in] ignore The background pixel values to be ignored, each of which must be
834
+ /// in range of [0, 255] (default={}).
835
+ /// \param[in] prob A float representing the probability of AutoContrast, which must be
836
+ /// in range of [0, 1] (default=0.5).
837
+ /// \par Example
838
+ /// \code
839
+ /// /* Define operations */
840
+ /// auto decode_op = vision::Decode();
841
+ /// auto random_auto_contrast_op = vision::RandomAutoContrast(5.0);
842
+ ///
843
+ /// /* dataset is an instance of Dataset object */
844
+ /// dataset = dataset->Map({decode_op, random_auto_contrast_op}, // operations
845
+ /// {"image"}); // input columns
846
+ /// \endcode
847
+ explicit RandomAutoContrast(float cutoff = 0.0, const std::vector<uint32_t> &ignore = {}, float prob = 0.5);
848
+
849
+ /// \brief Destructor.
850
+ ~RandomAutoContrast() override = default;
851
+
852
+ protected:
853
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
854
+ /// \return Shared pointer to TensorOperation object.
855
+ std::shared_ptr<TensorOperation> Parse() override;
856
+
857
+ private:
858
+ struct Data;
859
+ std::shared_ptr<Data> data_;
860
+ };
861
+
862
+ /// \brief Randomly adjust the sharpness of the input image with a given probability.
863
+ class DATASET_API RandomAdjustSharpness final : public TensorTransform {
864
+ public:
865
+ /// \brief Constructor.
866
+ /// \param[in] degree A float representing sharpness adjustment degree, which must be non-negative.
867
+ /// \param[in] prob A float representing the probability of the image being sharpness adjusted, which
868
+ /// must be in range of [0, 1] (default=0.5).
869
+ /// \par Example
870
+ /// \code
871
+ /// /* Define operations */
872
+ /// auto decode_op = vision::Decode();
873
+ /// auto random_adjust_sharpness_op = vision::RandomAdjustSharpness(30.0);
874
+ ///
875
+ /// /* dataset is an instance of Dataset object */
876
+ /// dataset = dataset->Map({decode_op, random_adjust_sharpness_op}, // operations
877
+ /// {"image"}); // input columns
878
+ /// \endcode
879
+ explicit RandomAdjustSharpness(float degree, float prob = 0.5);
880
+
881
+ /// \brief Destructor.
882
+ ~RandomAdjustSharpness() override = default;
883
+
884
+ protected:
885
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
886
+ /// \return Shared pointer to TensorOperation object.
887
+ std::shared_ptr<TensorOperation> Parse() override;
888
+
889
+ private:
890
+ struct Data;
891
+ std::shared_ptr<Data> data_;
892
+ };
893
+
894
+ /// \brief Blend an image with its grayscale version with random weights
895
+ /// t and 1 - t generated from a given range. If the range is trivial
896
+ /// then the weights are fixed and t equals the bound of the interval.
897
+ class DATASET_API RandomColor final : public TensorTransform {
898
+ public:
899
+ /// \brief Constructor.
900
+ /// \param[in] t_lb Lower bound of the random weight t.
901
+ /// \param[in] t_ub Upper bound of the random weight t.
902
+ /// \par Example
903
+ /// \code
904
+ /// /* Define operations */
905
+ /// auto decode_op = vision::Decode();
906
+ /// auto random_color_op = vision::RandomColor(5.0, 50.0);
907
+ ///
908
+ /// /* dataset is an instance of Dataset object */
909
+ /// dataset = dataset->Map({decode_op, random_color_op}, // operations
910
+ /// {"image"}); // input columns
911
+ /// \endcode
912
+ RandomColor(float t_lb, float t_ub);
913
+
914
+ /// \brief Destructor.
915
+ ~RandomColor() override = default;
916
+
917
+ protected:
918
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
919
+ /// \return Shared pointer to TensorOperation object.
920
+ std::shared_ptr<TensorOperation> Parse() override;
921
+
922
+ private:
923
+ struct Data;
924
+ std::shared_ptr<Data> data_;
925
+ };
926
+
927
+ /// \brief Randomly adjust the brightness, contrast, saturation, and hue of the input image.
928
+ class DATASET_API RandomColorAdjust final : public TensorTransform {
929
+ public:
930
+ /// \brief Constructor.
931
+ /// \param[in] brightness Brightness adjustment factor. Must be a vector of one or two values
932
+ /// if it is a vector of two values, it needs to be in the form of [min, max] (Default={1, 1}).
933
+ /// \param[in] contrast Contrast adjustment factor. Must be a vector of one or two values
934
+ /// if it is a vector of two values, it needs to be in the form of [min, max] (Default={1, 1}).
935
+ /// \param[in] saturation Saturation adjustment factor. Must be a vector of one or two values
936
+ /// if it is a vector of two values, it needs to be in the form of [min, max] (Default={1, 1}).
937
+ /// \param[in] hue Hue adjustment factor. Must be a vector of one or two values
938
+ /// if it is a vector of two values, it must be in the form of [min, max] where -0.5 <= min <= max <= 0.5
939
+ /// (Default={0, 0}).
940
+ /// \par Example
941
+ /// \code
942
+ /// /* Define operations */
943
+ /// auto decode_op = vision::Decode();
944
+ /// auto random_color_adjust_op = vision::RandomColorAdjust({1.0, 5.0}, {10.0, 20.0}, {40.0, 40.0});
945
+ ///
946
+ /// /* dataset is an instance of Dataset object */
947
+ /// dataset = dataset->Map({decode_op, random_color_adjust_op}, // operations
948
+ /// {"image"}); // input columns
949
+ /// \endcode
950
+ explicit RandomColorAdjust(const std::vector<float> &brightness = {1.0, 1.0},
951
+ const std::vector<float> &contrast = {1.0, 1.0},
952
+ const std::vector<float> &saturation = {1.0, 1.0},
953
+ const std::vector<float> &hue = {0.0, 0.0});
954
+
955
+ /// \brief Destructor.
956
+ ~RandomColorAdjust() override = default;
957
+
958
+ protected:
959
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
960
+ /// \return Shared pointer to TensorOperation object.
961
+ std::shared_ptr<TensorOperation> Parse() override;
962
+
963
+ private:
964
+ struct Data;
965
+ std::shared_ptr<Data> data_;
966
+ };
967
+
968
+ /// \brief Crop the input image at a random location.
969
+ class DATASET_API RandomCrop final : public TensorTransform {
970
+ public:
971
+ /// \brief Constructor.
972
+ /// \param[in] size A vector representing the output size of the cropped image.
973
+ /// If the size is a single value, a squared crop of size (size, size) is returned.
974
+ /// If the size has 2 values, it should be (height, width).
975
+ /// \param[in] padding A vector representing the number of pixels to pad the image.
976
+ /// If the vector has one value, it pads all sides of the image with that value.
977
+ /// If the vector has two values, it pads left and right with the first and
978
+ /// top and bottom with the second value.
979
+ /// If the vector has four values, it pads left, top, right, and bottom with
980
+ /// those values respectively.
981
+ /// \param[in] pad_if_needed A boolean indicating whether to pad the image
982
+ /// if either side is smaller than the given output size.
983
+ /// \param[in] fill_value A vector representing the pixel intensity of the borders if the padding_mode is
984
+ /// BorderType.kConstant. If 1 value is provided, it is used for all RGB channels.
985
+ /// If 3 values are provided, it is used to fill R, G, B channels respectively.
986
+ /// \param[in] padding_mode The method of padding (default=BorderType::kConstant). It can be any of
987
+ /// [BorderType::kConstant, BorderType::kEdge, BorderType::kReflect, BorderType::kSymmetric].
988
+ /// - BorderType::kConstant, Fill the border with constant values.
989
+ /// - BorderType::kEdge, Fill the border with the last value on the edge.
990
+ /// - BorderType::kReflect, Reflect the values on the edge omitting the last value of edge.
991
+ /// - BorderType::kSymmetric, Reflect the values on the edge repeating the last value of edge.
992
+ /// \note If more than one image is input, make sure that all images have the same size.
993
+ /// \par Example
994
+ /// \code
995
+ /// /* Define operations */
996
+ /// auto decode_op = vision::Decode();
997
+ /// auto random_crop_op = vision::RandomCrop({255, 255}, {10, 10, 10, 10});
998
+ ///
999
+ /// /* dataset is an instance of Dataset object */
1000
+ /// dataset = dataset->Map({decode_op, random_crop_op}, // operations
1001
+ /// {"image"}); // input columns
1002
+ /// \endcode
1003
+ explicit RandomCrop(const std::vector<int32_t> &size, const std::vector<int32_t> &padding = {0, 0, 0, 0},
1004
+ bool pad_if_needed = false, const std::vector<uint8_t> &fill_value = {0, 0, 0},
1005
+ BorderType padding_mode = BorderType::kConstant);
1006
+
1007
+ /// \brief Destructor.
1008
+ ~RandomCrop() override = default;
1009
+
1010
+ protected:
1011
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1012
+ /// \return Shared pointer to TensorOperation object.
1013
+ std::shared_ptr<TensorOperation> Parse() override;
1014
+
1015
+ private:
1016
+ struct Data;
1017
+ std::shared_ptr<Data> data_;
1018
+ };
1019
+
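A sketch of RandomCrop using the padding-related parameters rather than the defaults (the sizes and border mode below are illustrative):

    auto decode_op = vision::Decode();
    // Crop to 224 x 224 after padding 4 pixels on every side; pad further if the decoded
    // image is still smaller than the target, and replicate edge pixels instead of using
    // a constant fill (fill_value is ignored for BorderType::kEdge).
    auto random_crop_op = vision::RandomCrop({224, 224}, {4}, true, {0, 0, 0}, BorderType::kEdge);
    dataset = dataset->Map({decode_op, random_crop_op}, {"image"});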
1020
+ /// \brief Equivalent to RandomResizedCrop TensorTransform, but crop the image before decoding.
1021
+ class DATASET_API RandomCropDecodeResize final : public TensorTransform {
1022
+ public:
1023
+ /// \brief Constructor.
1024
+ /// \param[in] size A vector representing the output size of the cropped image.
1025
+ /// If the size is a single value, a squared crop of size (size, size) is returned.
1026
+ /// If the size has 2 values, it should be (height, width).
1027
+ /// \param[in] scale Range [min, max) of respective size of the
1028
+ /// original size to be cropped (default=(0.08, 1.0)).
1029
+ /// \param[in] ratio Range [min, max) of aspect ratio to be
1030
+ /// cropped (default=(3. / 4., 4. / 3.)).
1031
+ /// \param[in] interpolation An enum for the mode of interpolation.
1032
+ /// - InterpolationMode::kLinear, Interpolation method is bilinear interpolation.
1033
+ /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation.
1034
+ /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation.
1035
+ /// - InterpolationMode::kArea, Interpolation method is pixel area interpolation.
1036
+ /// - InterpolationMode::kCubicPil, Interpolation method is bicubic interpolation, as implemented in Pillow.
1037
+ /// \param[in] max_attempts The maximum number of attempts to propose a valid crop_area (default=10).
1038
+ /// If exceeded, fall back to use center_crop instead.
1039
+ /// \par Example
1040
+ /// \code
1041
+ /// /* Define operations */
1042
+ /// auto random_op = vision::RandomCropDecodeResize({255, 255}, {0.1, 0.5});
1043
+ ///
1044
+ /// /* dataset is an instance of Dataset object */
1045
+ /// dataset = dataset->Map({random_op}, // operations
1046
+ /// {"image"}); // input columns
1047
+ /// \endcode
1048
+ explicit RandomCropDecodeResize(const std::vector<int32_t> &size, const std::vector<float> &scale = {0.08, 1.0},
1049
+ const std::vector<float> &ratio = {3. / 4., 4. / 3.},
1050
+ InterpolationMode interpolation = InterpolationMode::kLinear,
1051
+ int32_t max_attempts = 10);
1052
+
1053
+ /// \brief Destructor.
1054
+ ~RandomCropDecodeResize() override = default;
1055
+
1056
+ protected:
1057
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1058
+ /// \return Shared pointer to TensorOperation object.
1059
+ std::shared_ptr<TensorOperation> Parse() override;
1060
+
1061
+ private:
1062
+ struct Data;
1063
+ std::shared_ptr<Data> data_;
1064
+ };
1065
+
1066
+ /// \brief Crop the input image at a random location and adjust bounding boxes accordingly.
1067
+ /// If the cropped area is out of bbox, the returned bbox will be empty.
1068
+ class DATASET_API RandomCropWithBBox final : public TensorTransform {
1069
+ public:
1070
+ /// \brief Constructor.
1071
+ /// \param[in] size A vector representing the output size of the cropped image.
1072
+ /// If the size is a single value, a squared crop of size (size, size) is returned.
1073
+ /// If the size has 2 values, it should be (height, width).
1074
+ /// \param[in] padding A vector representing the number of pixels to pad the image
1075
+ /// If the vector has one value, it pads all sides of the image with that value.
1076
+ /// If the vector has two values, it pads left and right with the first and
1077
+ /// top and bottom with the second value.
1078
+ /// If the vector has four values, it pads left, top, right, and bottom with
1079
+ /// those values respectively.
1080
+ /// \param[in] pad_if_needed A boolean indicating whether to pad the image
1081
+ /// if either side is smaller than the given output size.
1082
+ /// \param[in] fill_value A vector representing the pixel intensity of the borders. Only valid
1083
+ /// if the padding_mode is BorderType.kConstant. If 1 value is provided, it is used for all
1084
+ /// RGB channels. If 3 values are provided, it is used to fill R, G, B channels respectively.
1085
+ /// \param[in] padding_mode The method of padding (default=BorderType::kConstant). It can be any of
1086
+ /// [BorderType::kConstant, BorderType::kEdge, BorderType::kReflect, BorderType::kSymmetric].
1087
+ /// - BorderType::kConstant, Fill the border with constant values.
1088
+ /// - BorderType::kEdge, Fill the border with the last value on the edge.
1089
+ /// - BorderType::kReflect, Reflect the values on the edge omitting the last value of edge.
1090
+ /// - BorderType::kSymmetric, Reflect the values on the edge repeating the last value of edge.
1091
+ /// \par Example
1092
+ /// \code
1093
+ /// /* Define operations */
1094
+ /// auto random_op = vision::RandomCropWithBBox({224, 224}, {0, 0, 0, 0});
1095
+ ///
1096
+ /// /* dataset is an instance of Dataset object */
1097
+ /// dataset = dataset->Map({random_op}, // operations
1098
+ /// {"image", "bbox"}); // input columns
1099
+ /// \endcode
1100
+ explicit RandomCropWithBBox(const std::vector<int32_t> &size, const std::vector<int32_t> &padding = {0, 0, 0, 0},
1101
+ bool pad_if_needed = false, const std::vector<uint8_t> &fill_value = {0, 0, 0},
1102
+ BorderType padding_mode = BorderType::kConstant);
1103
+
1104
+ /// \brief Destructor.
1105
+ ~RandomCropWithBBox() override = default;
1106
+
1107
+ protected:
1108
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1109
+ /// \return Shared pointer to TensorOperation object.
1110
+ std::shared_ptr<TensorOperation> Parse() override;
1111
+
1112
+ private:
1113
+ struct Data;
1114
+ std::shared_ptr<Data> data_;
1115
+ };
1116
+
1117
+ /// \brief Randomly apply histogram equalization on the input image with a given probability.
1118
+ class DATASET_API RandomEqualize final : public TensorTransform {
1119
+ public:
1120
+ /// \brief Constructor.
1121
+ /// \param[in] prob A float representing the probability of equalization, which
1122
+ /// must be in range of [0, 1] (default=0.5).
1123
+ /// \par Example
1124
+ /// \code
1125
+ /// /* Define operations */
1126
+ /// auto decode_op = vision::Decode();
1127
+ /// auto random_op = vision::RandomEqualize(0.5);
1128
+ ///
1129
+ /// /* dataset is an instance of Dataset object */
1130
+ /// dataset = dataset->Map({decode_op, random_op}, // operations
1131
+ /// {"image"}); // input columns
1132
+ /// \endcode
1133
+ explicit RandomEqualize(float prob = 0.5);
1134
+
1135
+ /// \brief Destructor.
1136
+ ~RandomEqualize() override = default;
1137
+
1138
+ protected:
1139
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1140
+ /// \return Shared pointer to TensorOperation object.
1141
+ std::shared_ptr<TensorOperation> Parse() override;
1142
+
1143
+ private:
1144
+ struct Data;
1145
+ std::shared_ptr<Data> data_;
1146
+ };
1147
+
1148
+ /// \brief Randomly flip the input image horizontally with a given probability.
1149
+ class DATASET_API RandomHorizontalFlip final : public TensorTransform {
1150
+ public:
1151
+ /// \brief Constructor.
1152
+ /// \param[in] prob A float representing the probability of flip.
1153
+ /// \par Example
1154
+ /// \code
1155
+ /// /* Define operations */
1156
+ /// auto decode_op = vision::Decode();
1157
+ /// auto random_op = vision::RandomHorizontalFlip(0.8);
1158
+ ///
1159
+ /// /* dataset is an instance of Dataset object */
1160
+ /// dataset = dataset->Map({decode_op, random_op}, // operations
1161
+ /// {"image"}); // input columns
1162
+ /// \endcode
1163
+ explicit RandomHorizontalFlip(float prob = 0.5);
1164
+
1165
+ /// \brief Destructor.
1166
+ ~RandomHorizontalFlip() override = default;
1167
+
1168
+ protected:
1169
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1170
+ /// \return Shared pointer to TensorOperation object.
1171
+ std::shared_ptr<TensorOperation> Parse() override;
1172
+
1173
+ private:
1174
+ struct Data;
1175
+ std::shared_ptr<Data> data_;
1176
+ };
1177
+
1178
+ /// \brief Randomly flip the input image horizontally with a given probability and adjust bounding boxes accordingly.
1179
+ class DATASET_API RandomHorizontalFlipWithBBox final : public TensorTransform {
1180
+ public:
1181
+ /// \brief Constructor.
1182
+ /// \param[in] prob A float representing the probability of flip.
1183
+ /// \par Example
1184
+ /// \code
1185
+ /// /* Define operations */
1186
+ /// auto random_op = vision::RandomHorizontalFlipWithBBox(1.0);
1187
+ ///
1188
+ /// /* dataset is an instance of Dataset object */
1189
+ /// dataset = dataset->Map({random_op}, // operations
1190
+ /// {"image", "bbox"}); // input columns
1191
+ /// \endcode
1192
+ explicit RandomHorizontalFlipWithBBox(float prob = 0.5);
1193
+
1194
+ /// \brief Destructor.
1195
+ ~RandomHorizontalFlipWithBBox() override = default;
1196
+
1197
+ protected:
1198
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1199
+ /// \return Shared pointer to TensorOperation object.
1200
+ std::shared_ptr<TensorOperation> Parse() override;
1201
+
1202
+ private:
1203
+ struct Data;
1204
+ std::shared_ptr<Data> data_;
1205
+ };
1206
+
1207
+ /// \brief Randomly invert the input image with a given probability.
1208
+ class DATASET_API RandomInvert final : public TensorTransform {
1209
+ public:
1210
+ /// \brief Constructor.
1211
+ /// \param[in] prob A float representing the probability of the image being inverted, which
1212
+ /// must be in range of [0, 1] (default=0.5).
1213
+ /// \par Example
1214
+ /// \code
1215
+ /// /* Define operations */
1216
+ /// auto decode_op = vision::Decode();
1217
+ /// auto random_op = vision::RandomInvert(0.8);
1218
+ ///
1219
+ /// /* dataset is an instance of Dataset object */
1220
+ /// dataset = dataset->Map({decode_op, random_op}, // operations
1221
+ /// {"image"}); // input columns
1222
+ /// \endcode
1223
+ explicit RandomInvert(float prob = 0.5);
1224
+
1225
+ /// \brief Destructor.
1226
+ ~RandomInvert() override = default;
1227
+
1228
+ protected:
1229
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1230
+ /// \return Shared pointer to TensorOperation object.
1231
+ std::shared_ptr<TensorOperation> Parse() override;
1232
+
1233
+ private:
1234
+ struct Data;
1235
+ std::shared_ptr<Data> data_;
1236
+ };
1237
+
1238
+ /// \brief Add AlexNet-style PCA-based noise to an image.
1239
+ class DATASET_API RandomLighting final : public TensorTransform {
1240
+ public:
1241
+ /// \brief Constructor.
1242
+ /// \param[in] alpha A float representing the intensity of the image (default=0.05).
1243
+ /// \par Example
1244
+ /// \code
1245
+ /// /* Define operations */
1246
+ /// auto decode_op = vision::Decode();
1247
+ /// auto random_op = vision::RandomLighting(0.1);
1248
+ ///
1249
+ /// /* dataset is an instance of Dataset object */
1250
+ /// dataset = dataset->Map({decode_op, random_op}, // operations
1251
+ /// {"image"}); // input columns
1252
+ /// \endcode
1253
+ explicit RandomLighting(float alpha = 0.05);
1254
+
1255
+ /// \brief Destructor.
1256
+ ~RandomLighting() override = default;
1257
+
1258
+ protected:
1259
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1260
+ /// \return Shared pointer to TensorOperation object.
1261
+ std::shared_ptr<TensorOperation> Parse() override;
1262
+
1263
+ private:
1264
+ struct Data;
1265
+ std::shared_ptr<Data> data_;
1266
+ };
1267
+
1268
+ /// \brief Reduce the number of bits for each color channel randomly.
1269
+ class DATASET_API RandomPosterize final : public TensorTransform {
1270
+ public:
1271
+ /// \brief Constructor.
1272
+ /// \param[in] bit_range Range of bits used by random posterize to compress the image.
1273
+ /// A uint8_t vector representing the minimum and maximum bits, each in range of [1, 8] (Default={4, 8}).
1274
+ /// \par Example
1275
+ /// \code
1276
+ /// /* Define operations */
1277
+ /// auto decode_op = vision::Decode();
1278
+ /// auto random_op = vision::RandomPosterize({4, 8});
1279
+ ///
1280
+ /// /* dataset is an instance of Dataset object */
1281
+ /// dataset = dataset->Map({decode_op, random_op}, // operations
1282
+ /// {"image"}); // input columns
1283
+ /// \endcode
1284
+ explicit RandomPosterize(const std::vector<uint8_t> &bit_range = {4, 8});
1285
+
1286
+ /// \brief Destructor.
1287
+ ~RandomPosterize() override = default;
1288
+
1289
+ protected:
1290
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1291
+ /// \return Shared pointer to TensorOperation object.
1292
+ std::shared_ptr<TensorOperation> Parse() override;
1293
+
1294
+ private:
1295
+ struct Data;
1296
+ std::shared_ptr<Data> data_;
1297
+ };
1298
+
1299
+ /// \brief Resize the input image using a randomly selected interpolation mode.
1300
+ class DATASET_API RandomResize final : public TensorTransform {
1301
+ public:
1302
+ /// \brief Constructor.
1303
+ /// \param[in] size A vector representing the output size of the resized image.
1304
+ /// If the size is a single value, the smaller edge of the image will be resized to this value with
1305
+ /// the same image aspect ratio. If the size has 2 values, it should be (height, width).
1306
+ /// \par Example
1307
+ /// \code
1308
+ /// /* Define operations */
1309
+ /// auto decode_op = vision::Decode();
1310
+ /// auto random_op = vision::RandomResize({32, 32});
1311
+ ///
1312
+ /// /* dataset is an instance of Dataset object */
1313
+ /// dataset = dataset->Map({decode_op, random_op}, // operations
1314
+ /// {"image"}); // input columns
1315
+ /// \endcode
1316
+ explicit RandomResize(const std::vector<int32_t> &size);
1317
+
1318
+ /// \brief Destructor.
1319
+ ~RandomResize() override = default;
1320
+
1321
+ protected:
1322
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1323
+ /// \return Shared pointer to TensorOperation object.
1324
+ std::shared_ptr<TensorOperation> Parse() override;
1325
+
1326
+ private:
1327
+ struct Data;
1328
+ std::shared_ptr<Data> data_;
1329
+ };
1330
+
1331
+ /// \brief Resize the input image using a randomly selected interpolation mode and adjust
1332
+ /// bounding boxes accordingly.
1333
+ class DATASET_API RandomResizeWithBBox final : public TensorTransform {
1334
+ public:
1335
+ /// \brief Constructor.
1336
+ /// \param[in] size A vector representing the output size of the resized image.
1337
+ /// If the size is a single value, the smaller edge of the image will be resized to this value with
1338
+ /// the same image aspect ratio. If the size has 2 values, it should be (height, width).
1339
+ /// \par Example
1340
+ /// \code
1341
+ /// /* Define operations */
1342
+ /// auto random_op = vision::RandomResizeWithBBox({50, 50});
1343
+ ///
1344
+ /// /* dataset is an instance of Dataset object */
1345
+ /// dataset = dataset->Map({random_op}, // operations
1346
+ /// {"image", "bbox"}); // input columns
1347
+ /// \endcode
1348
+ explicit RandomResizeWithBBox(const std::vector<int32_t> &size);
1349
+
1350
+ /// \brief Destructor.
1351
+ ~RandomResizeWithBBox() override = default;
1352
+
1353
+ protected:
1354
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1355
+ /// \return Shared pointer to TensorOperation object.
1356
+ std::shared_ptr<TensorOperation> Parse() override;
1357
+
1358
+ private:
1359
+ struct Data;
1360
+ std::shared_ptr<Data> data_;
1361
+ };
1362
+
1363
+ /// \brief Crop the input image to a random size and aspect ratio.
1364
+ class DATASET_API RandomResizedCrop final : public TensorTransform {
1365
+ public:
1366
+ /// \brief Constructor.
1367
+ /// \param[in] size A vector representing the output size of the cropped image.
1368
+ /// If the size is a single value, a squared crop of size (size, size) is returned.
1369
+ /// If the size has 2 values, it should be (height, width).
1370
+ /// \param[in] scale Range [min, max) of respective size of the original
1371
+ /// size to be cropped (default=(0.08, 1.0)).
1372
+ /// \param[in] ratio Range [min, max) of aspect ratio to be cropped
1373
+ /// (default=(3. / 4., 4. / 3.)).
1374
+ /// \param[in] interpolation Image interpolation mode (default=InterpolationMode::kLinear).
1375
+ /// - InterpolationMode::kLinear, Interpolation method is bilinear interpolation.
1376
+ /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation.
1377
+ /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation.
1378
+ /// - InterpolationMode::kArea, Interpolation method is pixel area interpolation.
1379
+ /// - InterpolationMode::kCubicPil, Interpolation method is bicubic interpolation, as implemented in Pillow.
1380
+ /// \param[in] max_attempts The maximum number of attempts to propose a valid
1381
+ /// crop_area (default=10). If exceeded, fall back to use center_crop instead.
1382
+ /// \note If more than one image is input, make sure that all images have the same size.
1383
+ /// \par Example
1384
+ /// \code
1385
+ /// /* Define operations */
1386
+ /// auto decode_op = vision::Decode();
1387
+ /// auto random_op = vision::RandomResizedCrop({32, 32}, {0.08, 1.0});
1388
+ ///
1389
+ /// /* dataset is an instance of Dataset object */
1390
+ /// dataset = dataset->Map({decode_op, random_op}, // operations
1391
+ /// {"image"}); // input columns
1392
+ /// \endcode
1393
+ explicit RandomResizedCrop(const std::vector<int32_t> &size, const std::vector<float> &scale = {0.08, 1.0},
1394
+ const std::vector<float> &ratio = {3. / 4., 4. / 3.},
1395
+ InterpolationMode interpolation = InterpolationMode::kLinear, int32_t max_attempts = 10);
1396
+
1397
+ /// \brief Destructor.
1398
+ ~RandomResizedCrop() override = default;
1399
+
1400
+ protected:
1401
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1402
+ /// \return Shared pointer to TensorOperation object.
1403
+ std::shared_ptr<TensorOperation> Parse() override;
1404
+
1405
+ private:
1406
+ struct Data;
1407
+ std::shared_ptr<Data> data_;
1408
+ };
1409
+
1410
+ /// \brief Crop the input image to a random size and aspect ratio.
1411
+ /// If the cropped area is out of bbox, the returned bbox will be empty.
1412
+ class DATASET_API RandomResizedCropWithBBox final : public TensorTransform {
1413
+ public:
1414
+ /// \brief Constructor.
1415
+ /// \param[in] size A vector representing the output size of the cropped image.
1416
+ /// If the size is a single value, a squared crop of size (size, size) is returned.
1417
+ /// If the size has 2 values, it should be (height, width).
1418
+ /// \param[in] scale Range [min, max) of respective size of the original
1419
+ /// size to be cropped (default=(0.08, 1.0)).
1420
+ /// \param[in] ratio Range [min, max) of aspect ratio to be cropped
1421
+ /// (default=(3. / 4., 4. / 3.)).
1422
+ /// \param[in] interpolation Image interpolation mode (default=InterpolationMode::kLinear).
1423
+ /// - InterpolationMode::kLinear, Interpolation method is bilinear interpolation.
1424
+ /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation.
1425
+ /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation.
1426
+ /// - InterpolationMode::kArea, Interpolation method is pixel area interpolation.
1427
+ /// - InterpolationMode::kCubicPil, Interpolation method is bicubic interpolation, as implemented in Pillow.
1428
+ /// \param[in] max_attempts The maximum number of attempts to propose a valid
1429
+ /// crop_area (default=10). If exceeded, fall back to use center_crop instead.
1430
+ /// \par Example
1431
+ /// \code
1432
+ /// /* Define operations */
1433
+ /// auto random_op = vision::RandomResizedCropWithBBox({50, 50}, {0.05, 0.5}, {0.2, 0.4},
1434
+ /// InterpolationMode::kCubic);
1435
+ ///
1436
+ /// /* dataset is an instance of Dataset object */
1437
+ /// dataset = dataset->Map({random_op}, // operations
1438
+ /// {"image", "bbox"}); // input columns
1439
+ /// \endcode
1440
+ explicit RandomResizedCropWithBBox(const std::vector<int32_t> &size, const std::vector<float> &scale = {0.08, 1.0},
1441
+ const std::vector<float> &ratio = {3. / 4., 4. / 3.},
1442
+ InterpolationMode interpolation = InterpolationMode::kLinear,
1443
+ int32_t max_attempts = 10);
1444
+
1445
+ /// \brief Destructor.
1446
+ ~RandomResizedCropWithBBox() override = default;
1447
+
1448
+ protected:
1449
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1450
+ /// \return Shared pointer to TensorOperation object.
1451
+ std::shared_ptr<TensorOperation> Parse() override;
1452
+
1453
+ private:
1454
+ struct Data;
1455
+ std::shared_ptr<Data> data_;
1456
+ };
1457
+
1458
+ /// \brief Rotate the image according to parameters.
1459
+ class DATASET_API RandomRotation final : public TensorTransform {
1460
+ public:
1461
+ /// \brief Constructor.
1462
+ /// \param[in] degrees A float vector of size 2, representing the starting and ending degrees.
1463
+ /// \param[in] resample An enum for the mode of interpolation.
1464
+ /// - InterpolationMode::kLinear, Interpolation method is bilinear interpolation.
1465
+ /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation.
1466
+ /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation.
1467
+ /// - InterpolationMode::kArea, Interpolation method is pixel area interpolation.
1468
+ /// - InterpolationMode::kCubicPil, Interpolation method is bicubic interpolation, as implemented in Pillow.
1469
+ /// \param[in] expand A boolean representing whether the image is expanded after rotation.
1470
+ /// \param[in] center A float vector of size 2 or empty, representing the x and y center of rotation
1471
+ /// or the center of the image.
1472
+ /// \param[in] fill_value A vector representing the value to fill the area outside the transform
1473
+ /// in the output image. If 1 value is provided, it is used for all RGB channels.
1474
+ /// If 3 values are provided, it is used to fill R, G, B channels respectively.
1475
+ /// \par Example
1476
+ /// \code
1477
+ /// /* Define operations */
1478
+ /// auto decode_op = vision::Decode();
1479
+ /// auto random_op = vision::RandomRotation({30, 60}, InterpolationMode::kNearestNeighbour);
1480
+ ///
1481
+ /// /* dataset is an instance of Dataset object */
1482
+ /// dataset = dataset->Map({decode_op, random_op}, // operations
1483
+ /// {"image"}); // input columns
1484
+ /// \endcode
1485
+ explicit RandomRotation(const std::vector<float> &degrees,
1486
+ InterpolationMode resample = InterpolationMode::kNearestNeighbour, bool expand = false,
1487
+ const std::vector<float> &center = {}, const std::vector<uint8_t> &fill_value = {0, 0, 0});
1488
+
1489
+ /// \brief Destructor.
1490
+ ~RandomRotation() override = default;
1491
+
1492
+ protected:
1493
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1494
+ /// \return Shared pointer to TensorOperation object.
1495
+ std::shared_ptr<TensorOperation> Parse() override;
1496
+
1497
+ private:
1498
+ struct Data;
1499
+ std::shared_ptr<Data> data_;
1500
+ };
1501
+
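A sketch exercising the remaining RandomRotation parameters (the angle range, center and fill value below are illustrative):

    auto decode_op = vision::Decode();
    // Rotate by a random angle in [-15, 15] degrees with bilinear resampling, expand the
    // output canvas so no corner is cut off, rotate about the image center (empty vector),
    // and fill the exposed area with white.
    auto random_rotation_op = vision::RandomRotation({-15.0f, 15.0f}, InterpolationMode::kLinear,
                                                     true, {}, {255, 255, 255});
    dataset = dataset->Map({decode_op, random_rotation_op}, {"image"});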
1502
+ /// \brief Choose a random sub-policy from a list to be applied on the input image. A sub-policy is a list of tuples
1503
+ /// (operation, prob), where operation is a TensorTransform operation and prob is the probability that this
1504
+ /// operation will be applied. Once a sub-policy is selected, each operation within the sub-policy will be
1505
+ /// applied in sequence according to its probability.
1506
+ class DATASET_API RandomSelectSubpolicy final : public TensorTransform {
1507
+ public:
1508
+ /// \brief Constructor.
1509
+ /// \param[in] policy Vector of sub-policies to choose from, in which the TensorTransform objects are raw pointers.
1510
+ /// \par Example
1511
+ /// \code
1512
+ /// /* Define operations */
1513
+ /// auto invert_op(new vision::Invert());
1514
+ /// auto equalize_op(new vision::Equalize());
1515
+ ///
1516
+ /// std::vector<std::pair<TensorTransform *, double>> policy = {{invert_op, 0.5}, {equalize_op, 0.4}};
1517
+ /// vision::RandomSelectSubpolicy random_select_subpolicy_op = vision::RandomSelectSubpolicy({policy});
1518
+ ///
1519
+ /// /* dataset is an instance of Dataset object */
1520
+ /// dataset = dataset->Map({random_select_subpolicy_op}, // operations
1521
+ /// {"image"}); // input columns
1522
+ /// \endcode
1523
+ explicit RandomSelectSubpolicy(const std::vector<std::vector<std::pair<TensorTransform *, double>>> &policy);
1524
+
1525
+ /// \brief Constructor.
1526
+ /// \param[in] policy Vector of sub-policies to choose from, in which the TensorTransform objects are shared pointers.
1527
+ /// \par Example
1528
+ /// \code
1529
+ /// /* Define operations */
1530
+ /// std::shared_ptr<TensorTransform> invert_op(new vision::Invert());
1531
+ /// std::shared_ptr<TensorTransform> equalize_op(new vision::Equalize());
1532
+ /// std::shared_ptr<TensorTransform> resize_op(new vision::Resize({15, 15}));
1533
+ ///
1534
+ /// auto random_select_subpolicy_op = vision::RandomSelectSubpolicy({
1535
+ /// {{invert_op, 0.5}, {equalize_op, 0.4}},
1536
+ /// {{resize_op, 0.1}}
1537
+ /// });
1538
+ ///
1539
+ /// /* dataset is an instance of Dataset object */
1540
+ /// dataset = dataset->Map({random_select_subpolicy_op}, // operations
1541
+ /// {"image"}); // input columns
1542
+ /// \endcode
1543
+ explicit RandomSelectSubpolicy(
1544
+ const std::vector<std::vector<std::pair<std::shared_ptr<TensorTransform>, double>>> &policy);
1545
+
1546
+ /// \brief Constructor.
1547
+ /// \param[in] policy Vector of sub-policies to choose from, in which the TensorTransform objects are object pointers.
1548
+ /// \par Example
1549
+ /// \code
1550
+ /// /* Define operations */
1551
+ /// vision::Invert invert_op = vision::Invert();
1552
+ /// vision::Equalize equalize_op = vision::Equalize();
1553
+ /// vision::Resize resize_op = vision::Resize({15, 15});
1554
+ ///
1555
+ /// auto random_select_subpolicy_op = vision::RandomSelectSubpolicy({
1556
+ /// {{invert_op, 0.5}, {equalize_op, 0.4}},
1557
+ /// {{resize_op, 0.1}}
1558
+ /// });
1559
+ ///
1560
+ /// /* dataset is an instance of Dataset object */
1561
+ /// dataset = dataset->Map({random_select_subpolicy_op}, // operations
1562
+ /// {"image"}); // input columns
1563
+ /// \endcode
1564
+ explicit RandomSelectSubpolicy(
1565
+ const std::vector<std::vector<std::pair<std::reference_wrapper<TensorTransform>, double>>> &policy);
1566
+
1567
+ /// \brief Destructor.
1568
+ ~RandomSelectSubpolicy() override = default;
1569
+
1570
+ protected:
1571
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1572
+ /// \return Shared pointer to TensorOperation object.
1573
+ std::shared_ptr<TensorOperation> Parse() override;
1574
+
1575
+ private:
1576
+ struct Data;
1577
+ std::shared_ptr<Data> data_;
1578
+ };
1579
+
1580
+ /// \brief Adjust the sharpness of the input image by a fixed or random degree.
1581
+ class DATASET_API RandomSharpness final : public TensorTransform {
1582
+ public:
1583
+ /// \brief Constructor.
1584
+ /// \param[in] degrees A float vector of size 2, representing the range of random sharpness
1585
+ /// adjustment degrees. It should be in (min, max) format. If min=max, then it is a
1586
+ /// single fixed magnitude operation (default = (0.1, 1.9)).
1587
+ /// \par Example
1588
+ /// \code
1589
+ /// /* Define operations */
1590
+ /// auto decode_op = vision::Decode();
1591
+ /// auto random_op = vision::RandomSharpness({0.1, 1.5});
1592
+ ///
1593
+ /// /* dataset is an instance of Dataset object */
1594
+ /// dataset = dataset->Map({decode_op, random_op}, // operations
1595
+ /// {"image"}); // input columns
1596
+ /// \endcode
1597
+ explicit RandomSharpness(const std::vector<float> &degrees = {0.1, 1.9});
1598
+
1599
+ /// \brief Destructor.
1600
+ ~RandomSharpness() override = default;
1601
+
1602
+ protected:
1603
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1604
+ /// \return Shared pointer to TensorOperation object.
1605
+ std::shared_ptr<TensorOperation> Parse() override;
1606
+
1607
+ private:
1608
+ struct Data;
1609
+ std::shared_ptr<Data> data_;
1610
+ };
1611
+
1612
+ /// \brief Invert pixels randomly within a specified range.
1613
+ class DATASET_API RandomSolarize final : public TensorTransform {
1614
+ public:
1615
+ /// \brief Constructor.
1616
+ /// \param[in] threshold A vector with two elements specifying the pixel range to invert.
1617
+ /// Threshold values should always be in (min, max) format.
1618
+ /// If min=max, it will invert all pixels above min (i.e. max).
1619
+ /// \par Example
1620
+ /// \code
1621
+ /// /* Define operations */
1622
+ /// auto decode_op = vision::Decode();
1623
+ /// auto random_op = vision::RandomSolarize({0, 255});
1624
+ ///
1625
+ /// /* dataset is an instance of Dataset object */
1626
+ /// dataset = dataset->Map({decode_op, random_op}, // operations
1627
+ /// {"image"}); // input columns
1628
+ /// \endcode
1629
+ explicit RandomSolarize(const std::vector<uint8_t> &threshold = {0, 255});
1630
+
1631
+ /// \brief Destructor.
1632
+ ~RandomSolarize() override = default;
1633
+
1634
+ protected:
1635
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1636
+ /// \return Shared pointer to TensorOperation object.
1637
+ std::shared_ptr<TensorOperation> Parse() override;
1638
+
1639
+ private:
1640
+ struct Data;
1641
+ std::shared_ptr<Data> data_;
1642
+ };
1643
+
1644
+ /// \brief Randomly flip the input image vertically with a given probability.
1645
+ class DATASET_API RandomVerticalFlip final : public TensorTransform {
1646
+ public:
1647
+ /// \brief Constructor.
1648
+ /// \param[in] prob A float representing the probability of flip.
1649
+ /// \par Example
1650
+ /// \code
1651
+ /// /* Define operations */
1652
+ /// auto decode_op = vision::Decode();
1653
+ /// auto random_op = vision::RandomVerticalFlip();
1654
+ ///
1655
+ /// /* dataset is an instance of Dataset object */
1656
+ /// dataset = dataset->Map({decode_op, random_op}, // operations
1657
+ /// {"image"}); // input columns
1658
+ /// \endcode
1659
+ explicit RandomVerticalFlip(float prob = 0.5);
1660
+
1661
+ /// \brief Destructor.
1662
+ ~RandomVerticalFlip() override = default;
1663
+
1664
+ protected:
1665
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1666
+ /// \return Shared pointer to TensorOperation object.
1667
+ std::shared_ptr<TensorOperation> Parse() override;
1668
+
1669
+ private:
1670
+ struct Data;
1671
+ std::shared_ptr<Data> data_;
1672
+ };
1673
+
1674
+ /// \brief Randomly flip the input image vertically with a given probability and adjust bounding boxes accordingly.
1675
+ class DATASET_API RandomVerticalFlipWithBBox final : public TensorTransform {
1676
+ public:
1677
+ /// \brief Constructor.
1678
+ /// \param[in] prob A float representing the probability of flip.
1679
+ /// \par Example
1680
+ /// \code
1681
+ /// /* Define operations */
1682
+ /// auto random_op = vision::RandomVerticalFlipWithBBox();
1683
+ ///
1684
+ /// /* dataset is an instance of Dataset object */
1685
+ /// dataset = dataset->Map({random_op}, // operations
1686
+ /// {"image", "bbox"}); // input columns
1687
+ /// \endcode
1688
+ explicit RandomVerticalFlipWithBBox(float prob = 0.5);
1689
+
1690
+ /// \brief Destructor.
1691
+ ~RandomVerticalFlipWithBBox() override = default;
1692
+
1693
+ protected:
1694
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1695
+ /// \return Shared pointer to TensorOperation object.
1696
+ std::shared_ptr<TensorOperation> Parse() override;
1697
+
1698
+ private:
1699
+ struct Data;
1700
+ std::shared_ptr<Data> data_;
1701
+ };
1702
+
1703
+ /// \brief Reads a file in binary mode.
1704
+ /// \param[in] filename The path to the file to be read.
1705
+ /// \param[out] output The binary data.
1706
+ /// \return The status code.
1707
+ Status DATASET_API ReadFile(const std::string &filename, mindspore::MSTensor *output);
1708
+
1709
+ /// \brief Read an image file and decode it into one- or three-channel data.
1710
+ /// \param[in] filename The path to the file to be read.
1711
+ /// \param[out] output The Tensor data.
1712
+ /// \param[in] mode The read mode used for optionally converting the image, can be one of
1713
+ /// [ImageReadMode::kUNCHANGED, ImageReadMode::kGRAYSCALE, ImageReadMode::kCOLOR]. Default:
1714
+ /// ImageReadMode::kUNCHANGED.
1715
+ /// - ImageReadMode::kUNCHANGED, keep the output in its original format.
1716
+ /// - ImageReadMode::kGRAYSCALE, convert the output into one-channel grayscale data.
1717
+ /// - ImageReadMode::kCOLOR, convert the output into three-channel RGB color data.
1718
+ /// \return The status code.
1719
+ Status DATASET_API ReadImage(const std::string &filename, mindspore::MSTensor *output,
1720
+ ImageReadMode mode = ImageReadMode::kUNCHANGED);
1721
+
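Unlike the transform classes, ReadFile and ReadImage are free functions that report failure through the returned Status. A minimal sketch follows (the file path is illustrative, and IsOk()/ToString() are assumed members of mindspore::Status):

    mindspore::MSTensor image;
    // Decode the file into a three-channel RGB tensor; kGRAYSCALE or kUNCHANGED work the same way.
    Status rc = ReadImage("/path/to/sample.jpg", &image, ImageReadMode::kCOLOR);
    if (!rc.IsOk()) {
      // Handle the failure, e.g. log rc.ToString() and skip this sample.
    }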
1722
+ #ifdef ENABLE_FFMPEG
1723
+ /// \brief Read the video frames, audio and metadata from a video file. It supports AVI, H264, H265, MOV, MP4 and WMV file formats.
1724
+ /// \param[in] filename The path to the video file to be read.
1725
+ /// \param[out] video_output The video frames of the video file.
1726
+ /// \param[out] audio_output The audio frames of the video file.
1727
+ /// \param[out] metadata_output The metadata, which contains video_fps and audio_fps.
1728
+ /// \param[in] start_pts The start presentation timestamp of the video. Default: 0.0.
1729
+ /// \param[in] end_pts The end presentation timestamp of the video. Default: 2147483647.0.
1730
+ /// \param[in] pts_unit The unit for the timestamps, can be one of ["pts", "sec"]. Default: "pts".
1731
+ /// \return The status code.
1732
+ Status DATASET_API ReadVideo(const std::string &filename, mindspore::MSTensor *video_output,
1733
+ mindspore::MSTensor *audio_output, std::map<std::string, std::string> *metadata_output,
1734
+ float start_pts = 0.0, float end_pts = 2147483647.0, const std::string &pts_unit = "pts");
1735
+
1736
+ /// \brief Read the timestamps and frame rate of a video file. It supports AVI, H264, H265, MOV, MP4, WMV files.
1737
+ /// \param[in] filename The path to the video file to be read.
1738
+ /// \param[out] output The tuple(video_timestamps, video_fps) of the video.
1739
+ /// \param[in] pts_unit The unit for the timestamps, can be one of ["pts", "sec"]. Default: "pts".
1740
+ /// \return The status code.
1741
+ Status DATASET_API ReadVideoTimestamps(const std::string &filename, std::tuple<std::vector<float>, float> *output,
1742
+ const std::string &pts_unit = "pts");
1743
+ #endif
1744
+
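A sketch of the FFmpeg-backed readers declared above (only compiled when ENABLE_FFMPEG is defined; the clip path is illustrative):

    #ifdef ENABLE_FFMPEG
    mindspore::MSTensor video_frames;
    mindspore::MSTensor audio_frames;
    std::map<std::string, std::string> metadata;
    // Read the whole clip, keeping the default start/end timestamps and "pts" unit.
    Status rc = ReadVideo("/path/to/clip.mp4", &video_frames, &audio_frames, &metadata);

    // Timestamps and frame rate only, without decoding the frames.
    std::tuple<std::vector<float>, float> stamps_and_fps;
    rc = ReadVideoTimestamps("/path/to/clip.mp4", &stamps_and_fps);
    #endif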
1745
+ /// \brief Crop the given image and zoom to the specified size.
1746
+ class DATASET_API ResizedCrop final : public TensorTransform {
1747
+ public:
1748
+ /// \brief Constructor.
1749
+ /// \param[in] top Vertical ordinate of the upper left corner of the cropped area.
1750
+ /// \param[in] left Horizontal ordinate of the upper left corner of the cropped area.
1751
+ /// \param[in] height Height of cropped image.
1752
+ /// \param[in] width Width of cropped image.
1753
+ /// \param[in] size A vector representing the output size of the image.
1754
+ /// If the size is a single value, a square output of size (size, size) is returned.
1755
+ /// If the size has 2 values, it should be (height, width).
1756
+ /// \param[in] interpolation Image interpolation mode. Default: InterpolationMode::kLinear.
1757
+ /// - InterpolationMode::kLinear, Interpolation method is bilinear interpolation.
1758
+ /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation.
1759
+ /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation.
1760
+ /// - InterpolationMode::kArea, Interpolation method is pixel area interpolation.
1761
+ /// - InterpolationMode::kCubicPil, Interpolation method is bicubic interpolation, as implemented in Pillow.
1762
+ /// \note If more than one image is input, make sure that all images have the same size.
1763
+ /// \par Example
1764
+ /// \code
1765
+ /// /* Define operations */
1766
+ /// auto decode_op = vision::Decode();
1767
+ /// auto resized_crop_op = vision::ResizedCrop(128, 128, 256, 256, {128, 128});
1768
+ ///
1769
+ /// /* dataset is an instance of Dataset object */
1770
+ /// dataset = dataset->Map({decode_op, resized_crop_op}, // operations
1771
+ /// {"image"}); // input columns
1772
+ /// \endcode
1773
+ ResizedCrop(int32_t top, int32_t left, int32_t height, int32_t width, const std::vector<int32_t> &size,
1774
+ InterpolationMode interpolation = InterpolationMode::kLinear);
1775
+
1776
+ /// \brief Destructor.
1777
+ ~ResizedCrop() override = default;
1778
+
1779
+ protected:
1780
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1781
+ /// \return Shared pointer to TensorOperation object.
1782
+ std::shared_ptr<TensorOperation> Parse() override;
1783
+
1784
+ private:
1785
+ struct Data;
1786
+ std::shared_ptr<Data> data_;
1787
+ };
1788
+
1789
+ /// \brief Resize the input image to the given size and adjust bounding boxes accordingly.
1790
+ class DATASET_API ResizeWithBBox final : public TensorTransform {
1791
+ public:
1792
+ /// \brief Constructor.
1793
+ /// \param[in] size The output size of the resized image.
1794
+ /// If the size is an integer, smaller edge of the image will be resized to this value with the same image aspect
1795
+ /// ratio. If the size is a sequence of length 2, it should be (height, width).
1796
+ /// \param[in] interpolation An enum for the mode of interpolation (default=InterpolationMode::kLinear).
1797
+ /// - InterpolationMode::kLinear, Interpolation method is bilinear interpolation.
1798
+ /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation.
1799
+ /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation.
1800
+ /// - InterpolationMode::kArea, Interpolation method is pixel area interpolation.
1801
+ /// - InterpolationMode::kCubicPil, Interpolation method is bicubic interpolation, as implemented in Pillow.
1802
+ /// \par Example
1803
+ /// \code
1804
+ /// /* Define operations */
1805
+ /// auto random_op = vision::ResizeWithBBox({100, 100}, InterpolationMode::kNearestNeighbour);
1806
+ ///
1807
+ /// /* dataset is an instance of Dataset object */
1808
+ /// dataset = dataset->Map({random_op}, // operations
1809
+ /// {"image", "bbox"}); // input columns
1810
+ /// \endcode
1811
+ explicit ResizeWithBBox(const std::vector<int32_t> &size,
1812
+ InterpolationMode interpolation = InterpolationMode::kLinear);
1813
+
1814
+ /// \brief Destructor.
1815
+ ~ResizeWithBBox() override = default;
1816
+
1817
+ protected:
1818
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1819
+ /// \return Shared pointer to TensorOperation object.
1820
+ std::shared_ptr<TensorOperation> Parse() override;
1821
+
1822
+ private:
1823
+ struct Data;
1824
+ std::shared_ptr<Data> data_;
1825
+ };
1826
+
1827
+ /// \brief Change the format of input tensor from 4-channel RGBA to 3-channel BGR.
1828
+ class DATASET_API RGBA2BGR final : public TensorTransform {
1829
+ public:
1830
+ /// \brief Constructor.
1831
+ /// \par Example
1832
+ /// \code
1833
+ /// /* Define operations */
1834
+ /// auto decode_op = vision::Decode();
1835
+ /// auto rgba2bgr_op = vision::RGBA2BGR();
1836
+ ///
1837
+ /// /* dataset is an instance of Dataset object */
1838
+ /// dataset = dataset->Map({decode_op, rgba2bgr_op}, // operations
1839
+ /// {"image"}); // input columns
1840
+ /// \endcode
1841
+ RGBA2BGR();
1842
+
1843
+ /// \brief Destructor.
1844
+ ~RGBA2BGR() override = default;
1845
+
1846
+ protected:
1847
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1848
+ /// \return Shared pointer to TensorOperation object.
1849
+ std::shared_ptr<TensorOperation> Parse() override;
1850
+ };
1851
+
1852
+ /// \brief Change the input 4-channel RGBA tensor to a 3-channel RGB tensor.
1853
+ class DATASET_API RGBA2RGB final : public TensorTransform {
1854
+ public:
1855
+ /// \brief Constructor.
1856
+ /// \par Example
1857
+ /// \code
1858
+ /// /* Define operations */
1859
+ /// auto decode_op = vision::Decode();
1860
+ /// auto rgba2rgb_op = vision::RGBA2RGB();
1861
+ ///
1862
+ /// /* dataset is an instance of Dataset object */
1863
+ /// dataset = dataset->Map({decode_op, rgba2rgb_op}, // operations
1864
+ /// {"image"}); // input columns
1865
+ /// \endcode
1866
+ RGBA2RGB();
1867
+
1868
+ /// \brief Destructor.
1869
+ ~RGBA2RGB() override = default;
1870
+
1871
+ protected:
1872
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1873
+ /// \return Shared pointer to TensorOperation object.
1874
+ std::shared_ptr<TensorOperation> Parse() override;
1875
+ };
1876
+
1877
+ /// \brief Slice the tensor into multiple patches in the horizontal and vertical directions.
1878
+ class DATASET_API SlicePatches final : public TensorTransform {
1879
+ public:
1880
+ /// \brief Constructor.
1881
+ /// \param[in] num_height The number of patches in vertical direction (default=1).
1882
+ /// \param[in] num_width The number of patches in horizontal direction (default=1).
1883
+ /// \param[in] slice_mode An enum for the mode of slice (default=SliceMode::kPad).
1884
+ /// \param[in] fill_value A value representing the pixel intensity used to fill the padding area on the right and
1885
+ /// bottom borders if slice_mode is kPad, so that the padded tensor can be sliced evenly into patches (default=0).
1886
+ /// \note This is suitable for tensors with large height and width. The tensor will remain unchanged
1887
+ /// if both num_height and num_width are set to 1, and the number of output tensors is equal to num_height * num_width.
1888
+ /// \par Example
1889
+ /// \code
1890
+ /// /* Define operations */
1891
+ /// auto decode_op = vision::Decode();
1892
+ /// auto slice_patch_op = vision::SlicePatches(255, 255);
1893
+ ///
1894
+ /// /* dataset is an instance of Dataset object */
1895
+ /// dataset = dataset->Map({decode_op, slice_patch_op}, // operations
1896
+ /// {"image"}); // input columns
1897
+ /// \endcode
1898
+ explicit SlicePatches(int32_t num_height = 1, int32_t num_width = 1, SliceMode slice_mode = SliceMode::kPad,
1899
+ uint8_t fill_value = 0);
1900
+
1901
+ /// \brief Destructor.
1902
+ ~SlicePatches() override = default;
1903
+
1904
+ protected:
1905
+ /// \brief Function to convert TensorTransform object into a TensorOperation object.
1906
+ /// \return Shared pointer to TensorOperation object.
1907
+ std::shared_ptr<TensorOperation> Parse() override;
1908
+
1909
+ private:
1910
+ struct Data;
1911
+ std::shared_ptr<Data> data_;
1912
+ };
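Because SlicePatches emits num_height * num_width tensors per input image, the Map call needs matching output column names. A sketch for a 2 x 2 grid follows (the column names are illustrative, and the Map overload taking output columns is assumed):

    auto decode_op = vision::Decode();
    // Split each decoded image into 4 patches, so Map is given 4 output columns.
    auto slice_patches_op = vision::SlicePatches(2, 2);
    dataset = dataset->Map({decode_op, slice_patches_op}, {"image"},
                           {"patch_0", "patch_1", "patch_2", "patch_3"});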
1913
+
1914
+ /// \brief Invert pixels within a specified range.
1915
+ class DATASET_API Solarize final : public TensorTransform {
1916
+ public:
1917
+ /// \brief Constructor.
1918
+ /// \param[in] threshold A vector with two elements specifying the pixel range to invert.
1919
+ /// Threshold values should always be in (min, max) format.
1920
+ /// If min=max, it will invert all pixels above min (i.e. max).
1921
+ /// \par Example
1922
+ /// \code
1923
+ /// /* Define operations */
1924
+ /// auto decode_op = vision::Decode();
1925
+ /// auto solarize_op = vision::Solarize({0, 255});
1926
+ ///
1927
+ /// /* dataset is an instance of Dataset object */
1928
+ /// dataset = dataset->Map({decode_op, solarize_op}, // operations
1929
+ /// {"image"}); // input columns
1930
+ /// \endcode
1931
+ explicit Solarize(const std::vector<float> &threshold);
1932
+
1933
+ /// \brief Destructor.
1934
+ ~Solarize() override = default;
1935
+
1936
+ protected:
1937
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1938
+ /// \return Shared pointer to TensorOperation object.
1939
+ std::shared_ptr<TensorOperation> Parse() override;
1940
+
1941
+ private:
1942
+ struct Data;
1943
+ std::shared_ptr<Data> data_;
1944
+ };
1945
+
1946
+ /// \brief Divide the pixel values by 255 and convert from HWC format to CHW format with the required data type.
1947
+ class DATASET_API ToTensor final : public TensorTransform {
1948
+ public:
1949
+ /// \brief Constructor.
1950
+ /// \param[in] output_type The type of the output tensor, specified as a mindspore::DataType or a String
1951
+ /// (default=mindspore::DataType::kNumberTypeFloat32).
1952
+ /// \par Example
1953
+ /// \code
1954
+ /// /* Define operations */
1955
+ /// auto to_tensor_op = vision::ToTensor();
1956
+ ///
1957
+ /// /* dataset is an instance of Dataset object */
1958
+ /// dataset = dataset->Map({to_tensor_op}, // operations
1959
+ /// {"image"}); // input columns
1960
+ /// \endcode
1961
+ ToTensor();
1962
+ explicit ToTensor(std::string output_type);
1963
+ explicit ToTensor(mindspore::DataType output_type);
1964
+
1965
+ /// \brief Destructor.
1966
+ ~ToTensor() override = default;
1967
+
1968
+ protected:
1969
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1970
+ /// \return Shared pointer to TensorOperation object.
1971
+ std::shared_ptr<TensorOperation> Parse() override;
1972
+
1973
+ private:
1974
+ struct Data;
1975
+ std::shared_ptr<Data> data_;
1976
+ };
1977
+
1978
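+ // A minimal sketch of the three ToTensor constructor overloads (not part of the original header;
+ // the "float16" type name is illustrative and assumes the string form accepts standard type names):
+ //
+ //   auto to_float32_op = vision::ToTensor();                                       // default float32
+ //   auto to_named_op   = vision::ToTensor("float16");                              // type given by name
+ //   auto to_typed_op   = vision::ToTensor(mindspore::DataType::kNumberTypeFloat16);  // type given by enum
+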
+ /// \brief Dataset-independent data-augmentation with TrivialAugment Wide.
+ class DATASET_API TrivialAugmentWide final : public TensorTransform {
+  public:
+   /// \brief Constructor.
+   /// \param[in] num_magnitude_bins The number of different magnitude values. Default: 31.
+   /// \param[in] interpolation An enum for the mode of interpolation. Default: InterpolationMode::kNearestNeighbour.
+   ///    - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation.
+   ///    - InterpolationMode::kLinear, Interpolation method is bilinear interpolation.
+   ///    - InterpolationMode::kCubic, Interpolation method is bicubic interpolation.
+   ///    - InterpolationMode::kArea, Interpolation method is pixel area interpolation.
+   /// \param[in] fill_value A vector representing the pixel intensity of the borders. Default: {0, 0, 0}.
+   /// \par Example
+   /// \code
+   ///     /* Define operations */
+   ///     auto decode_op = vision::Decode();
+   ///     auto trivial_augment_wide_op = vision::TrivialAugmentWide();
+   ///
+   ///     /* dataset is an instance of Dataset object */
+   ///     dataset = dataset->Map({decode_op, trivial_augment_wide_op},  // operations
+   ///                            {"image"});                            // input columns
+   /// \endcode
+   explicit TrivialAugmentWide(int32_t num_magnitude_bins = 31,
+                               InterpolationMode interpolation = InterpolationMode::kNearestNeighbour,
+                               const std::vector<uint8_t> &fill_value = {0, 0, 0});
+
+   /// \brief Destructor.
+   ~TrivialAugmentWide() override = default;
+
+  protected:
+   /// \brief The function to convert a TensorTransform object into a TensorOperation object.
+   /// \return Shared pointer to TensorOperation object.
+   std::shared_ptr<TensorOperation> Parse() override;
+
+  private:
+   struct Data;
+   std::shared_ptr<Data> data_;
+ };
+
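+ // A minimal sketch of TrivialAugmentWide with all arguments spelled out (the chosen values are
+ // illustrative, not part of the original header):
+ //
+ //   auto ta_wide_op = vision::TrivialAugmentWide(31,                           // magnitude bins
+ //                                                InterpolationMode::kLinear,   // bilinear interpolation
+ //                                                {128, 128, 128});             // gray border fill
+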
+ /// \brief Randomly perform transformations, selected from the input transform list, on the input tensor.
+ class DATASET_API UniformAugment final : public TensorTransform {
+  public:
+   /// \brief Constructor.
+   /// \param[in] transforms A vector of raw pointers to TensorTransform operations.
+   /// \param[in] num_ops An integer representing the number of operations to be selected and applied.
+   /// \par Example
+   /// \code
+   ///     /* Define operations */
+   ///     auto resize_op(new vision::Resize({30, 30}));
+   ///     auto random_crop_op(new vision::RandomCrop({28, 28}));
+   ///     auto center_crop_op(new vision::CenterCrop({16, 16}));
+   ///     auto uniform_op(new vision::UniformAugment({random_crop_op, center_crop_op}, 2));
+   ///
+   ///     /* dataset is an instance of Dataset object */
+   ///     dataset = dataset->Map({resize_op, uniform_op},  // operations
+   ///                            {"image"});               // input columns
+   /// \endcode
+   explicit UniformAugment(const std::vector<TensorTransform *> &transforms, int32_t num_ops = 2);
+
+   /// \brief Constructor.
+   /// \param[in] transforms A vector of shared pointers to TensorTransform operations.
+   /// \param[in] num_ops An integer representing the number of operations to be selected and applied.
+   /// \par Example
+   /// \code
+   ///     /* Define operations */
+   ///     std::shared_ptr<TensorTransform> resize_op(new vision::Resize({30, 30}));
+   ///     std::shared_ptr<TensorTransform> random_crop_op(new vision::RandomCrop({28, 28}));
+   ///     std::shared_ptr<TensorTransform> center_crop_op(new vision::CenterCrop({16, 16}));
+   ///     std::shared_ptr<TensorTransform> uniform_op(new vision::UniformAugment({random_crop_op, center_crop_op}, 2));
+   ///
+   ///     /* dataset is an instance of Dataset object */
+   ///     dataset = dataset->Map({resize_op, uniform_op},  // operations
+   ///                            {"image"});               // input columns
+   /// \endcode
+   explicit UniformAugment(const std::vector<std::shared_ptr<TensorTransform>> &transforms, int32_t num_ops = 2);
+
+   /// \brief Constructor.
+   /// \param[in] transforms A vector of TensorTransform objects, passed by reference.
+   /// \param[in] num_ops An integer representing the number of operations to be selected and applied.
+   /// \par Example
+   /// \code
+   ///     /* Define operations */
+   ///     vision::Resize resize_op = vision::Resize({30, 30});
+   ///     vision::RandomCrop random_crop_op = vision::RandomCrop({28, 28});
+   ///     vision::CenterCrop center_crop_op = vision::CenterCrop({16, 16});
+   ///     vision::UniformAugment uniform_op = vision::UniformAugment({random_crop_op, center_crop_op}, 2);
+   ///
+   ///     /* dataset is an instance of Dataset object */
+   ///     dataset = dataset->Map({resize_op, uniform_op},  // operations
+   ///                            {"image"});               // input columns
+   /// \endcode
+   explicit UniformAugment(const std::vector<std::reference_wrapper<TensorTransform>> &transforms, int32_t num_ops = 2);
+
+   /// \brief Destructor.
+   ~UniformAugment() override = default;
+
+  protected:
+   /// \brief The function to convert a TensorTransform object into a TensorOperation object.
+   /// \return Shared pointer to TensorOperation object.
+   std::shared_ptr<TensorOperation> Parse() override;
+
+  private:
+   struct Data;
+   std::shared_ptr<Data> data_;
+ };
+
+ /// \brief Flip the input image vertically.
+ class DATASET_API VerticalFlip final : public TensorTransform {
+  public:
+   /// \brief Constructor.
+   /// \par Example
+   /// \code
+   ///     /* Define operations */
+   ///     auto decode_op = vision::Decode();
+   ///     auto flip_op = vision::VerticalFlip();
+   ///
+   ///     /* dataset is an instance of Dataset object */
+   ///     dataset = dataset->Map({decode_op, flip_op},  // operations
+   ///                            {"image"});            // input columns
+   /// \endcode
+   VerticalFlip();
+
+   /// \brief Destructor.
+   ~VerticalFlip() override = default;
+
+  protected:
+   /// \brief The function to convert a TensorTransform object into a TensorOperation object.
+   /// \return Shared pointer to TensorOperation object.
+   std::shared_ptr<TensorOperation> Parse() override;
+ };
+
+ /// \brief Write one-dimensional uint8 data into a file using binary mode.
+ /// \param[in] filename The path to the file to be written.
+ /// \param[in] data The tensor data.
+ /// \return The status code.
+ Status DATASET_API WriteFile(const std::string &filename, const mindspore::MSTensor &data);
+
+ /// \brief Write the image data into a JPEG file.
+ /// \param[in] filename The path to the file to be written.
+ /// \param[in] image The data tensor.
+ /// \param[in] quality The quality for the JPEG file, in range of [1, 100]. Default: 75.
+ /// \return The status code.
+ Status DATASET_API WriteJpeg(const std::string &filename, const mindspore::MSTensor &image, int quality = 75);
+
+ /// \brief Write the image into a PNG file.
+ /// \param[in] filename The path to the file to be written.
+ /// \param[in] image The data tensor.
+ /// \param[in] compression_level The compression level for the PNG file, in range of [0, 9]. Default: 6.
+ /// \return The status code.
+ Status DATASET_API WritePng(const std::string &filename, const mindspore::MSTensor &image, int compression_level = 6);
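+
+ // A minimal sketch of the Write* helpers above (not part of the original header): the file paths
+ // and the `image`/`raw` tensors are illustrative, and error handling is reduced to checking the
+ // returned Status.
+ //
+ //   /* image is an MSTensor holding decoded image data; raw is a one-dimensional uint8 MSTensor */
+ //   Status rc = WriteJpeg("/tmp/out.jpg", image, 90);  // quality in [1, 100]
+ //   if (rc.IsOk()) {
+ //     rc = WritePng("/tmp/out.png", image, 6);         // compression level in [0, 9]
+ //   }
+ //   if (rc.IsOk()) {
+ //     rc = WriteFile("/tmp/out.bin", raw);             // raw binary write
+ //   }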
+ }  // namespace vision
+ }  // namespace dataset
+ }  // namespace mindspore
+ #endif  // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_VISION_H_