mindspore-2.3.0-cp310-cp310-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
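Since a .whl file is just a ZIP archive, a listing like the one below can be reproduced locally. The following is a minimal sketch, assuming the wheel has been downloaded to the current directory (the filename is taken from the header above; adjust the path as needed):

```python
import zipfile

# Placeholder path: the wheel named in the header above, downloaded locally.
WHEEL = "mindspore-2.3.0-cp310-cp310-win_amd64.whl"

# Enumerate every member of the archive, similar to the file list below.
with zipfile.ZipFile(WHEEL) as whl:
    for index, info in enumerate(whl.infolist(), start=1):
        print(f"{index}. {info.filename} ({info.file_size} bytes)")
```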
Files changed (1400)
  1. mindspore/.commit_id +1 -0
  2. mindspore/ConcurrencyCheck.dll +0 -0
  3. mindspore/CppBuildInsights.dll +0 -0
  4. mindspore/CppCoreCheck.dll +0 -0
  5. mindspore/EnumIndex.dll +0 -0
  6. mindspore/EspXEngine.dll +0 -0
  7. mindspore/HResultCheck.dll +0 -0
  8. mindspore/KernelTraceControl.dll +0 -0
  9. mindspore/LocalESPC.dll +0 -0
  10. mindspore/Microsoft.Diagnostics.Tracing.EventSource.dll +0 -0
  11. mindspore/Microsoft.VisualStudio.RemoteControl.dll +0 -0
  12. mindspore/Microsoft.VisualStudio.Telemetry.dll +0 -0
  13. mindspore/Microsoft.VisualStudio.Utilities.Internal.dll +0 -0
  14. mindspore/Newtonsoft.Json.dll +0 -0
  15. mindspore/System.Runtime.CompilerServices.Unsafe.dll +0 -0
  16. mindspore/VariantClear.dll +0 -0
  17. mindspore/__init__.py +51 -0
  18. mindspore/_c_dataengine.cp310-win_amd64.pyd +0 -0
  19. mindspore/_c_expression.cp310-win_amd64.pyd +0 -0
  20. mindspore/_c_mindrecord.cp310-win_amd64.pyd +0 -0
  21. mindspore/_check_jit_forbidden_api.py +106 -0
  22. mindspore/_checkparam.py +1378 -0
  23. mindspore/_extends/__init__.py +23 -0
  24. mindspore/_extends/builtin_operations.py +224 -0
  25. mindspore/_extends/graph_kernel/__init__.py +17 -0
  26. mindspore/_extends/graph_kernel/model/__init__.py +19 -0
  27. mindspore/_extends/graph_kernel/model/graph_parallel.py +311 -0
  28. mindspore/_extends/graph_kernel/model/graph_split.py +1348 -0
  29. mindspore/_extends/graph_kernel/model/model.py +553 -0
  30. mindspore/_extends/graph_kernel/model/model_builder.py +216 -0
  31. mindspore/_extends/graph_kernel/parallel_estimate.py +60 -0
  32. mindspore/_extends/graph_kernel/splitter.py +140 -0
  33. mindspore/_extends/graph_kernel/utils.py +28 -0
  34. mindspore/_extends/parallel_compile/__init__.py +19 -0
  35. mindspore/_extends/parallel_compile/akg_compiler/__init__.py +19 -0
  36. mindspore/_extends/parallel_compile/akg_compiler/akg_process.py +269 -0
  37. mindspore/_extends/parallel_compile/akg_compiler/build_tbe_kernel.py +529 -0
  38. mindspore/_extends/parallel_compile/akg_compiler/compiler.py +56 -0
  39. mindspore/_extends/parallel_compile/akg_compiler/gen_custom_op_files.py +96 -0
  40. mindspore/_extends/parallel_compile/akg_compiler/get_file_path.py +36 -0
  41. mindspore/_extends/parallel_compile/akg_compiler/tbe_topi.py +556 -0
  42. mindspore/_extends/parallel_compile/akg_compiler/util.py +159 -0
  43. mindspore/_extends/parse/__init__.py +49 -0
  44. mindspore/_extends/parse/compile_config.py +258 -0
  45. mindspore/_extends/parse/namespace.py +136 -0
  46. mindspore/_extends/parse/parser.py +1446 -0
  47. mindspore/_extends/parse/resources.py +213 -0
  48. mindspore/_extends/parse/standard_method.py +4437 -0
  49. mindspore/_extends/parse/trope.py +97 -0
  50. mindspore/_extends/pijit/__init__.py +23 -0
  51. mindspore/_extends/pijit/pijit_func_white_list.py +343 -0
  52. mindspore/_extends/remote/__init__.py +19 -0
  53. mindspore/_extends/remote/kernel_build_server.py +199 -0
  54. mindspore/_extends/remote/kernel_build_server_akg.py +55 -0
  55. mindspore/_extends/remote/kernel_build_server_akg_v2.py +55 -0
  56. mindspore/_extends/remote/kernel_build_server_ascend.py +75 -0
  57. mindspore/_extends/utils.py +68 -0
  58. mindspore/_install_custom.py +43 -0
  59. mindspore/_profiler.py +30 -0
  60. mindspore/amp.py +419 -0
  61. mindspore/atlprov.dll +0 -0
  62. mindspore/avcodec-59.dll +0 -0
  63. mindspore/avdevice-59.dll +0 -0
  64. mindspore/avfilter-8.dll +0 -0
  65. mindspore/avformat-59.dll +0 -0
  66. mindspore/avutil-57.dll +0 -0
  67. mindspore/boost/__init__.py +42 -0
  68. mindspore/boost/adasum.py +319 -0
  69. mindspore/boost/base.py +535 -0
  70. mindspore/boost/boost.py +400 -0
  71. mindspore/boost/boost_cell_wrapper.py +790 -0
  72. mindspore/boost/dim_reduce.py +323 -0
  73. mindspore/boost/grad_accumulation.py +79 -0
  74. mindspore/boost/grad_freeze.py +382 -0
  75. mindspore/boost/group_loss_scale_manager.py +166 -0
  76. mindspore/boost/less_batch_normalization.py +174 -0
  77. mindspore/c1.dll +0 -0
  78. mindspore/c1xx.dll +0 -0
  79. mindspore/c2.dll +0 -0
  80. mindspore/cfgpersist.dll +0 -0
  81. mindspore/clang_rt.asan_dbg_dynamic-x86_64.dll +0 -0
  82. mindspore/clang_rt.asan_dynamic-x86_64.dll +0 -0
  83. mindspore/common/__init__.py +84 -0
  84. mindspore/common/_auto_dynamic.py +68 -0
  85. mindspore/common/_decorator.py +50 -0
  86. mindspore/common/_jit_fallback_utils.py +110 -0
  87. mindspore/common/_monad.py +25 -0
  88. mindspore/common/_register_for_adapter.py +74 -0
  89. mindspore/common/_register_for_recompute.py +48 -0
  90. mindspore/common/_register_for_tensor.py +45 -0
  91. mindspore/common/_stub_tensor.py +210 -0
  92. mindspore/common/_utils.py +122 -0
  93. mindspore/common/api.py +2049 -0
  94. mindspore/common/auto_dynamic_shape.py +507 -0
  95. mindspore/common/dtype.py +422 -0
  96. mindspore/common/dump.py +131 -0
  97. mindspore/common/file_system.py +48 -0
  98. mindspore/common/generator.py +260 -0
  99. mindspore/common/hook_handle.py +155 -0
  100. mindspore/common/initializer.py +880 -0
  101. mindspore/common/jit_config.py +98 -0
  102. mindspore/common/lazy_inline.py +240 -0
  103. mindspore/common/mindir_util.py +111 -0
  104. mindspore/common/mutable.py +234 -0
  105. mindspore/common/no_inline.py +54 -0
  106. mindspore/common/np_dtype.py +25 -0
  107. mindspore/common/parameter.py +1048 -0
  108. mindspore/common/recompute.py +262 -0
  109. mindspore/common/seed.py +260 -0
  110. mindspore/common/sparse_tensor.py +1171 -0
  111. mindspore/common/symbol.py +122 -0
  112. mindspore/common/tensor.py +4859 -0
  113. mindspore/communication/__init__.py +37 -0
  114. mindspore/communication/_comm_helper.py +466 -0
  115. mindspore/communication/_hccl_management.py +297 -0
  116. mindspore/communication/comm_func.py +1140 -0
  117. mindspore/communication/management.py +673 -0
  118. mindspore/config/op_info.config +533 -0
  119. mindspore/context.py +1976 -0
  120. mindspore/d3dcompiler_47.dll +0 -0
  121. mindspore/dataset/__init__.py +90 -0
  122. mindspore/dataset/audio/__init__.py +61 -0
  123. mindspore/dataset/audio/transforms.py +3690 -0
  124. mindspore/dataset/audio/utils.py +386 -0
  125. mindspore/dataset/audio/validators.py +1172 -0
  126. mindspore/dataset/callback/__init__.py +20 -0
  127. mindspore/dataset/callback/ds_callback.py +368 -0
  128. mindspore/dataset/callback/validators.py +32 -0
  129. mindspore/dataset/core/__init__.py +13 -0
  130. mindspore/dataset/core/config.py +1088 -0
  131. mindspore/dataset/core/datatypes.py +101 -0
  132. mindspore/dataset/core/py_util_helpers.py +65 -0
  133. mindspore/dataset/core/validator_helpers.py +774 -0
  134. mindspore/dataset/debug/__init__.py +21 -0
  135. mindspore/dataset/debug/debug_hook.py +97 -0
  136. mindspore/dataset/debug/pre_defined_hook.py +67 -0
  137. mindspore/dataset/engine/__init__.py +124 -0
  138. mindspore/dataset/engine/cache_admin.py +47 -0
  139. mindspore/dataset/engine/cache_client.py +129 -0
  140. mindspore/dataset/engine/datasets.py +4554 -0
  141. mindspore/dataset/engine/datasets_audio.py +911 -0
  142. mindspore/dataset/engine/datasets_standard_format.py +493 -0
  143. mindspore/dataset/engine/datasets_text.py +2161 -0
  144. mindspore/dataset/engine/datasets_user_defined.py +1114 -0
  145. mindspore/dataset/engine/datasets_vision.py +4816 -0
  146. mindspore/dataset/engine/iterators.py +342 -0
  147. mindspore/dataset/engine/obs/__init__.py +23 -0
  148. mindspore/dataset/engine/obs/config_loader.py +68 -0
  149. mindspore/dataset/engine/obs/obs_mindrecord_dataset.py +508 -0
  150. mindspore/dataset/engine/obs/util.py +475 -0
  151. mindspore/dataset/engine/offload.py +596 -0
  152. mindspore/dataset/engine/queue.py +250 -0
  153. mindspore/dataset/engine/samplers.py +895 -0
  154. mindspore/dataset/engine/serializer_deserializer.py +159 -0
  155. mindspore/dataset/engine/validators.py +2875 -0
  156. mindspore/dataset/text/__init__.py +54 -0
  157. mindspore/dataset/text/transforms.py +1703 -0
  158. mindspore/dataset/text/utils.py +715 -0
  159. mindspore/dataset/text/validators.py +642 -0
  160. mindspore/dataset/transforms/__init__.py +48 -0
  161. mindspore/dataset/transforms/c_transforms.py +638 -0
  162. mindspore/dataset/transforms/py_transforms.py +393 -0
  163. mindspore/dataset/transforms/py_transforms_util.py +255 -0
  164. mindspore/dataset/transforms/transforms.py +1260 -0
  165. mindspore/dataset/transforms/validators.py +410 -0
  166. mindspore/dataset/utils/__init__.py +19 -0
  167. mindspore/dataset/utils/browse_dataset.py +190 -0
  168. mindspore/dataset/utils/line_reader.py +124 -0
  169. mindspore/dataset/vision/__init__.py +68 -0
  170. mindspore/dataset/vision/c_transforms.py +2641 -0
  171. mindspore/dataset/vision/py_transforms.py +2120 -0
  172. mindspore/dataset/vision/py_transforms_util.py +1660 -0
  173. mindspore/dataset/vision/transforms.py +7295 -0
  174. mindspore/dataset/vision/utils.py +863 -0
  175. mindspore/dataset/vision/validators.py +1482 -0
  176. mindspore/default_config.py +2 -0
  177. mindspore/dnnl.dll +0 -0
  178. mindspore/dpcmi.dll +0 -0
  179. mindspore/experimental/__init__.py +20 -0
  180. mindspore/experimental/map_parameter.py +309 -0
  181. mindspore/experimental/optim/__init__.py +40 -0
  182. mindspore/experimental/optim/adadelta.py +161 -0
  183. mindspore/experimental/optim/adagrad.py +168 -0
  184. mindspore/experimental/optim/adam.py +193 -0
  185. mindspore/experimental/optim/adamax.py +170 -0
  186. mindspore/experimental/optim/adamw.py +205 -0
  187. mindspore/experimental/optim/asgd.py +153 -0
  188. mindspore/experimental/optim/lr_scheduler.py +1371 -0
  189. mindspore/experimental/optim/nadam.py +157 -0
  190. mindspore/experimental/optim/optimizer.py +259 -0
  191. mindspore/experimental/optim/radam.py +194 -0
  192. mindspore/experimental/optim/rmsprop.py +154 -0
  193. mindspore/experimental/optim/rprop.py +164 -0
  194. mindspore/experimental/optim/sgd.py +156 -0
  195. mindspore/hal/__init__.py +40 -0
  196. mindspore/hal/_ascend.py +57 -0
  197. mindspore/hal/_base.py +57 -0
  198. mindspore/hal/_cpu.py +56 -0
  199. mindspore/hal/_gpu.py +57 -0
  200. mindspore/hal/device.py +356 -0
  201. mindspore/hal/event.py +179 -0
  202. mindspore/hal/memory.py +326 -0
  203. mindspore/hal/stream.py +339 -0
  204. mindspore/include/OWNERS +7 -0
  205. mindspore/include/api/allocator.h +97 -0
  206. mindspore/include/api/callback/callback.h +93 -0
  207. mindspore/include/api/callback/ckpt_saver.h +41 -0
  208. mindspore/include/api/callback/loss_monitor.h +33 -0
  209. mindspore/include/api/callback/lr_scheduler.h +51 -0
  210. mindspore/include/api/callback/time_monitor.h +34 -0
  211. mindspore/include/api/callback/train_accuracy.h +37 -0
  212. mindspore/include/api/cell.h +90 -0
  213. mindspore/include/api/cfg.h +82 -0
  214. mindspore/include/api/context.h +602 -0
  215. mindspore/include/api/data_type.h +47 -0
  216. mindspore/include/api/delegate.h +178 -0
  217. mindspore/include/api/delegate_api.h +75 -0
  218. mindspore/include/api/dual_abi_helper.h +208 -0
  219. mindspore/include/api/format.h +28 -0
  220. mindspore/include/api/graph.h +46 -0
  221. mindspore/include/api/kernel.h +58 -0
  222. mindspore/include/api/kernel_api.h +168 -0
  223. mindspore/include/api/metrics/accuracy.h +36 -0
  224. mindspore/include/api/metrics/metrics.h +41 -0
  225. mindspore/include/api/model.h +438 -0
  226. mindspore/include/api/model_group.h +79 -0
  227. mindspore/include/api/model_parallel_runner.h +168 -0
  228. mindspore/include/api/serialization.h +185 -0
  229. mindspore/include/api/status.h +192 -0
  230. mindspore/include/api/types.h +431 -0
  231. mindspore/include/api/visible.h +41 -0
  232. mindspore/include/c_api/context_c.h +179 -0
  233. mindspore/include/c_api/data_type_c.h +52 -0
  234. mindspore/include/c_api/format_c.h +46 -0
  235. mindspore/include/c_api/model_c.h +347 -0
  236. mindspore/include/c_api/ms/abstract.h +67 -0
  237. mindspore/include/c_api/ms/attribute.h +197 -0
  238. mindspore/include/c_api/ms/base/handle_types.h +43 -0
  239. mindspore/include/c_api/ms/base/macros.h +32 -0
  240. mindspore/include/c_api/ms/base/status.h +33 -0
  241. mindspore/include/c_api/ms/base/types.h +283 -0
  242. mindspore/include/c_api/ms/context.h +102 -0
  243. mindspore/include/c_api/ms/graph.h +160 -0
  244. mindspore/include/c_api/ms/node.h +606 -0
  245. mindspore/include/c_api/ms/tensor.h +161 -0
  246. mindspore/include/c_api/ms/value.h +84 -0
  247. mindspore/include/c_api/status_c.h +79 -0
  248. mindspore/include/c_api/tensor_c.h +146 -0
  249. mindspore/include/c_api/types_c.h +67 -0
  250. mindspore/include/dataset/config.h +163 -0
  251. mindspore/include/dataset/constants.h +363 -0
  252. mindspore/include/dataset/execute.h +196 -0
  253. mindspore/include/dataset/text.h +1092 -0
  254. mindspore/include/dataset/transforms.h +638 -0
  255. mindspore/include/dataset/vision.h +2125 -0
  256. mindspore/include/dataset/vision_ascend.h +206 -0
  257. mindspore/include/dataset/vision_lite.h +625 -0
  258. mindspore/jpeg62.dll +0 -0
  259. mindspore/log.py +633 -0
  260. mindspore/mindrecord/__init__.py +43 -0
  261. mindspore/mindrecord/common/__init__.py +17 -0
  262. mindspore/mindrecord/common/constant.py +20 -0
  263. mindspore/mindrecord/common/enums.py +44 -0
  264. mindspore/mindrecord/common/exceptions.py +311 -0
  265. mindspore/mindrecord/config.py +809 -0
  266. mindspore/mindrecord/filereader.py +174 -0
  267. mindspore/mindrecord/filewriter.py +705 -0
  268. mindspore/mindrecord/mindpage.py +210 -0
  269. mindspore/mindrecord/shardheader.py +141 -0
  270. mindspore/mindrecord/shardindexgenerator.py +74 -0
  271. mindspore/mindrecord/shardreader.py +117 -0
  272. mindspore/mindrecord/shardsegment.py +128 -0
  273. mindspore/mindrecord/shardutils.py +185 -0
  274. mindspore/mindrecord/shardwriter.py +237 -0
  275. mindspore/mindrecord/tools/__init__.py +17 -0
  276. mindspore/mindrecord/tools/cifar10.py +140 -0
  277. mindspore/mindrecord/tools/cifar100.py +153 -0
  278. mindspore/mindrecord/tools/cifar100_to_mr.py +185 -0
  279. mindspore/mindrecord/tools/cifar10_to_mr.py +177 -0
  280. mindspore/mindrecord/tools/csv_to_mr.py +200 -0
  281. mindspore/mindrecord/tools/imagenet_to_mr.py +206 -0
  282. mindspore/mindrecord/tools/mnist_to_mr.py +259 -0
  283. mindspore/mindrecord/tools/tfrecord_to_mr.py +360 -0
  284. mindspore/mindspore_backend.dll +0 -0
  285. mindspore/mindspore_common.dll +0 -0
  286. mindspore/mindspore_core.dll +0 -0
  287. mindspore/mindspore_glog.dll +0 -0
  288. mindspore/mindspore_np_dtype.dll +0 -0
  289. mindspore/mindspore_shared_lib.dll +0 -0
  290. mindspore/mint/__init__.py +1137 -0
  291. mindspore/mint/linalg/__init__.py +22 -0
  292. mindspore/mint/nn/__init__.py +512 -0
  293. mindspore/mint/nn/functional.py +573 -0
  294. mindspore/mint/optim/__init__.py +24 -0
  295. mindspore/mint/optim/adamw.py +185 -0
  296. mindspore/msobj140.dll +0 -0
  297. mindspore/mspdb140.dll +0 -0
  298. mindspore/mspdbcore.dll +0 -0
  299. mindspore/mspdbst.dll +0 -0
  300. mindspore/mspft140.dll +0 -0
  301. mindspore/msvcdis140.dll +0 -0
  302. mindspore/msvcp140.dll +0 -0
  303. mindspore/msvcp140_1.dll +0 -0
  304. mindspore/msvcp140_2.dll +0 -0
  305. mindspore/msvcp140_atomic_wait.dll +0 -0
  306. mindspore/msvcp140_codecvt_ids.dll +0 -0
  307. mindspore/multiprocessing/__init__.py +72 -0
  308. mindspore/nn/__init__.py +48 -0
  309. mindspore/nn/cell.py +2605 -0
  310. mindspore/nn/dynamic_lr.py +482 -0
  311. mindspore/nn/extend/__init__.py +29 -0
  312. mindspore/nn/extend/basic.py +140 -0
  313. mindspore/nn/extend/embedding.py +143 -0
  314. mindspore/nn/extend/layer/__init__.py +27 -0
  315. mindspore/nn/extend/layer/normalization.py +109 -0
  316. mindspore/nn/extend/pooling.py +117 -0
  317. mindspore/nn/grad/__init__.py +21 -0
  318. mindspore/nn/grad/cell_grad.py +196 -0
  319. mindspore/nn/layer/__init__.py +63 -0
  320. mindspore/nn/layer/activation.py +1655 -0
  321. mindspore/nn/layer/basic.py +1519 -0
  322. mindspore/nn/layer/channel_shuffle.py +90 -0
  323. mindspore/nn/layer/combined.py +248 -0
  324. mindspore/nn/layer/container.py +734 -0
  325. mindspore/nn/layer/conv.py +1505 -0
  326. mindspore/nn/layer/dense.py +204 -0
  327. mindspore/nn/layer/embedding.py +751 -0
  328. mindspore/nn/layer/embedding_service.py +531 -0
  329. mindspore/nn/layer/embedding_service_layer.py +393 -0
  330. mindspore/nn/layer/image.py +661 -0
  331. mindspore/nn/layer/math.py +1069 -0
  332. mindspore/nn/layer/normalization.py +1177 -0
  333. mindspore/nn/layer/padding.py +894 -0
  334. mindspore/nn/layer/pooling.py +2148 -0
  335. mindspore/nn/layer/rnn_cells.py +388 -0
  336. mindspore/nn/layer/rnns.py +849 -0
  337. mindspore/nn/layer/thor_layer.py +963 -0
  338. mindspore/nn/layer/timedistributed.py +155 -0
  339. mindspore/nn/layer/transformer.py +823 -0
  340. mindspore/nn/learning_rate_schedule.py +512 -0
  341. mindspore/nn/loss/__init__.py +36 -0
  342. mindspore/nn/loss/loss.py +2846 -0
  343. mindspore/nn/metrics.py +53 -0
  344. mindspore/nn/optim/__init__.py +44 -0
  345. mindspore/nn/optim/_dist_optimizer_registry.py +111 -0
  346. mindspore/nn/optim/ada_grad.py +217 -0
  347. mindspore/nn/optim/adadelta.py +206 -0
  348. mindspore/nn/optim/adafactor.py +448 -0
  349. mindspore/nn/optim/adam.py +1297 -0
  350. mindspore/nn/optim/adamax.py +220 -0
  351. mindspore/nn/optim/adasum.py +548 -0
  352. mindspore/nn/optim/asgd.py +216 -0
  353. mindspore/nn/optim/ftrl.py +401 -0
  354. mindspore/nn/optim/lamb.py +296 -0
  355. mindspore/nn/optim/lars.py +202 -0
  356. mindspore/nn/optim/lazyadam.py +533 -0
  357. mindspore/nn/optim/momentum.py +239 -0
  358. mindspore/nn/optim/optimizer.py +1034 -0
  359. mindspore/nn/optim/proximal_ada_grad.py +242 -0
  360. mindspore/nn/optim/rmsprop.py +264 -0
  361. mindspore/nn/optim/rprop.py +251 -0
  362. mindspore/nn/optim/sgd.py +237 -0
  363. mindspore/nn/optim/thor.py +1310 -0
  364. mindspore/nn/probability/__init__.py +22 -0
  365. mindspore/nn/probability/bijector/__init__.py +35 -0
  366. mindspore/nn/probability/bijector/bijector.py +337 -0
  367. mindspore/nn/probability/bijector/exp.py +65 -0
  368. mindspore/nn/probability/bijector/gumbel_cdf.py +144 -0
  369. mindspore/nn/probability/bijector/invert.py +126 -0
  370. mindspore/nn/probability/bijector/power_transform.py +196 -0
  371. mindspore/nn/probability/bijector/scalar_affine.py +167 -0
  372. mindspore/nn/probability/bijector/softplus.py +189 -0
  373. mindspore/nn/probability/bnn_layers/__init__.py +29 -0
  374. mindspore/nn/probability/bnn_layers/_util.py +46 -0
  375. mindspore/nn/probability/bnn_layers/bnn_cell_wrapper.py +112 -0
  376. mindspore/nn/probability/bnn_layers/conv_variational.py +267 -0
  377. mindspore/nn/probability/bnn_layers/dense_variational.py +302 -0
  378. mindspore/nn/probability/bnn_layers/layer_distribution.py +123 -0
  379. mindspore/nn/probability/distribution/__init__.py +56 -0
  380. mindspore/nn/probability/distribution/_utils/__init__.py +34 -0
  381. mindspore/nn/probability/distribution/_utils/custom_ops.py +96 -0
  382. mindspore/nn/probability/distribution/_utils/utils.py +362 -0
  383. mindspore/nn/probability/distribution/bernoulli.py +334 -0
  384. mindspore/nn/probability/distribution/beta.py +391 -0
  385. mindspore/nn/probability/distribution/categorical.py +435 -0
  386. mindspore/nn/probability/distribution/cauchy.py +383 -0
  387. mindspore/nn/probability/distribution/distribution.py +827 -0
  388. mindspore/nn/probability/distribution/exponential.py +350 -0
  389. mindspore/nn/probability/distribution/gamma.py +391 -0
  390. mindspore/nn/probability/distribution/geometric.py +335 -0
  391. mindspore/nn/probability/distribution/gumbel.py +257 -0
  392. mindspore/nn/probability/distribution/half_normal.py +133 -0
  393. mindspore/nn/probability/distribution/laplace.py +128 -0
  394. mindspore/nn/probability/distribution/log_normal.py +272 -0
  395. mindspore/nn/probability/distribution/logistic.py +379 -0
  396. mindspore/nn/probability/distribution/normal.py +336 -0
  397. mindspore/nn/probability/distribution/poisson.py +288 -0
  398. mindspore/nn/probability/distribution/student_t.py +149 -0
  399. mindspore/nn/probability/distribution/transformed_distribution.py +235 -0
  400. mindspore/nn/probability/distribution/uniform.py +375 -0
  401. mindspore/nn/reinforcement/__init__.py +24 -0
  402. mindspore/nn/reinforcement/_batch_read_write.py +142 -0
  403. mindspore/nn/reinforcement/_tensors_queue.py +152 -0
  404. mindspore/nn/reinforcement/tensor_array.py +145 -0
  405. mindspore/nn/sparse/__init__.py +23 -0
  406. mindspore/nn/sparse/sparse.py +147 -0
  407. mindspore/nn/wrap/__init__.py +49 -0
  408. mindspore/nn/wrap/cell_wrapper.py +979 -0
  409. mindspore/nn/wrap/grad_reducer.py +608 -0
  410. mindspore/nn/wrap/loss_scale.py +680 -0
  411. mindspore/numpy/__init__.py +121 -0
  412. mindspore/numpy/array_creations.py +2734 -0
  413. mindspore/numpy/array_ops.py +2625 -0
  414. mindspore/numpy/dtypes.py +185 -0
  415. mindspore/numpy/fft.py +431 -0
  416. mindspore/numpy/logic_ops.py +935 -0
  417. mindspore/numpy/math_ops.py +5910 -0
  418. mindspore/numpy/utils.py +214 -0
  419. mindspore/numpy/utils_const.py +565 -0
  420. mindspore/opencv_core452.dll +0 -0
  421. mindspore/opencv_imgcodecs452.dll +0 -0
  422. mindspore/opencv_imgproc452.dll +0 -0
  423. mindspore/ops/__init__.py +54 -0
  424. mindspore/ops/_constants.py +30 -0
  425. mindspore/ops/_grad_experimental/__init__.py +31 -0
  426. mindspore/ops/_grad_experimental/grad_array_ops.py +830 -0
  427. mindspore/ops/_grad_experimental/grad_base.py +143 -0
  428. mindspore/ops/_grad_experimental/grad_comm_ops.py +670 -0
  429. mindspore/ops/_grad_experimental/grad_debug_ops.py +31 -0
  430. mindspore/ops/_grad_experimental/grad_implementations.py +203 -0
  431. mindspore/ops/_grad_experimental/grad_inner_ops.py +79 -0
  432. mindspore/ops/_grad_experimental/grad_math_ops.py +824 -0
  433. mindspore/ops/_grad_experimental/grad_nn_ops.py +231 -0
  434. mindspore/ops/_grad_experimental/grad_quant_ops.py +238 -0
  435. mindspore/ops/_grad_experimental/grad_sparse.py +342 -0
  436. mindspore/ops/_grad_experimental/grad_sparse_ops.py +399 -0
  437. mindspore/ops/_grad_experimental/taylor_rule.py +220 -0
  438. mindspore/ops/_op_impl/__init__.py +23 -0
  439. mindspore/ops/_op_impl/_custom_op/__init__.py +39 -0
  440. mindspore/ops/_op_impl/_custom_op/_basic.py +158 -0
  441. mindspore/ops/_op_impl/_custom_op/batch_matmul_impl.py +279 -0
  442. mindspore/ops/_op_impl/_custom_op/batchnorm_fold.py +156 -0
  443. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2.py +109 -0
  444. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad.py +125 -0
  445. mindspore/ops/_op_impl/_custom_op/batchnorm_fold2_grad_reduce.py +105 -0
  446. mindspore/ops/_op_impl/_custom_op/batchnorm_fold_grad.py +124 -0
  447. mindspore/ops/_op_impl/_custom_op/cholesky_trsm_impl.py +116 -0
  448. mindspore/ops/_op_impl/_custom_op/correction_mul.py +89 -0
  449. mindspore/ops/_op_impl/_custom_op/correction_mul_grad.py +196 -0
  450. mindspore/ops/_op_impl/_custom_op/dsd_back_impl.py +366 -0
  451. mindspore/ops/_op_impl/_custom_op/dsd_impl.py +162 -0
  452. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel.py +136 -0
  453. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad.py +206 -0
  454. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perchannel_grad_reduce.py +88 -0
  455. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer.py +128 -0
  456. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad.py +199 -0
  457. mindspore/ops/_op_impl/_custom_op/fake_learned_scale_quant_perlayer_grad_reduce.py +88 -0
  458. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel.py +156 -0
  459. mindspore/ops/_op_impl/_custom_op/fake_quant_perchannel_grad.py +184 -0
  460. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer.py +143 -0
  461. mindspore/ops/_op_impl/_custom_op/fake_quant_perlayer_grad.py +169 -0
  462. mindspore/ops/_op_impl/_custom_op/fused_abs_max1_impl.py +548 -0
  463. mindspore/ops/_op_impl/_custom_op/img2col_impl.py +881 -0
  464. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_left_impl.py +278 -0
  465. mindspore/ops/_op_impl/_custom_op/matmul_cube_dense_right_impl.py +200 -0
  466. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_left_cast_impl.py +334 -0
  467. mindspore/ops/_op_impl/_custom_op/matmul_cube_fracz_right_mul_impl.py +255 -0
  468. mindspore/ops/_op_impl/_custom_op/matmul_cube_impl.py +222 -0
  469. mindspore/ops/_op_impl/_custom_op/matmul_dds_grad_impl.py +644 -0
  470. mindspore/ops/_op_impl/_custom_op/matmul_dds_impl.py +488 -0
  471. mindspore/ops/_op_impl/_custom_op/matrix_combine_impl.py +87 -0
  472. mindspore/ops/_op_impl/_custom_op/minmax_update_perchannel.py +129 -0
  473. mindspore/ops/_op_impl/_custom_op/minmax_update_perlayer.py +121 -0
  474. mindspore/ops/_op_impl/_custom_op/transpose02314_impl.py +352 -0
  475. mindspore/ops/_op_impl/aicpu/__init__.py +441 -0
  476. mindspore/ops/_op_impl/aicpu/abs.py +36 -0
  477. mindspore/ops/_op_impl/aicpu/acos.py +32 -0
  478. mindspore/ops/_op_impl/aicpu/acos_grad.py +33 -0
  479. mindspore/ops/_op_impl/aicpu/acosh.py +34 -0
  480. mindspore/ops/_op_impl/aicpu/acosh_grad.py +35 -0
  481. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d.py +34 -0
  482. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_2d_grad.py +34 -0
  483. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d.py +39 -0
  484. mindspore/ops/_op_impl/aicpu/adaptive_avg_pool_3d_grad.py +39 -0
  485. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d.py +37 -0
  486. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_2d_grad.py +37 -0
  487. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d.py +42 -0
  488. mindspore/ops/_op_impl/aicpu/adaptive_max_pool_3d_grad.py +152 -0
  489. mindspore/ops/_op_impl/aicpu/add.py +43 -0
  490. mindspore/ops/_op_impl/aicpu/add_n.py +41 -0
  491. mindspore/ops/_op_impl/aicpu/add_v2.py +40 -0
  492. mindspore/ops/_op_impl/aicpu/addcdiv.py +41 -0
  493. mindspore/ops/_op_impl/aicpu/addcmul.py +47 -0
  494. mindspore/ops/_op_impl/aicpu/adjust_contrastv2.py +32 -0
  495. mindspore/ops/_op_impl/aicpu/adjust_hue.py +31 -0
  496. mindspore/ops/_op_impl/aicpu/adjust_saturation.py +32 -0
  497. mindspore/ops/_op_impl/aicpu/affine_grid.py +33 -0
  498. mindspore/ops/_op_impl/aicpu/affine_grid_grad.py +35 -0
  499. mindspore/ops/_op_impl/aicpu/angle.py +31 -0
  500. mindspore/ops/_op_impl/aicpu/arg_max.py +75 -0
  501. mindspore/ops/_op_impl/aicpu/arg_min.py +75 -0
  502. mindspore/ops/_op_impl/aicpu/argmax_with_value.py +43 -0
  503. mindspore/ops/_op_impl/aicpu/argmin_with_value.py +43 -0
  504. mindspore/ops/_op_impl/aicpu/asin.py +32 -0
  505. mindspore/ops/_op_impl/aicpu/asin_grad.py +33 -0
  506. mindspore/ops/_op_impl/aicpu/asinh.py +34 -0
  507. mindspore/ops/_op_impl/aicpu/asinh_grad.py +35 -0
  508. mindspore/ops/_op_impl/aicpu/atanh.py +34 -0
  509. mindspore/ops/_op_impl/aicpu/avgpool_grad_v1.py +37 -0
  510. mindspore/ops/_op_impl/aicpu/avgpool_v1.py +36 -0
  511. mindspore/ops/_op_impl/aicpu/bartlett_window.py +36 -0
  512. mindspore/ops/_op_impl/aicpu/batch_matmul.py +43 -0
  513. mindspore/ops/_op_impl/aicpu/batch_norm_grad_grad.py +49 -0
  514. mindspore/ops/_op_impl/aicpu/bernoulli.py +48 -0
  515. mindspore/ops/_op_impl/aicpu/bessel_i0.py +31 -0
  516. mindspore/ops/_op_impl/aicpu/betainc.py +31 -0
  517. mindspore/ops/_op_impl/aicpu/bias_add.py +44 -0
  518. mindspore/ops/_op_impl/aicpu/bias_add_grad.py +42 -0
  519. mindspore/ops/_op_impl/aicpu/bincount.py +33 -0
  520. mindspore/ops/_op_impl/aicpu/blackman_window.py +36 -0
  521. mindspore/ops/_op_impl/aicpu/broadcast_to.py +58 -0
  522. mindspore/ops/_op_impl/aicpu/bucketize.py +34 -0
  523. mindspore/ops/_op_impl/aicpu/cache_swap_table.py +102 -0
  524. mindspore/ops/_op_impl/aicpu/cast.py +225 -0
  525. mindspore/ops/_op_impl/aicpu/cauchy.py +33 -0
  526. mindspore/ops/_op_impl/aicpu/channel_shuffle.py +40 -0
  527. mindspore/ops/_op_impl/aicpu/check_numerics.py +33 -0
  528. mindspore/ops/_op_impl/aicpu/cholesky.py +32 -0
  529. mindspore/ops/_op_impl/aicpu/cholesky_inverse.py +31 -0
  530. mindspore/ops/_op_impl/aicpu/cholesky_solve.py +33 -0
  531. mindspore/ops/_op_impl/aicpu/choleskygrad.py +32 -0
  532. mindspore/ops/_op_impl/aicpu/coalesce.py +37 -0
  533. mindspore/ops/_op_impl/aicpu/col2im.py +38 -0
  534. mindspore/ops/_op_impl/aicpu/combined_non_max_suppression.py +42 -0
  535. mindspore/ops/_op_impl/aicpu/compare_and_bitpack.py +37 -0
  536. mindspore/ops/_op_impl/aicpu/complex.py +32 -0
  537. mindspore/ops/_op_impl/aicpu/complex_abs.py +31 -0
  538. mindspore/ops/_op_impl/aicpu/compute_accidental_hits.py +44 -0
  539. mindspore/ops/_op_impl/aicpu/concat.py +57 -0
  540. mindspore/ops/_op_impl/aicpu/concat_offset.py +42 -0
  541. mindspore/ops/_op_impl/aicpu/concat_offset_v1.py +31 -0
  542. mindspore/ops/_op_impl/aicpu/conj.py +42 -0
  543. mindspore/ops/_op_impl/aicpu/conjugate_transpose.py +58 -0
  544. mindspore/ops/_op_impl/aicpu/cos.py +34 -0
  545. mindspore/ops/_op_impl/aicpu/cosh.py +34 -0
  546. mindspore/ops/_op_impl/aicpu/count_nonzero.py +43 -0
  547. mindspore/ops/_op_impl/aicpu/crop_and_resize.py +69 -0
  548. mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_boxes.py +68 -0
  549. mindspore/ops/_op_impl/aicpu/crop_and_resize_grad_image.py +38 -0
  550. mindspore/ops/_op_impl/aicpu/cross.py +42 -0
  551. mindspore/ops/_op_impl/aicpu/csr_sparse_matrix_to_dense.py +48 -0
  552. mindspore/ops/_op_impl/aicpu/csr_sparse_matrix_to_sparse_tensor.py +51 -0
  553. mindspore/ops/_op_impl/aicpu/ctc_greedy_decoder.py +35 -0
  554. mindspore/ops/_op_impl/aicpu/ctc_loss_v2.py +43 -0
  555. mindspore/ops/_op_impl/aicpu/ctc_loss_v2_grad.py +45 -0
  556. mindspore/ops/_op_impl/aicpu/ctcloss.py +38 -0
  557. mindspore/ops/_op_impl/aicpu/cummax.py +41 -0
  558. mindspore/ops/_op_impl/aicpu/cumprod.py +58 -0
  559. mindspore/ops/_op_impl/aicpu/cumsum.py +58 -0
  560. mindspore/ops/_op_impl/aicpu/cumulative_logsumexp.py +36 -0
  561. mindspore/ops/_op_impl/aicpu/data_format_vec_permute.py +32 -0
  562. mindspore/ops/_op_impl/aicpu/deformable_offsets.py +38 -0
  563. mindspore/ops/_op_impl/aicpu/deformable_offsets_grad.py +43 -0
  564. mindspore/ops/_op_impl/aicpu/dense_to_csr_sparse_matrix.py +49 -0
  565. mindspore/ops/_op_impl/aicpu/dense_to_dense_set_operation.py +45 -0
  566. mindspore/ops/_op_impl/aicpu/dense_to_sparse_set_operation.py +48 -0
  567. mindspore/ops/_op_impl/aicpu/depth_to_space.py +44 -0
  568. mindspore/ops/_op_impl/aicpu/diag.py +36 -0
  569. mindspore/ops/_op_impl/aicpu/diag_part.py +36 -0
  570. mindspore/ops/_op_impl/aicpu/diagonal.py +35 -0
  571. mindspore/ops/_op_impl/aicpu/digamma.py +31 -0
  572. mindspore/ops/_op_impl/aicpu/div.py +41 -0
  573. mindspore/ops/_op_impl/aicpu/div_no_nan.py +35 -0
  574. mindspore/ops/_op_impl/aicpu/dropout2d.py +42 -0
  575. mindspore/ops/_op_impl/aicpu/dropout3d.py +42 -0
  576. mindspore/ops/_op_impl/aicpu/dropout_genmask.py +41 -0
  577. mindspore/ops/_op_impl/aicpu/dropout_genmask_v3.py +32 -0
  578. mindspore/ops/_op_impl/aicpu/dynamic_stitch.py +42 -0
  579. mindspore/ops/_op_impl/aicpu/edit_distance.py +56 -0
  580. mindspore/ops/_op_impl/aicpu/eig.py +35 -0
  581. mindspore/ops/_op_impl/aicpu/embedding_lookup.py +102 -0
  582. mindspore/ops/_op_impl/aicpu/end_of_sequence.py +30 -0
  583. mindspore/ops/_op_impl/aicpu/environ_create.py +28 -0
  584. mindspore/ops/_op_impl/aicpu/environ_destroy_all.py +28 -0
  585. mindspore/ops/_op_impl/aicpu/environ_get.py +41 -0
  586. mindspore/ops/_op_impl/aicpu/environ_set.py +40 -0
  587. mindspore/ops/_op_impl/aicpu/eps.py +32 -0
  588. mindspore/ops/_op_impl/aicpu/equal.py +41 -0
  589. mindspore/ops/_op_impl/aicpu/exp.py +37 -0
  590. mindspore/ops/_op_impl/aicpu/expand.py +45 -0
  591. mindspore/ops/_op_impl/aicpu/expand_dims.py +42 -0
  592. mindspore/ops/_op_impl/aicpu/expm1.py +34 -0
  593. mindspore/ops/_op_impl/aicpu/extract_glimpse.py +35 -0
  594. mindspore/ops/_op_impl/aicpu/eye.py +44 -0
  595. mindspore/ops/_op_impl/aicpu/fft_with_size.py +47 -0
  596. mindspore/ops/_op_impl/aicpu/fill_diagonal.py +39 -0
  597. mindspore/ops/_op_impl/aicpu/fill_v2.py +58 -0
  598. mindspore/ops/_op_impl/aicpu/flatten.py +43 -0
  599. mindspore/ops/_op_impl/aicpu/floor_div.py +38 -0
  600. mindspore/ops/_op_impl/aicpu/fmax.py +36 -0
  601. mindspore/ops/_op_impl/aicpu/fmin.py +37 -0
  602. mindspore/ops/_op_impl/aicpu/fractional_avg_pool.py +41 -0
  603. mindspore/ops/_op_impl/aicpu/fractional_avg_pool_grad.py +41 -0
  604. mindspore/ops/_op_impl/aicpu/fractional_max_pool.py +41 -0
  605. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_grad_with_fixed_ksize.py +43 -0
  606. mindspore/ops/_op_impl/aicpu/fractional_max_pool3d_with_fixed_ksize.py +65 -0
  607. mindspore/ops/_op_impl/aicpu/fractional_max_pool_grad.py +42 -0
  608. mindspore/ops/_op_impl/aicpu/fractional_max_pool_grad_with_fixed_ksize.py +42 -0
  609. mindspore/ops/_op_impl/aicpu/fractional_max_pool_with_fixed_ksize.py +49 -0
  610. mindspore/ops/_op_impl/aicpu/fse_decode.py +43 -0
  611. mindspore/ops/_op_impl/aicpu/fused_sparse_adam.py +46 -0
  612. mindspore/ops/_op_impl/aicpu/fused_sparse_ftrl.py +41 -0
  613. mindspore/ops/_op_impl/aicpu/fused_sparse_lazy_adam.py +46 -0
  614. mindspore/ops/_op_impl/aicpu/fused_sparse_proximal_adagrad.py +39 -0
  615. mindspore/ops/_op_impl/aicpu/gamma.py +38 -0
  616. mindspore/ops/_op_impl/aicpu/gather.py +46 -0
  617. mindspore/ops/_op_impl/aicpu/gather_d.py +79 -0
  618. mindspore/ops/_op_impl/aicpu/gather_d_grad_v2.py +79 -0
  619. mindspore/ops/_op_impl/aicpu/gather_grad.py +54 -0
  620. mindspore/ops/_op_impl/aicpu/gather_nd.py +56 -0
  621. mindspore/ops/_op_impl/aicpu/gcd.py +32 -0
  622. mindspore/ops/_op_impl/aicpu/generate_eod_mask.py +38 -0
  623. mindspore/ops/_op_impl/aicpu/geqrf.py +32 -0
  624. mindspore/ops/_op_impl/aicpu/get_next.py +39 -0
  625. mindspore/ops/_op_impl/aicpu/glu.py +33 -0
  626. mindspore/ops/_op_impl/aicpu/glu_grad.py +34 -0
  627. mindspore/ops/_op_impl/aicpu/greater.py +41 -0
  628. mindspore/ops/_op_impl/aicpu/greater_equal.py +41 -0
  629. mindspore/ops/_op_impl/aicpu/grid_sampler_2d.py +35 -0
  630. mindspore/ops/_op_impl/aicpu/grid_sampler_2d_grad.py +38 -0
  631. mindspore/ops/_op_impl/aicpu/grid_sampler_3d.py +34 -0
  632. mindspore/ops/_op_impl/aicpu/grid_sampler_3d_grad.py +38 -0
  633. mindspore/ops/_op_impl/aicpu/hamming_window.py +57 -0
  634. mindspore/ops/_op_impl/aicpu/hard_sigmoid.py +32 -0
  635. mindspore/ops/_op_impl/aicpu/hard_sigmoid_grad.py +33 -0
  636. mindspore/ops/_op_impl/aicpu/heaviside.py +40 -0
  637. mindspore/ops/_op_impl/aicpu/histogram.py +35 -0
  638. mindspore/ops/_op_impl/aicpu/hsv_to_rgb.py +32 -0
  639. mindspore/ops/_op_impl/aicpu/hypot.py +32 -0
  640. mindspore/ops/_op_impl/aicpu/identity.py +42 -0
  641. mindspore/ops/_op_impl/aicpu/identity_n.py +41 -0
  642. mindspore/ops/_op_impl/aicpu/igamma.py +30 -0
  643. mindspore/ops/_op_impl/aicpu/igammac.py +30 -0
  644. mindspore/ops/_op_impl/aicpu/igammagrada.py +30 -0
  645. mindspore/ops/_op_impl/aicpu/im2col.py +43 -0
  646. mindspore/ops/_op_impl/aicpu/imag.py +31 -0
  647. mindspore/ops/_op_impl/aicpu/index_fill.py +54 -0
  648. mindspore/ops/_op_impl/aicpu/index_put.py +50 -0
  649. mindspore/ops/_op_impl/aicpu/init_data_set_queue.py +27 -0
  650. mindspore/ops/_op_impl/aicpu/inplace_index_add.py +39 -0
  651. mindspore/ops/_op_impl/aicpu/instance_norm_v2.py +41 -0
  652. mindspore/ops/_op_impl/aicpu/instance_norm_v2_grad.py +44 -0
  653. mindspore/ops/_op_impl/aicpu/is_finite.py +40 -0
  654. mindspore/ops/_op_impl/aicpu/is_inf.py +31 -0
  655. mindspore/ops/_op_impl/aicpu/is_nan.py +31 -0
  656. mindspore/ops/_op_impl/aicpu/kldivloss.py +34 -0
  657. mindspore/ops/_op_impl/aicpu/kldivlossgrad.py +35 -0
  658. mindspore/ops/_op_impl/aicpu/layer_norm_grad_grad.py +47 -0
  659. mindspore/ops/_op_impl/aicpu/lcm.py +32 -0
  660. mindspore/ops/_op_impl/aicpu/left_shift.py +38 -0
  661. mindspore/ops/_op_impl/aicpu/less.py +41 -0
  662. mindspore/ops/_op_impl/aicpu/less_equal.py +41 -0
  663. mindspore/ops/_op_impl/aicpu/lgamma.py +33 -0
  664. mindspore/ops/_op_impl/aicpu/linear_sum_assignment.py +57 -0
  665. mindspore/ops/_op_impl/aicpu/linspace.py +33 -0
  666. mindspore/ops/_op_impl/aicpu/list_diff.py +50 -0
  667. mindspore/ops/_op_impl/aicpu/log.py +37 -0
  668. mindspore/ops/_op_impl/aicpu/log1p.py +34 -0
  669. mindspore/ops/_op_impl/aicpu/log_matrix_determinant.py +31 -0
  670. mindspore/ops/_op_impl/aicpu/log_normal_reverse.py +33 -0
  671. mindspore/ops/_op_impl/aicpu/log_uniform_candidate_sampler.py +37 -0
  672. mindspore/ops/_op_impl/aicpu/logical_xor.py +30 -0
  673. mindspore/ops/_op_impl/aicpu/logit.py +33 -0
  674. mindspore/ops/_op_impl/aicpu/logit_grad.py +34 -0
  675. mindspore/ops/_op_impl/aicpu/logspace.py +36 -0
  676. mindspore/ops/_op_impl/aicpu/lower_bound.py +47 -0
  677. mindspore/ops/_op_impl/aicpu/lstsq.py +34 -0
  678. mindspore/ops/_op_impl/aicpu/lu.py +39 -0
  679. mindspore/ops/_op_impl/aicpu/lu_solve.py +32 -0
  680. mindspore/ops/_op_impl/aicpu/lu_unpack.py +114 -0
  681. mindspore/ops/_op_impl/aicpu/lu_unpack_grad.py +49 -0
  682. mindspore/ops/_op_impl/aicpu/masked_fill.py +42 -0
  683. mindspore/ops/_op_impl/aicpu/masked_scatter.py +40 -0
  684. mindspore/ops/_op_impl/aicpu/masked_select.py +31 -0
  685. mindspore/ops/_op_impl/aicpu/masked_select_grad.py +35 -0
  686. mindspore/ops/_op_impl/aicpu/matmul.py +39 -0
  687. mindspore/ops/_op_impl/aicpu/matrix_band_part.py +59 -0
  688. mindspore/ops/_op_impl/aicpu/matrix_determinant.py +30 -0
  689. mindspore/ops/_op_impl/aicpu/matrix_diag_part_v3.py +54 -0
  690. mindspore/ops/_op_impl/aicpu/matrix_diag_v3.py +56 -0
  691. mindspore/ops/_op_impl/aicpu/matrix_exp.py +34 -0
  692. mindspore/ops/_op_impl/aicpu/matrix_inverse.py +31 -0
  693. mindspore/ops/_op_impl/aicpu/matrix_logarithm.py +31 -0
  694. mindspore/ops/_op_impl/aicpu/matrix_power.py +37 -0
  695. mindspore/ops/_op_impl/aicpu/matrix_set_diag_v3.py +54 -0
  696. mindspore/ops/_op_impl/aicpu/matrix_solve.py +35 -0
  697. mindspore/ops/_op_impl/aicpu/matrix_solve_ls.py +36 -0
  698. mindspore/ops/_op_impl/aicpu/matrix_triangular_solve.py +36 -0
  699. mindspore/ops/_op_impl/aicpu/max_pool3d_grad_with_argmax.py +60 -0
  700. mindspore/ops/_op_impl/aicpu/max_pool3d_with_argmax.py +59 -0
  701. mindspore/ops/_op_impl/aicpu/max_unpool2d.py +57 -0
  702. mindspore/ops/_op_impl/aicpu/max_unpool2d_grad.py +58 -0
  703. mindspore/ops/_op_impl/aicpu/max_unpool3d.py +57 -0
  704. mindspore/ops/_op_impl/aicpu/max_unpool3d_grad.py +58 -0
  705. mindspore/ops/_op_impl/aicpu/maximum_grad_grad.py +40 -0
  706. mindspore/ops/_op_impl/aicpu/maxpool_grad_v1.py +46 -0
  707. mindspore/ops/_op_impl/aicpu/maxpool_v1.py +42 -0
  708. mindspore/ops/_op_impl/aicpu/median.py +39 -0
  709. mindspore/ops/_op_impl/aicpu/median_grad.py +45 -0
  710. mindspore/ops/_op_impl/aicpu/meshgrid.py +41 -0
  711. mindspore/ops/_op_impl/aicpu/minimum_grad_grad.py +40 -0
  712. mindspore/ops/_op_impl/aicpu/mirror_pad.py +50 -0
  713. mindspore/ops/_op_impl/aicpu/mirror_pad_grad.py +48 -0
  714. mindspore/ops/_op_impl/aicpu/mul.py +43 -0
  715. mindspore/ops/_op_impl/aicpu/mul_no_nan.py +42 -0
  716. mindspore/ops/_op_impl/aicpu/multi_margin_loss.py +37 -0
  717. mindspore/ops/_op_impl/aicpu/multi_margin_loss_grad.py +41 -0
  718. mindspore/ops/_op_impl/aicpu/multilabel_margin_loss_grad.py +37 -0
  719. mindspore/ops/_op_impl/aicpu/multinomial.py +47 -0
  720. mindspore/ops/_op_impl/aicpu/multinomial_with_replacement.py +35 -0
  721. mindspore/ops/_op_impl/aicpu/mvlgamma.py +32 -0
  722. mindspore/ops/_op_impl/aicpu/mvlgamma_grad.py +33 -0
  723. mindspore/ops/_op_impl/aicpu/nan_to_num.py +34 -0
  724. mindspore/ops/_op_impl/aicpu/neg.py +36 -0
  725. mindspore/ops/_op_impl/aicpu/nextafter.py +32 -0
  726. mindspore/ops/_op_impl/aicpu/nllloss.py +38 -0
  727. mindspore/ops/_op_impl/aicpu/nllloss_grad.py +39 -0
  728. mindspore/ops/_op_impl/aicpu/no_repeat_ngram.py +34 -0
  729. mindspore/ops/_op_impl/aicpu/non_deterministic_ints.py +33 -0
  730. mindspore/ops/_op_impl/aicpu/non_max_suppression.py +36 -0
  731. mindspore/ops/_op_impl/aicpu/non_max_suppression_with_overlaps.py +35 -0
  732. mindspore/ops/_op_impl/aicpu/non_zero.py +43 -0
  733. mindspore/ops/_op_impl/aicpu/not_equal.py +39 -0
  734. mindspore/ops/_op_impl/aicpu/nth_element.py +39 -0
  735. mindspore/ops/_op_impl/aicpu/nuclear_norm.py +33 -0
  736. mindspore/ops/_op_impl/aicpu/one_hot.py +116 -0
  737. mindspore/ops/_op_impl/aicpu/ones_like.py +39 -0
  738. mindspore/ops/_op_impl/aicpu/orgqr.py +34 -0
  739. mindspore/ops/_op_impl/aicpu/pad_and_shift.py +33 -0
  740. mindspore/ops/_op_impl/aicpu/pad_v3.py +61 -0
  741. mindspore/ops/_op_impl/aicpu/pad_v3_grad.py +59 -0
  742. mindspore/ops/_op_impl/aicpu/padding.py +41 -0
  743. mindspore/ops/_op_impl/aicpu/parameterized_truncated_normal.py +54 -0
  744. mindspore/ops/_op_impl/aicpu/pdist_grad.py +33 -0
  745. mindspore/ops/_op_impl/aicpu/poisson.py +37 -0
  746. mindspore/ops/_op_impl/aicpu/polar.py +32 -0
  747. mindspore/ops/_op_impl/aicpu/polygamma.py +34 -0
  748. mindspore/ops/_op_impl/aicpu/pow.py +39 -0
  749. mindspore/ops/_op_impl/aicpu/print_tensor.py +39 -0
  750. mindspore/ops/_op_impl/aicpu/priority_replay_buffer.py +113 -0
  751. mindspore/ops/_op_impl/aicpu/qr.py +36 -0
  752. mindspore/ops/_op_impl/aicpu/quant_dtype_cast.py +40 -0
  753. mindspore/ops/_op_impl/aicpu/quantile.py +35 -0
  754. mindspore/ops/_op_impl/aicpu/ragged_range.py +49 -0
  755. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_sparse.py +73 -0
  756. mindspore/ops/_op_impl/aicpu/ragged_tensor_to_tensor.py +74 -0
  757. mindspore/ops/_op_impl/aicpu/random_categorical.py +68 -0
  758. mindspore/ops/_op_impl/aicpu/random_choice_with_mask.py +36 -0
  759. mindspore/ops/_op_impl/aicpu/random_gamma.py +38 -0
  760. mindspore/ops/_op_impl/aicpu/random_poisson.py +134 -0
  761. mindspore/ops/_op_impl/aicpu/random_shuffle.py +47 -0
  762. mindspore/ops/_op_impl/aicpu/randperm.py +38 -0
  763. mindspore/ops/_op_impl/aicpu/randperm_v2.py +41 -0
  764. mindspore/ops/_op_impl/aicpu/range.py +36 -0
  765. mindspore/ops/_op_impl/aicpu/range_v2.py +35 -0
  766. mindspore/ops/_op_impl/aicpu/real.py +31 -0
  767. mindspore/ops/_op_impl/aicpu/real_div.py +40 -0
  768. mindspore/ops/_op_impl/aicpu/reciprocal.py +34 -0
  769. mindspore/ops/_op_impl/aicpu/reciprocal_grad.py +35 -0
  770. mindspore/ops/_op_impl/aicpu/reduce_mean.py +57 -0
  771. mindspore/ops/_op_impl/aicpu/reduce_prod.py +57 -0
  772. mindspore/ops/_op_impl/aicpu/reduce_sum.py +57 -0
  773. mindspore/ops/_op_impl/aicpu/relu_grad_v3.py +41 -0
  774. mindspore/ops/_op_impl/aicpu/relu_v3.py +38 -0
  775. mindspore/ops/_op_impl/aicpu/reservoir_replay_buffer.py +96 -0
  776. mindspore/ops/_op_impl/aicpu/reshape.py +42 -0
  777. mindspore/ops/_op_impl/aicpu/resize_area.py +40 -0
  778. mindspore/ops/_op_impl/aicpu/resize_bicubic.py +20 -0
  779. mindspore/ops/_op_impl/aicpu/resize_bicubic_grad.py +19 -0
  780. mindspore/ops/_op_impl/aicpu/resize_bilinear.py +32 -0
  781. mindspore/ops/_op_impl/aicpu/resize_bilinear_grad.py +32 -0
  782. mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2.py +36 -0
  783. mindspore/ops/_op_impl/aicpu/resize_nearest_neighbor_v2_grad.py +35 -0
  784. mindspore/ops/_op_impl/aicpu/resize_v2.py +68 -0
  785. mindspore/ops/_op_impl/aicpu/resize_v2_grad.py +68 -0
  786. mindspore/ops/_op_impl/aicpu/reverse_sequence.py +55 -0
  787. mindspore/ops/_op_impl/aicpu/reversev2.py +54 -0
  788. mindspore/ops/_op_impl/aicpu/rgb_to_hsv.py +32 -0
  789. mindspore/ops/_op_impl/aicpu/right_shift.py +38 -0
  790. mindspore/ops/_op_impl/aicpu/rnnt_loss.py +35 -0
  791. mindspore/ops/_op_impl/aicpu/round.py +34 -0
  792. mindspore/ops/_op_impl/aicpu/rsqrt.py +33 -0
  793. mindspore/ops/_op_impl/aicpu/rsqrt_grad.py +36 -0
  794. mindspore/ops/_op_impl/aicpu/sample_distorted_bounding_box_v2.py +49 -0
  795. mindspore/ops/_op_impl/aicpu/scale_and_translate.py +52 -0
  796. mindspore/ops/_op_impl/aicpu/scale_and_translate_grad.py +36 -0
  797. mindspore/ops/_op_impl/aicpu/scatter.py +79 -0
  798. mindspore/ops/_op_impl/aicpu/scatter_add_with_axis.py +53 -0
  799. mindspore/ops/_op_impl/aicpu/scatter_elements.py +39 -0
  800. mindspore/ops/_op_impl/aicpu/scatter_nd.py +59 -0
  801. mindspore/ops/_op_impl/aicpu/scatter_nd_max.py +54 -0
  802. mindspore/ops/_op_impl/aicpu/scatter_nd_min.py +54 -0
  803. mindspore/ops/_op_impl/aicpu/scatter_nd_update.py +59 -0
  804. mindspore/ops/_op_impl/aicpu/search_sorted.py +44 -0
  805. mindspore/ops/_op_impl/aicpu/segment_max.py +52 -0
  806. mindspore/ops/_op_impl/aicpu/segment_mean.py +56 -0
  807. mindspore/ops/_op_impl/aicpu/segment_min.py +52 -0
  808. mindspore/ops/_op_impl/aicpu/segment_prod.py +56 -0
  809. mindspore/ops/_op_impl/aicpu/segment_sum.py +56 -0
  810. mindspore/ops/_op_impl/aicpu/select.py +45 -0
  811. mindspore/ops/_op_impl/aicpu/self_adjoint_eig.py +34 -0
  812. mindspore/ops/_op_impl/aicpu/sequence_add.py +34 -0
  813. mindspore/ops/_op_impl/aicpu/sequence_add_offset.py +34 -0
  814. mindspore/ops/_op_impl/aicpu/sequence_addn.py +38 -0
  815. mindspore/ops/_op_impl/aicpu/sequence_concat.py +40 -0
  816. mindspore/ops/_op_impl/aicpu/sequence_stack.py +40 -0
  817. mindspore/ops/_op_impl/aicpu/set_size.py +38 -0
  818. mindspore/ops/_op_impl/aicpu/sign.py +36 -0
  819. mindspore/ops/_op_impl/aicpu/sin.py +34 -0
  820. mindspore/ops/_op_impl/aicpu/sinc.py +43 -0
  821. mindspore/ops/_op_impl/aicpu/sinh.py +34 -0
  822. mindspore/ops/_op_impl/aicpu/slice.py +59 -0
  823. mindspore/ops/_op_impl/aicpu/slice_grad.py +76 -0
  824. mindspore/ops/_op_impl/aicpu/smooth_l1_loss.py +35 -0
  825. mindspore/ops/_op_impl/aicpu/smooth_l1_loss_grad.py +37 -0
  826. mindspore/ops/_op_impl/aicpu/sort.py +39 -0
  827. mindspore/ops/_op_impl/aicpu/space_to_depth.py +44 -0
  828. mindspore/ops/_op_impl/aicpu/sparse_addmm.py +87 -0
  829. mindspore/ops/_op_impl/aicpu/sparse_apply_adagrad_da.py +80 -0
  830. mindspore/ops/_op_impl/aicpu/sparse_apply_centered_rms_prop.py +105 -0
  831. mindspore/ops/_op_impl/aicpu/sparse_apply_momentum.py +80 -0
  832. mindspore/ops/_op_impl/aicpu/sparse_apply_proximal_gradient_descent.py +79 -0
  833. mindspore/ops/_op_impl/aicpu/sparse_concat.py +59 -0
  834. mindspore/ops/_op_impl/aicpu/sparse_cross.py +42 -0
  835. mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_add.py +58 -0
  836. mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_div.py +58 -0
  837. mindspore/ops/_op_impl/aicpu/sparse_dense_cwise_mul.py +58 -0
  838. mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows.py +63 -0
  839. mindspore/ops/_op_impl/aicpu/sparse_fill_empty_rows_grad.py +45 -0
  840. mindspore/ops/_op_impl/aicpu/sparse_matrix_mat_mul.py +56 -0
  841. mindspore/ops/_op_impl/aicpu/sparse_matrix_nnz.py +81 -0
  842. mindspore/ops/_op_impl/aicpu/sparse_matrix_transpose.py +116 -0
  843. mindspore/ops/_op_impl/aicpu/sparse_reorder.py +56 -0
  844. mindspore/ops/_op_impl/aicpu/sparse_reshape.py +34 -0
  845. mindspore/ops/_op_impl/aicpu/sparse_segment_mean_grad.py +36 -0
  846. mindspore/ops/_op_impl/aicpu/sparse_segment_mean_with_num_segments.py +44 -0
  847. mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n.py +43 -0
  848. mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n_grad.py +38 -0
  849. mindspore/ops/_op_impl/aicpu/sparse_segment_sqrt_n_with_num_segments.py +44 -0
  850. mindspore/ops/_op_impl/aicpu/sparse_segment_sum.py +49 -0
  851. mindspore/ops/_op_impl/aicpu/sparse_segment_sum_with_num_segments.py +68 -0
  852. mindspore/ops/_op_impl/aicpu/sparse_slice.py +63 -0
  853. mindspore/ops/_op_impl/aicpu/sparse_slice_grad.py +61 -0
  854. mindspore/ops/_op_impl/aicpu/sparse_softmax.py +33 -0
  855. mindspore/ops/_op_impl/aicpu/sparse_softmax_cross_entropy_with_logits_v2.py +35 -0
  856. mindspore/ops/_op_impl/aicpu/sparse_sparse_maximum.py +53 -0
  857. mindspore/ops/_op_impl/aicpu/sparse_sparse_minimum.py +53 -0
  858. mindspore/ops/_op_impl/aicpu/sparse_tensor_dense_add.py +84 -0
  859. mindspore/ops/_op_impl/aicpu/sparse_tensor_dense_mat_mul.py +190 -0
  860. mindspore/ops/_op_impl/aicpu/sparse_tensor_to_csr_sparse_matrix.py +51 -0
  861. mindspore/ops/_op_impl/aicpu/sparse_to_dense_v2.py +73 -0
  862. mindspore/ops/_op_impl/aicpu/split.py +45 -0
  863. mindspore/ops/_op_impl/aicpu/sqrt.py +34 -0
  864. mindspore/ops/_op_impl/aicpu/sqrt_grad.py +35 -0
  865. mindspore/ops/_op_impl/aicpu/square.py +35 -0
  866. mindspore/ops/_op_impl/aicpu/squared_difference.py +37 -0
  867. mindspore/ops/_op_impl/aicpu/squeeze.py +42 -0
  868. mindspore/ops/_op_impl/aicpu/sspaddmm.py +97 -0
  869. mindspore/ops/_op_impl/aicpu/stack.py +45 -0
  870. mindspore/ops/_op_impl/aicpu/stack_push_pop.py +87 -0
  871. mindspore/ops/_op_impl/aicpu/standard_laplace.py +34 -0
  872. mindspore/ops/_op_impl/aicpu/standard_normal.py +34 -0
  873. mindspore/ops/_op_impl/aicpu/stateless_dropout_genmask.py +37 -0
  874. mindspore/ops/_op_impl/aicpu/stft.py +70 -0
  875. mindspore/ops/_op_impl/aicpu/strided_slice.py +43 -0
  876. mindspore/ops/_op_impl/aicpu/strided_slice_grad.py +50 -0
  877. mindspore/ops/_op_impl/aicpu/strided_slice_v2.py +93 -0
  878. mindspore/ops/_op_impl/aicpu/strided_slice_v2_grad.py +66 -0
  879. mindspore/ops/_op_impl/aicpu/sub.py +41 -0
  880. mindspore/ops/_op_impl/aicpu/sub_and_filter.py +36 -0
  881. mindspore/ops/_op_impl/aicpu/tan.py +34 -0
  882. mindspore/ops/_op_impl/aicpu/tanh.py +34 -0
  883. mindspore/ops/_op_impl/aicpu/tanh_grad.py +35 -0
  884. mindspore/ops/_op_impl/aicpu/tensor_scatter_update.py +59 -0
  885. mindspore/ops/_op_impl/aicpu/tile.py +56 -0
  886. mindspore/ops/_op_impl/aicpu/topk.py +34 -0
  887. mindspore/ops/_op_impl/aicpu/trace.py +40 -0
  888. mindspore/ops/_op_impl/aicpu/tracegrad.py +41 -0
  889. mindspore/ops/_op_impl/aicpu/trans_data.py +35 -0
  890. mindspore/ops/_op_impl/aicpu/transpose.py +58 -0
  891. mindspore/ops/_op_impl/aicpu/tridiagonal_matmul.py +42 -0
  892. mindspore/ops/_op_impl/aicpu/tridiagonal_solve.py +35 -0
  893. mindspore/ops/_op_impl/aicpu/tril.py +42 -0
  894. mindspore/ops/_op_impl/aicpu/tril_indices.py +34 -0
  895. mindspore/ops/_op_impl/aicpu/triplet_margin_loss.py +62 -0
  896. mindspore/ops/_op_impl/aicpu/triu.py +43 -0
  897. mindspore/ops/_op_impl/aicpu/triu_indices.py +34 -0
  898. mindspore/ops/_op_impl/aicpu/truncated_normal.py +39 -0
  899. mindspore/ops/_op_impl/aicpu/uniform.py +36 -0
  900. mindspore/ops/_op_impl/aicpu/uniform_candidate_sampler.py +41 -0
  901. mindspore/ops/_op_impl/aicpu/uniform_int.py +36 -0
  902. mindspore/ops/_op_impl/aicpu/uniform_real.py +33 -0
  903. mindspore/ops/_op_impl/aicpu/unique.py +31 -0
  904. mindspore/ops/_op_impl/aicpu/unique_consecutive.py +47 -0
  905. mindspore/ops/_op_impl/aicpu/unique_with_pad.py +32 -0
  906. mindspore/ops/_op_impl/aicpu/unravel_index.py +32 -0
  907. mindspore/ops/_op_impl/aicpu/unsorted_segment_prod.py +53 -0
  908. mindspore/ops/_op_impl/aicpu/unsorted_segment_sum.py +57 -0
  909. mindspore/ops/_op_impl/aicpu/unstack.py +45 -0
  910. mindspore/ops/_op_impl/aicpu/update_cache.py +44 -0
  911. mindspore/ops/_op_impl/aicpu/upper_bound.py +47 -0
  912. mindspore/ops/_op_impl/aicpu/upsample_nearest_3d.py +42 -0
  913. mindspore/ops/_op_impl/aicpu/upsample_nearest_3d_grad.py +49 -0
  914. mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d.py +40 -0
  915. mindspore/ops/_op_impl/aicpu/upsample_trilinear_3d_grad.py +50 -0
  916. mindspore/ops/_op_impl/aicpu/xdivy.py +35 -0
  917. mindspore/ops/_op_impl/aicpu/xlogy.py +33 -0
  918. mindspore/ops/_op_impl/aicpu/zeros_like.py +42 -0
  919. mindspore/ops/_op_impl/aicpu/zeta.py +31 -0
  920. mindspore/ops/_op_impl/akg/__init__.py +19 -0
  921. mindspore/ops/_op_impl/akg/ascend/__init__.py +48 -0
  922. mindspore/ops/_op_impl/akg/ascend/abs.py +35 -0
  923. mindspore/ops/_op_impl/akg/ascend/add.py +42 -0
  924. mindspore/ops/_op_impl/akg/ascend/add_n.py +37 -0
  925. mindspore/ops/_op_impl/akg/ascend/batchmatmul.py +33 -0
  926. mindspore/ops/_op_impl/akg/ascend/cast.py +46 -0
  927. mindspore/ops/_op_impl/akg/ascend/equal.py +35 -0
  928. mindspore/ops/_op_impl/akg/ascend/exp.py +35 -0
  929. mindspore/ops/_op_impl/akg/ascend/expand_dims.py +33 -0
  930. mindspore/ops/_op_impl/akg/ascend/greater.py +34 -0
  931. mindspore/ops/_op_impl/akg/ascend/greater_equal.py +35 -0
  932. mindspore/ops/_op_impl/akg/ascend/less.py +31 -0
  933. mindspore/ops/_op_impl/akg/ascend/less_equal.py +35 -0
  934. mindspore/ops/_op_impl/akg/ascend/load_im2col.py +33 -0
  935. mindspore/ops/_op_impl/akg/ascend/log.py +34 -0
  936. mindspore/ops/_op_impl/akg/ascend/maximum.py +36 -0
  937. mindspore/ops/_op_impl/akg/ascend/minimum.py +39 -0
  938. mindspore/ops/_op_impl/akg/ascend/mul.py +41 -0
  939. mindspore/ops/_op_impl/akg/ascend/neg.py +37 -0
  940. mindspore/ops/_op_impl/akg/ascend/pow.py +35 -0
  941. mindspore/ops/_op_impl/akg/ascend/prod_force_se_a.py +33 -0
  942. mindspore/ops/_op_impl/akg/ascend/real_div.py +36 -0
  943. mindspore/ops/_op_impl/akg/ascend/reciprocal.py +32 -0
  944. mindspore/ops/_op_impl/akg/ascend/reduce_max.py +32 -0
  945. mindspore/ops/_op_impl/akg/ascend/reduce_min.py +32 -0
  946. mindspore/ops/_op_impl/akg/ascend/reduce_sum.py +37 -0
  947. mindspore/ops/_op_impl/akg/ascend/rsqrt.py +35 -0
  948. mindspore/ops/_op_impl/akg/ascend/select.py +37 -0
  949. mindspore/ops/_op_impl/akg/ascend/sqrt.py +35 -0
  950. mindspore/ops/_op_impl/akg/ascend/square.py +35 -0
  951. mindspore/ops/_op_impl/akg/ascend/sub.py +42 -0
  952. mindspore/ops/_op_impl/akg/cpu/__init__.py +23 -0
  953. mindspore/ops/_op_impl/akg/cpu/coo2csr.py +29 -0
  954. mindspore/ops/_op_impl/akg/cpu/csr2coo.py +29 -0
  955. mindspore/ops/_op_impl/akg/cpu/csr_gather.py +33 -0
  956. mindspore/ops/_op_impl/akg/cpu/csr_mm.py +34 -0
  957. mindspore/ops/_op_impl/akg/cpu/csr_mul.py +33 -0
  958. mindspore/ops/_op_impl/akg/cpu/csr_mv.py +33 -0
  959. mindspore/ops/_op_impl/akg/cpu/csr_reduce_sum.py +31 -0
  960. mindspore/ops/_op_impl/akg/gpu/__init__.py +24 -0
  961. mindspore/ops/_op_impl/akg/gpu/coo2csr.py +29 -0
  962. mindspore/ops/_op_impl/akg/gpu/csr2coo.py +29 -0
  963. mindspore/ops/_op_impl/akg/gpu/csr_div.py +36 -0
  964. mindspore/ops/_op_impl/akg/gpu/csr_gather.py +33 -0
  965. mindspore/ops/_op_impl/akg/gpu/csr_mm.py +37 -0
  966. mindspore/ops/_op_impl/akg/gpu/csr_mul.py +36 -0
  967. mindspore/ops/_op_impl/akg/gpu/csr_mv.py +36 -0
  968. mindspore/ops/_op_impl/akg/gpu/csr_reduce_sum.py +33 -0
  969. mindspore/ops/_op_impl/cpu/__init__.py +78 -0
  970. mindspore/ops/_op_impl/cpu/adam.py +49 -0
  971. mindspore/ops/_op_impl/cpu/adam_weight_decay.py +47 -0
  972. mindspore/ops/_op_impl/cpu/arg_max.py +30 -0
  973. mindspore/ops/_op_impl/cpu/arg_max_with_value.py +31 -0
  974. mindspore/ops/_op_impl/cpu/arg_min_with_value.py +31 -0
  975. mindspore/ops/_op_impl/cpu/buffer_append.py +28 -0
  976. mindspore/ops/_op_impl/cpu/buffer_get.py +28 -0
  977. mindspore/ops/_op_impl/cpu/buffer_sample.py +28 -0
  978. mindspore/ops/_op_impl/cpu/cast.py +171 -0
  979. mindspore/ops/_op_impl/cpu/concat_offset.py +38 -0
  980. mindspore/ops/_op_impl/cpu/conv2d.py +30 -0
  981. mindspore/ops/_op_impl/cpu/conv3d.py +30 -0
  982. mindspore/ops/_op_impl/cpu/div.py +32 -0
  983. mindspore/ops/_op_impl/cpu/dropout.py +31 -0
  984. mindspore/ops/_op_impl/cpu/dropout_grad.py +30 -0
  985. mindspore/ops/_op_impl/cpu/dynamic_shape.py +42 -0
  986. mindspore/ops/_op_impl/cpu/dynamic_stitch.py +41 -0
  987. mindspore/ops/_op_impl/cpu/equal_count.py +30 -0
  988. mindspore/ops/_op_impl/cpu/gather_d.py +49 -0
  989. mindspore/ops/_op_impl/cpu/gather_d_grad.py +38 -0
  990. mindspore/ops/_op_impl/cpu/gather_d_grad_v2.py +40 -0
  991. mindspore/ops/_op_impl/cpu/gather_v2.py +40 -0
  992. mindspore/ops/_op_impl/cpu/hsigmoid.py +33 -0
  993. mindspore/ops/_op_impl/cpu/hsigmoid_grad.py +34 -0
  994. mindspore/ops/_op_impl/cpu/hswish.py +32 -0
  995. mindspore/ops/_op_impl/cpu/hswish_grad.py +33 -0
  996. mindspore/ops/_op_impl/cpu/identity_n.py +40 -0
  997. mindspore/ops/_op_impl/cpu/is_finite.py +39 -0
  998. mindspore/ops/_op_impl/cpu/l2loss.py +30 -0
  999. mindspore/ops/_op_impl/cpu/layer_norm.py +36 -0
  1000. mindspore/ops/_op_impl/cpu/layer_norm_grad.py +38 -0
  1001. mindspore/ops/_op_impl/cpu/maximum.py +35 -0
  1002. mindspore/ops/_op_impl/cpu/maximum_grad.py +47 -0
  1003. mindspore/ops/_op_impl/cpu/minimum.py +40 -0
  1004. mindspore/ops/_op_impl/cpu/minimum_grad.py +51 -0
  1005. mindspore/ops/_op_impl/cpu/mirror_pad.py +36 -0
  1006. mindspore/ops/_op_impl/cpu/mirror_pad_grad.py +36 -0
  1007. mindspore/ops/_op_impl/cpu/mul.py +32 -0
  1008. mindspore/ops/_op_impl/cpu/one_hot.py +31 -0
  1009. mindspore/ops/_op_impl/cpu/pad.py +32 -0
  1010. mindspore/ops/_op_impl/cpu/pow.py +32 -0
  1011. mindspore/ops/_op_impl/cpu/priority_replay_buffer.py +42 -0
  1012. mindspore/ops/_op_impl/cpu/pyexecute.py +29 -0
  1013. mindspore/ops/_op_impl/cpu/pyfunc.py +29 -0
  1014. mindspore/ops/_op_impl/cpu/range.py +34 -0
  1015. mindspore/ops/_op_impl/cpu/real_div.py +33 -0
  1016. mindspore/ops/_op_impl/cpu/reduce_all.py +29 -0
  1017. mindspore/ops/_op_impl/cpu/reduce_any.py +29 -0
  1018. mindspore/ops/_op_impl/cpu/reduce_max.py +32 -0
  1019. mindspore/ops/_op_impl/cpu/reduce_mean.py +40 -0
  1020. mindspore/ops/_op_impl/cpu/reduce_min.py +32 -0
  1021. mindspore/ops/_op_impl/cpu/reduce_prod.py +40 -0
  1022. mindspore/ops/_op_impl/cpu/reduce_std.py +31 -0
  1023. mindspore/ops/_op_impl/cpu/reduce_sum.py +41 -0
  1024. mindspore/ops/_op_impl/cpu/space_to_batch_nd.py +38 -0
  1025. mindspore/ops/_op_impl/cpu/sparse_slice.py +62 -0
  1026. mindspore/ops/_op_impl/cpu/sparse_slice_grad.py +60 -0
  1027. mindspore/ops/_op_impl/cpu/split.py +34 -0
  1028. mindspore/ops/_op_impl/cpu/sspaddmm.py +95 -0
  1029. mindspore/ops/_op_impl/cpu/stack.py +38 -0
  1030. mindspore/ops/_op_impl/cpu/sub.py +32 -0
  1031. mindspore/ops/_op_impl/cpu/tensor_copy_slices.py +41 -0
  1032. mindspore/ops/_op_impl/cpu/tile.py +37 -0
  1033. mindspore/ops/_op_impl/cpu/top_k.py +31 -0
  1034. mindspore/ops/_op_impl/cpu/transpose.py +39 -0
  1035. mindspore/ops/_primitive_cache.py +90 -0
  1036. mindspore/ops/_register_for_op.py +73 -0
  1037. mindspore/ops/_utils/__init__.py +20 -0
  1038. mindspore/ops/_utils/utils.py +147 -0
  1039. mindspore/ops/_vmap/__init__.py +25 -0
  1040. mindspore/ops/_vmap/vmap_array_ops.py +2151 -0
  1041. mindspore/ops/_vmap/vmap_base.py +533 -0
  1042. mindspore/ops/_vmap/vmap_convolution_ops.py +441 -0
  1043. mindspore/ops/_vmap/vmap_debug_ops.py +50 -0
  1044. mindspore/ops/_vmap/vmap_grad_math_ops.py +274 -0
  1045. mindspore/ops/_vmap/vmap_grad_nn_ops.py +806 -0
  1046. mindspore/ops/_vmap/vmap_image_ops.py +194 -0
  1047. mindspore/ops/_vmap/vmap_math_ops.py +977 -0
  1048. mindspore/ops/_vmap/vmap_nn_ops.py +2209 -0
  1049. mindspore/ops/_vmap/vmap_other_ops.py +105 -0
  1050. mindspore/ops/_vmap/vmap_random_ops.py +122 -0
  1051. mindspore/ops/_vmap/vmap_sparse_ops.py +89 -0
  1052. mindspore/ops/auto_generate/__init__.py +31 -0
  1053. mindspore/ops/auto_generate/cpp_create_prim_instance_helper.py +231 -0
  1054. mindspore/ops/auto_generate/gen_arg_dtype_cast.py +250 -0
  1055. mindspore/ops/auto_generate/gen_arg_handler.py +197 -0
  1056. mindspore/ops/auto_generate/gen_extend_func.py +980 -0
  1057. mindspore/ops/auto_generate/gen_ops_def.py +6443 -0
  1058. mindspore/ops/auto_generate/gen_ops_prim.py +13167 -0
  1059. mindspore/ops/auto_generate/pyboost_inner_prim.py +429 -0
  1060. mindspore/ops/composite/__init__.py +71 -0
  1061. mindspore/ops/composite/base.py +1281 -0
  1062. mindspore/ops/composite/env_ops.py +41 -0
  1063. mindspore/ops/composite/math_ops.py +125 -0
  1064. mindspore/ops/composite/multitype_ops/__init__.py +77 -0
  1065. mindspore/ops/composite/multitype_ops/_compile_utils.py +1458 -0
  1066. mindspore/ops/composite/multitype_ops/_constexpr_utils.py +897 -0
  1067. mindspore/ops/composite/multitype_ops/add_impl.py +606 -0
  1068. mindspore/ops/composite/multitype_ops/bitwise_and_impl.py +56 -0
  1069. mindspore/ops/composite/multitype_ops/bitwise_or_impl.py +56 -0
  1070. mindspore/ops/composite/multitype_ops/bitwise_xor_impl.py +56 -0
  1071. mindspore/ops/composite/multitype_ops/div_impl.py +189 -0
  1072. mindspore/ops/composite/multitype_ops/equal_impl.py +335 -0
  1073. mindspore/ops/composite/multitype_ops/floordiv_impl.py +88 -0
  1074. mindspore/ops/composite/multitype_ops/getitem_impl.py +400 -0
  1075. mindspore/ops/composite/multitype_ops/greater_equal_impl.py +109 -0
  1076. mindspore/ops/composite/multitype_ops/greater_impl.py +110 -0
  1077. mindspore/ops/composite/multitype_ops/in_impl.py +196 -0
  1078. mindspore/ops/composite/multitype_ops/left_shift_impl.py +37 -0
  1079. mindspore/ops/composite/multitype_ops/less_equal_impl.py +111 -0
  1080. mindspore/ops/composite/multitype_ops/less_impl.py +112 -0
  1081. mindspore/ops/composite/multitype_ops/logic_not_impl.py +113 -0
  1082. mindspore/ops/composite/multitype_ops/logical_and_impl.py +60 -0
  1083. mindspore/ops/composite/multitype_ops/logical_or_impl.py +61 -0
  1084. mindspore/ops/composite/multitype_ops/mod_impl.py +86 -0
  1085. mindspore/ops/composite/multitype_ops/mul_impl.py +294 -0
  1086. mindspore/ops/composite/multitype_ops/negative_impl.py +79 -0
  1087. mindspore/ops/composite/multitype_ops/not_equal_impl.py +290 -0
  1088. mindspore/ops/composite/multitype_ops/not_in_impl.py +196 -0
  1089. mindspore/ops/composite/multitype_ops/ones_like_impl.py +96 -0
  1090. mindspore/ops/composite/multitype_ops/pow_impl.py +87 -0
  1091. mindspore/ops/composite/multitype_ops/right_shift_impl.py +37 -0
  1092. mindspore/ops/composite/multitype_ops/setitem_impl.py +884 -0
  1093. mindspore/ops/composite/multitype_ops/sub_impl.py +116 -0
  1094. mindspore/ops/composite/multitype_ops/uadd_impl.py +29 -0
  1095. mindspore/ops/composite/multitype_ops/zeros_like_impl.py +228 -0
  1096. mindspore/ops/deprecated.py +315 -0
  1097. mindspore/ops/extend/__init__.py +53 -0
  1098. mindspore/ops/extend/array_func.py +218 -0
  1099. mindspore/ops/extend/math_func.py +76 -0
  1100. mindspore/ops/extend/nn_func.py +308 -0
  1101. mindspore/ops/function/__init__.py +760 -0
  1102. mindspore/ops/function/array_func.py +6889 -0
  1103. mindspore/ops/function/clip_func.py +384 -0
  1104. mindspore/ops/function/debug_func.py +69 -0
  1105. mindspore/ops/function/fft_func.py +31 -0
  1106. mindspore/ops/function/grad/__init__.py +34 -0
  1107. mindspore/ops/function/grad/grad_func.py +1424 -0
  1108. mindspore/ops/function/image_func.py +292 -0
  1109. mindspore/ops/function/linalg_func.py +416 -0
  1110. mindspore/ops/function/math_func.py +11877 -0
  1111. mindspore/ops/function/nn_func.py +8175 -0
  1112. mindspore/ops/function/other_func.py +114 -0
  1113. mindspore/ops/function/parameter_func.py +134 -0
  1114. mindspore/ops/function/random_func.py +1539 -0
  1115. mindspore/ops/function/reshard_func.py +102 -0
  1116. mindspore/ops/function/sparse_func.py +884 -0
  1117. mindspore/ops/function/sparse_unary_func.py +2422 -0
  1118. mindspore/ops/function/spectral_func.py +150 -0
  1119. mindspore/ops/function/vmap_func.py +116 -0
  1120. mindspore/ops/functional.py +454 -0
  1121. mindspore/ops/op_info_register.py +1572 -0
  1122. mindspore/ops/operations/__init__.py +717 -0
  1123. mindspore/ops/operations/_csr_ops.py +403 -0
  1124. mindspore/ops/operations/_custom_grad.py +181 -0
  1125. mindspore/ops/operations/_embedding_cache_ops.py +307 -0
  1126. mindspore/ops/operations/_grad_ops.py +3052 -0
  1127. mindspore/ops/operations/_infer_ops.py +19 -0
  1128. mindspore/ops/operations/_inner_ops.py +2567 -0
  1129. mindspore/ops/operations/_map_tensor_ops.py +112 -0
  1130. mindspore/ops/operations/_ms_kernel.py +601 -0
  1131. mindspore/ops/operations/_ocr_ops.py +379 -0
  1132. mindspore/ops/operations/_opaque_predicate_registry.py +41 -0
  1133. mindspore/ops/operations/_pyfunc_registry.py +58 -0
  1134. mindspore/ops/operations/_quant_ops.py +1844 -0
  1135. mindspore/ops/operations/_rl_inner_ops.py +1231 -0
  1136. mindspore/ops/operations/_scalar_ops.py +106 -0
  1137. mindspore/ops/operations/_sequence_ops.py +1155 -0
  1138. mindspore/ops/operations/_sparse_grad_ops.py +56 -0
  1139. mindspore/ops/operations/_tensor_array.py +359 -0
  1140. mindspore/ops/operations/_thor_ops.py +807 -0
  1141. mindspore/ops/operations/array_ops.py +6258 -0
  1142. mindspore/ops/operations/comm_ops.py +1996 -0
  1143. mindspore/ops/operations/control_ops.py +127 -0
  1144. mindspore/ops/operations/custom_ops.py +1065 -0
  1145. mindspore/ops/operations/debug_ops.py +646 -0
  1146. mindspore/ops/operations/image_ops.py +1041 -0
  1147. mindspore/ops/operations/inner_ops.py +697 -0
  1148. mindspore/ops/operations/linalg_ops.py +95 -0
  1149. mindspore/ops/operations/manually_defined/__init__.py +24 -0
  1150. mindspore/ops/operations/manually_defined/_inner.py +61 -0
  1151. mindspore/ops/operations/manually_defined/ops_def.py +2016 -0
  1152. mindspore/ops/operations/math_ops.py +5306 -0
  1153. mindspore/ops/operations/nn_ops.py +9669 -0
  1154. mindspore/ops/operations/other_ops.py +871 -0
  1155. mindspore/ops/operations/random_ops.py +1243 -0
  1156. mindspore/ops/operations/reshard_ops.py +53 -0
  1157. mindspore/ops/operations/rl_ops.py +288 -0
  1158. mindspore/ops/operations/sparse_ops.py +2753 -0
  1159. mindspore/ops/operations/spectral_ops.py +111 -0
  1160. mindspore/ops/primitive.py +1034 -0
  1161. mindspore/ops/signature.py +54 -0
  1162. mindspore/ops/silent_check.py +162 -0
  1163. mindspore/ops/vm_impl_registry.py +91 -0
  1164. mindspore/ops_generate/__init__.py +27 -0
  1165. mindspore/ops_generate/arg_dtype_cast.py +250 -0
  1166. mindspore/ops_generate/arg_handler.py +197 -0
  1167. mindspore/ops_generate/gen_aclnn_implement.py +263 -0
  1168. mindspore/ops_generate/gen_ops.py +1084 -0
  1169. mindspore/ops_generate/gen_ops_inner_prim.py +131 -0
  1170. mindspore/ops_generate/gen_pyboost_func.py +968 -0
  1171. mindspore/ops_generate/gen_utils.py +209 -0
  1172. mindspore/ops_generate/op_proto.py +138 -0
  1173. mindspore/ops_generate/pyboost_utils.py +354 -0
  1174. mindspore/ops_generate/template.py +239 -0
  1175. mindspore/parallel/__init__.py +28 -0
  1176. mindspore/parallel/_auto_parallel_context.py +1466 -0
  1177. mindspore/parallel/_cell_wrapper.py +91 -0
  1178. mindspore/parallel/_cost_model_context.py +700 -0
  1179. mindspore/parallel/_dp_allreduce_fusion.py +159 -0
  1180. mindspore/parallel/_offload_context.py +275 -0
  1181. mindspore/parallel/_parallel_serialization.py +533 -0
  1182. mindspore/parallel/_ps_context.py +242 -0
  1183. mindspore/parallel/_recovery_context.py +110 -0
  1184. mindspore/parallel/_tensor.py +660 -0
  1185. mindspore/parallel/_transformer/__init__.py +35 -0
  1186. mindspore/parallel/_transformer/layers.py +765 -0
  1187. mindspore/parallel/_transformer/loss.py +251 -0
  1188. mindspore/parallel/_transformer/moe.py +693 -0
  1189. mindspore/parallel/_transformer/op_parallel_config.py +222 -0
  1190. mindspore/parallel/_transformer/transformer.py +3119 -0
  1191. mindspore/parallel/_utils.py +600 -0
  1192. mindspore/parallel/algo_parameter_config.py +400 -0
  1193. mindspore/parallel/checkpoint_transform.py +643 -0
  1194. mindspore/parallel/cluster/__init__.py +15 -0
  1195. mindspore/parallel/cluster/process_entity/__init__.py +18 -0
  1196. mindspore/parallel/cluster/process_entity/_api.py +344 -0
  1197. mindspore/parallel/cluster/process_entity/_utils.py +126 -0
  1198. mindspore/parallel/cluster/run.py +136 -0
  1199. mindspore/parallel/mpi/__init__.py +14 -0
  1200. mindspore/parallel/mpi/_mpi_config.py +116 -0
  1201. mindspore/parallel/parameter_broadcast.py +152 -0
  1202. mindspore/parallel/shard.py +350 -0
  1203. mindspore/perf_msvcbuildinsights.dll +0 -0
  1204. mindspore/pgodb140.dll +0 -0
  1205. mindspore/pgort140.dll +0 -0
  1206. mindspore/profiler/__init__.py +27 -0
  1207. mindspore/profiler/common/__init__.py +14 -0
  1208. mindspore/profiler/common/exceptions/__init__.py +14 -0
  1209. mindspore/profiler/common/exceptions/error_code.py +83 -0
  1210. mindspore/profiler/common/exceptions/exceptions.py +286 -0
  1211. mindspore/profiler/common/process_pool.py +41 -0
  1212. mindspore/profiler/common/singleton.py +28 -0
  1213. mindspore/profiler/common/struct_type.py +118 -0
  1214. mindspore/profiler/common/util.py +444 -0
  1215. mindspore/profiler/common/validator/__init__.py +14 -0
  1216. mindspore/profiler/common/validator/validate_path.py +84 -0
  1217. mindspore/profiler/envprofiling.py +256 -0
  1218. mindspore/profiler/parser/__init__.py +14 -0
  1219. mindspore/profiler/parser/aicpu_data_parser.py +272 -0
  1220. mindspore/profiler/parser/ascend_analysis/__init__.py +14 -0
  1221. mindspore/profiler/parser/ascend_analysis/constant.py +53 -0
  1222. mindspore/profiler/parser/ascend_analysis/file_manager.py +159 -0
  1223. mindspore/profiler/parser/ascend_analysis/function_event.py +161 -0
  1224. mindspore/profiler/parser/ascend_analysis/fwk_cann_parser.py +131 -0
  1225. mindspore/profiler/parser/ascend_analysis/fwk_file_parser.py +85 -0
  1226. mindspore/profiler/parser/ascend_analysis/msprof_timeline_parser.py +57 -0
  1227. mindspore/profiler/parser/ascend_analysis/profiler_info_parser.py +116 -0
  1228. mindspore/profiler/parser/ascend_analysis/tlv_decoder.py +86 -0
  1229. mindspore/profiler/parser/ascend_analysis/trace_event_manager.py +68 -0
  1230. mindspore/profiler/parser/ascend_cluster_generator.py +116 -0
  1231. mindspore/profiler/parser/ascend_communicate_generator.py +314 -0
  1232. mindspore/profiler/parser/ascend_flops_generator.py +116 -0
  1233. mindspore/profiler/parser/ascend_fpbp_generator.py +82 -0
  1234. mindspore/profiler/parser/ascend_hccl_generator.py +271 -0
  1235. mindspore/profiler/parser/ascend_integrate_generator.py +42 -0
  1236. mindspore/profiler/parser/ascend_memory_generator.py +185 -0
  1237. mindspore/profiler/parser/ascend_msprof_exporter.py +281 -0
  1238. mindspore/profiler/parser/ascend_msprof_generator.py +187 -0
  1239. mindspore/profiler/parser/ascend_op_generator.py +334 -0
  1240. mindspore/profiler/parser/ascend_steptrace_generator.py +94 -0
  1241. mindspore/profiler/parser/ascend_timeline_generator.py +543 -0
  1242. mindspore/profiler/parser/base_timeline_generator.py +489 -0
  1243. mindspore/profiler/parser/container.py +229 -0
  1244. mindspore/profiler/parser/cpu_gpu_timeline_generator.py +684 -0
  1245. mindspore/profiler/parser/flops_parser.py +531 -0
  1246. mindspore/profiler/parser/framework_enum.py +111 -0
  1247. mindspore/profiler/parser/framework_parser.py +854 -0
  1248. mindspore/profiler/parser/framework_struct.py +61 -0
  1249. mindspore/profiler/parser/hccl_parser.py +573 -0
  1250. mindspore/profiler/parser/hwts_log_parser.py +122 -0
  1251. mindspore/profiler/parser/integrator.py +526 -0
  1252. mindspore/profiler/parser/memory_usage_parser.py +431 -0
  1253. mindspore/profiler/parser/minddata_analyzer.py +800 -0
  1254. mindspore/profiler/parser/minddata_parser.py +186 -0
  1255. mindspore/profiler/parser/minddata_pipeline_parser.py +299 -0
  1256. mindspore/profiler/parser/msadvisor_analyzer.py +82 -0
  1257. mindspore/profiler/parser/msadvisor_parser.py +240 -0
  1258. mindspore/profiler/parser/op_intermediate_parser.py +149 -0
  1259. mindspore/profiler/parser/optime_parser.py +250 -0
  1260. mindspore/profiler/parser/profiler_info.py +141 -0
  1261. mindspore/profiler/parser/step_trace_parser.py +666 -0
  1262. mindspore/profiler/profiling.py +2054 -0
  1263. mindspore/rewrite/__init__.py +29 -0
  1264. mindspore/rewrite/api/__init__.py +17 -0
  1265. mindspore/rewrite/api/node.py +519 -0
  1266. mindspore/rewrite/api/node_type.py +53 -0
  1267. mindspore/rewrite/api/pattern_engine.py +490 -0
  1268. mindspore/rewrite/api/scoped_value.py +181 -0
  1269. mindspore/rewrite/api/symbol_tree.py +497 -0
  1270. mindspore/rewrite/ast_helpers/__init__.py +25 -0
  1271. mindspore/rewrite/ast_helpers/ast_converter.py +143 -0
  1272. mindspore/rewrite/ast_helpers/ast_finder.py +404 -0
  1273. mindspore/rewrite/ast_helpers/ast_flattener.py +268 -0
  1274. mindspore/rewrite/ast_helpers/ast_modifier.py +605 -0
  1275. mindspore/rewrite/ast_helpers/ast_replacer.py +79 -0
  1276. mindspore/rewrite/common/__init__.py +19 -0
  1277. mindspore/rewrite/common/config.py +24 -0
  1278. mindspore/rewrite/common/error_log.py +39 -0
  1279. mindspore/rewrite/common/event.py +28 -0
  1280. mindspore/rewrite/common/namer.py +271 -0
  1281. mindspore/rewrite/common/namespace.py +118 -0
  1282. mindspore/rewrite/common/observable.py +44 -0
  1283. mindspore/rewrite/common/observer.py +54 -0
  1284. mindspore/rewrite/node/__init__.py +22 -0
  1285. mindspore/rewrite/node/call_function.py +95 -0
  1286. mindspore/rewrite/node/cell_container.py +139 -0
  1287. mindspore/rewrite/node/control_flow.py +113 -0
  1288. mindspore/rewrite/node/node.py +1428 -0
  1289. mindspore/rewrite/node/node_manager.py +283 -0
  1290. mindspore/rewrite/node/node_topological_manager.py +223 -0
  1291. mindspore/rewrite/parsers/__init__.py +29 -0
  1292. mindspore/rewrite/parsers/arguments_parser.py +63 -0
  1293. mindspore/rewrite/parsers/assign_parser.py +852 -0
  1294. mindspore/rewrite/parsers/attribute_parser.py +57 -0
  1295. mindspore/rewrite/parsers/class_def_parser.py +289 -0
  1296. mindspore/rewrite/parsers/constant_parser.py +104 -0
  1297. mindspore/rewrite/parsers/container_parser.py +88 -0
  1298. mindspore/rewrite/parsers/expr_parser.py +55 -0
  1299. mindspore/rewrite/parsers/for_parser.py +61 -0
  1300. mindspore/rewrite/parsers/function_def_parser.py +84 -0
  1301. mindspore/rewrite/parsers/if_parser.py +85 -0
  1302. mindspore/rewrite/parsers/module_parser.py +117 -0
  1303. mindspore/rewrite/parsers/parser.py +43 -0
  1304. mindspore/rewrite/parsers/parser_register.py +86 -0
  1305. mindspore/rewrite/parsers/return_parser.py +37 -0
  1306. mindspore/rewrite/parsers/while_parser.py +59 -0
  1307. mindspore/rewrite/sparsify/__init__.py +0 -0
  1308. mindspore/rewrite/sparsify/sparse_transformer.py +457 -0
  1309. mindspore/rewrite/sparsify/sparsify.py +112 -0
  1310. mindspore/rewrite/sparsify/utils.py +179 -0
  1311. mindspore/rewrite/symbol_tree/__init__.py +20 -0
  1312. mindspore/rewrite/symbol_tree/symbol_tree.py +1819 -0
  1313. mindspore/rewrite/symbol_tree/symbol_tree_builder.py +76 -0
  1314. mindspore/rewrite/symbol_tree/symbol_tree_dumper.py +142 -0
  1315. mindspore/run_check/__init__.py +20 -0
  1316. mindspore/run_check/_check_version.py +574 -0
  1317. mindspore/run_check/run_check.py +66 -0
  1318. mindspore/safeguard/__init__.py +18 -0
  1319. mindspore/safeguard/rewrite_obfuscation.py +531 -0
  1320. mindspore/swresample-4.dll +0 -0
  1321. mindspore/swscale-6.dll +0 -0
  1322. mindspore/tbbmalloc.dll +0 -0
  1323. mindspore/tinyxml2.dll +0 -0
  1324. mindspore/train/__init__.py +47 -0
  1325. mindspore/train/_utils.py +439 -0
  1326. mindspore/train/amp.py +817 -0
  1327. mindspore/train/anf_ir_pb2.py +1517 -0
  1328. mindspore/train/callback/__init__.py +44 -0
  1329. mindspore/train/callback/_backup_and_restore.py +117 -0
  1330. mindspore/train/callback/_callback.py +613 -0
  1331. mindspore/train/callback/_checkpoint.py +751 -0
  1332. mindspore/train/callback/_cluster_monitor.py +201 -0
  1333. mindspore/train/callback/_dataset_graph.py +150 -0
  1334. mindspore/train/callback/_early_stop.py +239 -0
  1335. mindspore/train/callback/_flops_collector.py +238 -0
  1336. mindspore/train/callback/_history.py +92 -0
  1337. mindspore/train/callback/_lambda_callback.py +80 -0
  1338. mindspore/train/callback/_landscape.py +1049 -0
  1339. mindspore/train/callback/_loss_monitor.py +107 -0
  1340. mindspore/train/callback/_lr_scheduler_callback.py +76 -0
  1341. mindspore/train/callback/_mindio_ttp.py +443 -0
  1342. mindspore/train/callback/_on_request_exit.py +195 -0
  1343. mindspore/train/callback/_reduce_lr_on_plateau.py +226 -0
  1344. mindspore/train/callback/_summary_collector.py +1184 -0
  1345. mindspore/train/callback/_time_monitor.py +141 -0
  1346. mindspore/train/checkpoint_pb2.py +233 -0
  1347. mindspore/train/data_sink.py +219 -0
  1348. mindspore/train/dataset_helper.py +688 -0
  1349. mindspore/train/lineage_pb2.py +1260 -0
  1350. mindspore/train/loss_scale_manager.py +213 -0
  1351. mindspore/train/memory_profiling_pb2.py +298 -0
  1352. mindspore/train/metrics/__init__.py +175 -0
  1353. mindspore/train/metrics/accuracy.py +133 -0
  1354. mindspore/train/metrics/auc.py +129 -0
  1355. mindspore/train/metrics/bleu_score.py +170 -0
  1356. mindspore/train/metrics/confusion_matrix.py +700 -0
  1357. mindspore/train/metrics/cosine_similarity.py +109 -0
  1358. mindspore/train/metrics/dice.py +116 -0
  1359. mindspore/train/metrics/error.py +175 -0
  1360. mindspore/train/metrics/fbeta.py +167 -0
  1361. mindspore/train/metrics/hausdorff_distance.py +333 -0
  1362. mindspore/train/metrics/loss.py +97 -0
  1363. mindspore/train/metrics/mean_surface_distance.py +189 -0
  1364. mindspore/train/metrics/metric.py +373 -0
  1365. mindspore/train/metrics/occlusion_sensitivity.py +225 -0
  1366. mindspore/train/metrics/perplexity.py +133 -0
  1367. mindspore/train/metrics/precision.py +160 -0
  1368. mindspore/train/metrics/recall.py +159 -0
  1369. mindspore/train/metrics/roc.py +223 -0
  1370. mindspore/train/metrics/root_mean_square_surface_distance.py +191 -0
  1371. mindspore/train/metrics/topk.py +167 -0
  1372. mindspore/train/mind_ir_pb2.py +1903 -0
  1373. mindspore/train/model.py +2176 -0
  1374. mindspore/train/node_strategy_pb2.py +653 -0
  1375. mindspore/train/print_pb2.py +184 -0
  1376. mindspore/train/profiling_parallel_pb2.py +151 -0
  1377. mindspore/train/serialization.py +3101 -0
  1378. mindspore/train/summary/__init__.py +23 -0
  1379. mindspore/train/summary/_lineage_adapter.py +41 -0
  1380. mindspore/train/summary/_summary_adapter.py +496 -0
  1381. mindspore/train/summary/_writer_pool.py +207 -0
  1382. mindspore/train/summary/enums.py +56 -0
  1383. mindspore/train/summary/summary_record.py +581 -0
  1384. mindspore/train/summary/writer.py +167 -0
  1385. mindspore/train/summary_pb2.py +1165 -0
  1386. mindspore/train/train_thor/__init__.py +20 -0
  1387. mindspore/train/train_thor/convert_utils.py +268 -0
  1388. mindspore/train/train_thor/dataset_helper.py +192 -0
  1389. mindspore/train/train_thor/model_thor.py +257 -0
  1390. mindspore/turbojpeg.dll +0 -0
  1391. mindspore/vcmeta.dll +0 -0
  1392. mindspore/vcomp140.dll +0 -0
  1393. mindspore/vcruntime140.dll +0 -0
  1394. mindspore/vcruntime140_1.dll +0 -0
  1395. mindspore/version.py +1 -0
  1396. mindspore-2.3.0.dist-info/METADATA +351 -0
  1397. mindspore-2.3.0.dist-info/RECORD +1400 -0
  1398. mindspore-2.3.0.dist-info/WHEEL +5 -0
  1399. mindspore-2.3.0.dist-info/entry_points.txt +4 -0
  1400. mindspore-2.3.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,2125 @@
1
+ /**
2
+ * Copyright 2020-2024 Huawei Technologies Co., Ltd
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #ifndef MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_VISION_H_
18
+ #define MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_VISION_H_
19
+
20
+ #include <map>
21
+ #include <memory>
22
+ #include <string>
23
+ #include <tuple>
24
+ #include <utility>
25
+ #include <vector>
26
+
27
+ #include "include/api/dual_abi_helper.h"
28
+ #include "include/api/status.h"
29
+ #include "include/dataset/constants.h"
30
+ #include "include/dataset/transforms.h"
31
+ #include "include/dataset/vision_lite.h"
32
+
33
+ namespace mindspore {
34
+ namespace dataset {
35
+ class TensorOperation;
36
+
37
+ // Transform operations for performing computer vision.
38
+ namespace vision {
39
+ /// \brief Apply brightness adjustment on input image.
40
+ class DATASET_API AdjustBrightness final : public TensorTransform {
41
+ public:
42
+ /// \brief Constructor.
43
+ /// \param[in] brightness_factor Adjusts image brightness; must be a non-negative real number.
44
+ /// \par Example
45
+ /// \code
46
+ /// /* Define operations */
47
+ /// auto decode_op = vision::Decode();
48
+ /// auto adjust_brightness_op = vision::AdjustBrightness(2.0);
49
+ ///
50
+ /// /* dataset is an instance of Dataset object */
51
+ /// dataset = dataset->Map({decode_op, adjust_brightness_op}, // operations
52
+ /// {"image"}); // input columns
53
+ /// \endcode
54
+ explicit AdjustBrightness(float brightness_factor);
55
+
56
+ /// \brief Destructor.
57
+ ~AdjustBrightness() override = default;
58
+
59
+ protected:
60
+ /// \brief Function to convert TensorTransform object into a TensorOperation object.
61
+ /// \return Shared pointer to TensorOperation object.
62
+ std::shared_ptr<TensorOperation> Parse() override;
63
+
64
+ private:
65
+ struct Data;
66
+ std::shared_ptr<Data> data_;
67
+ };
68
+
69
+ /// \brief Apply contrast adjustment on input image.
70
+ class DATASET_API AdjustContrast final : public TensorTransform {
71
+ public:
72
+ /// \brief Constructor.
73
+ /// \param[in] contrast_factor Adjusts image contrast; must be a non-negative real number.
74
+ /// \par Example
75
+ /// \code
76
+ /// /* Define operations */
77
+ /// auto decode_op = vision::Decode();
78
+ /// auto adjust_contrast_op = vision::AdjustContrast(10.0);
79
+ ///
80
+ /// /* dataset is an instance of Dataset object */
81
+ /// dataset = dataset->Map({decode_op, adjust_contrast_op}, // operations
82
+ /// {"image"}); // input columns
83
+ /// \endcode
84
+ explicit AdjustContrast(float contrast_factor);
85
+
86
+ /// \brief Destructor.
87
+ ~AdjustContrast() override = default;
88
+
89
+ protected:
90
+ /// \brief Function to convert TensorTransform object into a TensorOperation object.
91
+ /// \return Shared pointer to TensorOperation object.
92
+ std::shared_ptr<TensorOperation> Parse() override;
93
+
94
+ private:
95
+ struct Data;
96
+ std::shared_ptr<Data> data_;
97
+ };
98
+
99
+ /// \brief AdjustGamma TensorTransform.
100
+ /// \note Apply gamma correction on input image.
101
+ class DATASET_API AdjustGamma final : public TensorTransform {
102
+ public:
103
+ /// \brief Constructor.
104
+ /// \param[in] gamma Non-negative real number, which makes the output image pixel value
105
+ /// exponential in relation to the input image pixel value.
106
+ /// \param[in] gain The constant multiplier. Default: 1.0.
107
+ /// \par Example
108
+ /// \code
109
+ /// /* Define operations */
110
+ /// auto decode_op = vision::Decode();
111
+ /// auto adjust_gamma_op = vision::AdjustGamma(10.0);
112
+ ///
113
+ /// /* dataset is an instance of Dataset object */
114
+ /// dataset = dataset->Map({decode_op, adjust_gamma_op}, // operations
115
+ /// {"image"}); // input columns
116
+ /// \endcode
117
+ explicit AdjustGamma(float gamma, float gain = 1.0);
118
+
119
+ /// \brief Destructor.
120
+ ~AdjustGamma() override = default;
121
+
122
+ protected:
123
+ /// \brief Function to convert TensorTransform object into a TensorOperation object.
124
+ /// \return Shared pointer to TensorOperation object.
125
+ std::shared_ptr<TensorOperation> Parse() override;
126
+
127
+ private:
128
+ struct Data;
129
+ std::shared_ptr<Data> data_;
130
+ };
131
+
132
+ /// \brief Apply hue adjustment on input image.
133
+ class DATASET_API AdjustHue final : public TensorTransform {
134
+ public:
135
+ /// \brief Constructor.
136
+ /// \param[in] hue_factor How much to shift the hue channel, must be in the interval [-0.5, 0.5].
137
+ /// \par Example
138
+ /// \code
139
+ /// /* Define operations */
140
+ /// auto decode_op = vision::Decode();
141
+ /// auto adjust_hue_op = vision::AdjustHue(0.2);
142
+ ///
143
+ /// /* dataset is an instance of Dataset object */
144
+ /// dataset = dataset->Map({decode_op, adjust_hue_op}, // operations
145
+ /// {"image"}); // input columns
146
+ /// \endcode
147
+ explicit AdjustHue(float hue_factor);
148
+
149
+ /// \brief Destructor.
150
+ ~AdjustHue() override = default;
151
+
152
+ protected:
153
+ /// \brief Function to convert TensorTransform object into a TensorOperation object.
154
+ /// \return Shared pointer to TensorOperation object.
155
+ std::shared_ptr<TensorOperation> Parse() override;
156
+
157
+ private:
158
+ struct Data;
159
+ std::shared_ptr<Data> data_;
160
+ };
161
+
162
+ /// \brief Adjust the color saturation of the input image.
163
+ class DATASET_API AdjustSaturation final : public TensorTransform {
164
+ public:
165
+ /// \brief Constructor.
166
+ /// \param[in] saturation_factor Adjusts image saturation; must be a non-negative real number.
167
+ /// \par Example
168
+ /// \code
169
+ /// /* Define operations */
170
+ /// auto decode_op = vision::Decode();
171
+ /// auto adjust_saturation_op = vision::AdjustSaturation(2.0);
172
+ ///
173
+ /// /* dataset is an instance of Dataset object */
174
+ /// dataset = dataset->Map({decode_op, adjust_saturation_op}, // operations
175
+ /// {"image"}); // input columns
176
+ /// \endcode
177
+ explicit AdjustSaturation(float saturation_factor);
178
+
179
+ /// \brief Destructor.
180
+ ~AdjustSaturation() override = default;
181
+
182
+ protected:
183
+ /// \brief Function to convert TensorTransform object into a TensorOperation object.
184
+ /// \return Shared pointer to TensorOperation object.
185
+ std::shared_ptr<TensorOperation> Parse() override;
186
+
187
+ private:
188
+ struct Data;
189
+ std::shared_ptr<Data> data_;
190
+ };
191
+
192
+ /// \brief Apply sharpness adjustment on input image. The input image is expected to be in [H, W, C] or [H, W] format.
193
+ class DATASET_API AdjustSharpness final : public TensorTransform {
194
+ public:
195
+ /// \brief Constructor.
196
+ /// \param[in] sharpness_factor How much to adjust the sharpness; can be any non-negative real number.
197
+ /// 0 gives a blurred image, 1 gives the original image, and 2 increases the sharpness by a factor of 2.
198
+ /// \par Example
199
+ /// \code
200
+ /// /* Define operations */
201
+ /// auto decode_op = vision::Decode();
202
+ /// auto adjust_sharpness_op = vision::AdjustSharpness(2.0);
203
+ ///
204
+ /// /* dataset is an instance of Dataset object */
205
+ /// dataset = dataset->Map({decode_op, adjust_sharpness_op}, // operations
206
+ /// {"image"}); // input columns
207
+ /// \endcode
208
+ explicit AdjustSharpness(float sharpness_factor);
209
+
210
+ /// \brief Destructor.
211
+ ~AdjustSharpness() override = default;
212
+
213
+ protected:
214
+ /// \brief Function to convert TensorTransform object into a TensorOperation object.
215
+ /// \return Shared pointer to TensorOperation object.
216
+ std::shared_ptr<TensorOperation> Parse() override;
217
+
218
+ private:
219
+ struct Data;
220
+ std::shared_ptr<Data> data_;
221
+ };
222
+
223
+ /// \brief Apply AutoAugment data augmentation method.
224
+ class DATASET_API AutoAugment final : public TensorTransform {
225
+ public:
226
+ /// \brief Constructor.
227
+ /// \param[in] policy An enum for the data auto augmentation policy (default=AutoAugmentPolicy::kImageNet).
228
+ /// - AutoAugmentPolicy::kImageNet, AutoAugment policy learned on the ImageNet dataset.
229
+ /// - AutoAugmentPolicy::kCifar10, AutoAugment policy learned on the Cifar10 dataset.
230
+ /// - AutoAugmentPolicy::kSVHN, AutoAugment policy learned on the SVHN dataset.
231
+ /// \param[in] interpolation An enum for the mode of interpolation (default=InterpolationMode::kNearestNeighbour).
232
+ /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation.
233
+ /// - InterpolationMode::kLinear, Interpolation method is bilinear interpolation.
234
+ /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation.
235
+ /// - InterpolationMode::kArea, Interpolation method is pixel area interpolation.
236
+ /// \param[in] fill_value A vector representing the pixel intensity of the borders (default={0, 0, 0}).
237
+ /// \par Example
238
+ /// \code
239
+ /// /* Define operations */
240
+ /// auto decode_op = vision::Decode();
241
+ /// auto auto_augment_op = vision::AutoAugment(AutoAugmentPolicy::kImageNet,
242
+ /// InterpolationMode::kNearestNeighbour, {0, 0, 0});
243
+ /// /* dataset is an instance of Dataset object */
244
+ /// dataset = dataset->Map({decode_op, auto_augment_op}, // operations
245
+ /// {"image"}); // input columns
246
+ /// \endcode
247
+ explicit AutoAugment(AutoAugmentPolicy policy = AutoAugmentPolicy::kImageNet,
248
+ InterpolationMode interpolation = InterpolationMode::kNearestNeighbour,
249
+ const std::vector<uint8_t> &fill_value = {0, 0, 0});
250
+
251
+ /// \brief Destructor.
252
+ ~AutoAugment() override = default;
253
+
254
+ protected:
255
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
256
+ /// \return Shared pointer to TensorOperation object.
257
+ std::shared_ptr<TensorOperation> Parse() override;
258
+
259
+ private:
260
+ struct Data;
261
+ std::shared_ptr<Data> data_;
262
+ };
263
+
264
+ /// \brief Apply automatic contrast on the input image.
265
+ class DATASET_API AutoContrast final : public TensorTransform {
266
+ public:
267
+ /// \brief Constructor.
268
+ /// \param[in] cutoff Percent of pixels to cut off from the histogram; the valid range of the cutoff value is 0 to 50.
269
+ /// \param[in] ignore Pixel values to ignore.
270
+ /// \par Example
271
+ /// \code
272
+ /// /* Define operations */
273
+ /// auto decode_op = vision::Decode();
274
+ /// auto autocontrast_op = vision::AutoContrast(10.0, {10, 20});
275
+ ///
276
+ /// /* dataset is an instance of Dataset object */
277
+ /// dataset = dataset->Map({decode_op, autocontrast_op}, // operations
278
+ /// {"image"}); // input columns
279
+ /// \endcode
280
+ explicit AutoContrast(float cutoff = 0.0, const std::vector<uint32_t> &ignore = {});
281
+
282
+ /// \brief Destructor.
283
+ ~AutoContrast() override = default;
284
+
285
+ protected:
286
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
287
+ /// \return Shared pointer to TensorOperation object.
288
+ std::shared_ptr<TensorOperation> Parse() override;
289
+
290
+ private:
291
+ struct Data;
292
+ std::shared_ptr<Data> data_;
293
+ };
294
+
295
+ /// \brief BoundingBoxAugment TensorTransform.
296
+ /// \note Apply a given image transform on a random selection of bounding box regions of a given image.
297
+ class DATASET_API BoundingBoxAugment final : public TensorTransform {
298
+ public:
299
+ /// \brief Constructor.
300
+ /// \param[in] transform Raw pointer to the TensorTransform operation.
301
+ /// \param[in] ratio Ratio of bounding boxes to apply augmentation on. Range: [0, 1] (default=0.3).
302
+ /// \par Example
303
+ /// \code
304
+ /// /* Define operations */
305
+ /// TensorTransform *rotate_op = new vision::RandomRotation({-180, 180});
306
+ /// auto bbox_aug_op = vision::BoundingBoxAugment(rotate_op, 0.5);
307
+ ///
308
+ /// /* dataset is an instance of Dataset object */
309
+ /// dataset = dataset->Map({bbox_aug_op}, // operations
310
+ /// {"image", "bbox"}); // input columns
311
+ /// \endcode
312
+ explicit BoundingBoxAugment(TensorTransform *transform, float ratio = 0.3);
313
+
314
+ /// \brief Constructor.
315
+ /// \param[in] transform Smart pointer to the TensorTransform operation.
316
+ /// \param[in] ratio Ratio of bounding boxes to which augmentation is applied. Range: [0, 1] (default=0.3).
317
+ /// \par Example
318
+ /// \code
319
+ /// /* Define operations */
320
+ /// std::shared_ptr<TensorTransform> flip_op = std::make_shared<vision::RandomHorizontalFlip>(0.5);
321
+ /// std::shared_ptr<TensorTransform> bbox_aug_op = std::make_shared<vision::BoundingBoxAugment>(flip_op, 0.1);
322
+ ///
323
+ /// /* dataset is an instance of Dataset object */
324
+ /// dataset = dataset->Map({bbox_aug_op}, // operations
325
+ /// {"image", "bbox"}); // input columns
326
+ /// \endcode
327
+ explicit BoundingBoxAugment(const std::shared_ptr<TensorTransform> &transform, float ratio = 0.3);
328
+
329
+ /// \brief Constructor.
330
+ /// \param[in] transform Reference wrapper to the TensorTransform operation.
331
+ /// \param[in] ratio Ratio of bounding boxes to which augmentation is applied. Range: [0, 1] (default=0.3).
332
+ /// \par Example
333
+ /// \code
334
+ /// /* Define operations */
335
+ /// vision::RandomColor random_color_op = vision::RandomColor(0.5, 1.0);
336
+ /// vision::BoundingBoxAugment bbox_aug_op = vision::BoundingBoxAugment(random_color_op, 0.8);
337
+ ///
338
+ /// /* dataset is an instance of Dataset object */
339
+ /// dataset = dataset->Map({bbox_aug_op}, // operations
340
+ /// {"image", "bbox"}); // input columns
341
+ /// \endcode
342
+ explicit BoundingBoxAugment(const std::reference_wrapper<TensorTransform> &transform, float ratio = 0.3);
343
+
344
+ /// \brief Destructor.
345
+ ~BoundingBoxAugment() override = default;
346
+
347
+ protected:
348
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
349
+ /// \return Shared pointer to TensorOperation object.
350
+ std::shared_ptr<TensorOperation> Parse() override;
351
+
352
+ private:
353
+ struct Data;
354
+ std::shared_ptr<Data> data_;
355
+ };
356
+
357
+ /// \brief Change the color space of the image.
358
+ class DATASET_API ConvertColor final : public TensorTransform {
359
+ public:
360
+ /// \brief Constructor.
361
+ /// \param[in] convert_mode The mode of image channel conversion.
362
+ /// \par Example
363
+ /// \code
364
+ /// /* dataset is an instance of Dataset object */
365
+ /// dataset = dataset->Map({std::make_shared<vision::Decode>(),
366
+ /// std::make_shared<vision::ConvertColor>(ConvertMode::COLOR_BGR2RGB)}, // operations
367
+ /// {"image"}); // input columns
368
+ /// \endcode
369
+ explicit ConvertColor(ConvertMode convert_mode);
370
+
371
+ /// \brief Destructor.
372
+ ~ConvertColor() override = default;
373
+
374
+ protected:
375
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
376
+ /// \return Shared pointer to TensorOperation object.
377
+ std::shared_ptr<TensorOperation> Parse() override;
378
+
379
+ private:
380
+ struct Data;
381
+ std::shared_ptr<Data> data_;
382
+ };
383
+
384
+ /// \brief Mask a random section of each image with the corresponding part of another randomly
385
+ /// selected image in that batch.
386
+ class DATASET_API CutMixBatch final : public TensorTransform {
387
+ public:
388
+ /// \brief Constructor.
389
+ /// \param[in] image_batch_format The format of the batch.
390
+ /// \param[in] alpha The hyperparameter of beta distribution (default = 1.0).
391
+ /// \param[in] prob The probability by which CutMix is applied to each image (default = 1.0).
392
+ /// \par Example
393
+ /// \code
394
+ /// /* dataset is an instance of Dataset object */
395
+ /// dataset = dataset->Batch(5);
396
+ /// dataset = dataset->Map({std::make_shared<vision::CutMixBatch>(ImageBatchFormat::kNHWC)}, // operations
397
+ /// {"image", "label"}); // input columns
398
+ /// \endcode
399
+ explicit CutMixBatch(ImageBatchFormat image_batch_format, float alpha = 1.0, float prob = 1.0);
400
+
401
+ /// \brief Destructor.
402
+ ~CutMixBatch() override = default;
403
+
404
+ protected:
405
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
406
+ /// \return Shared pointer to TensorOperation object.
407
+ std::shared_ptr<TensorOperation> Parse() override;
408
+
409
+ private:
410
+ struct Data;
411
+ std::shared_ptr<Data> data_;
412
+ };
413
+
414
+ /// \brief Randomly cut (mask) out a given number of square patches from the input image.
415
+ class DATASET_API CutOut final : public TensorTransform {
416
+ public:
417
+ /// \brief Constructor.
418
+ /// \param[in] length Integer representing the side length of each square patch.
419
+ /// \param[in] num_patches Integer representing the number of patches to be cut out of an image.
420
+ /// \param[in] is_hwc A boolean to indicate whether the input image is in HWC format (true) or CHW
421
+ /// format (false) (default = true).
422
+ /// \par Example
423
+ /// \code
424
+ /// /* dataset is an instance of Dataset object */
425
+ /// dataset = dataset->Map({std::make_shared<vision::Decode>(),
426
+ /// std::make_shared<vision::CutOut>(1, 4, true)}, // operations
427
+ /// {"image"}); // input columns
428
+ /// \endcode
429
+ explicit CutOut(int32_t length, int32_t num_patches = 1, bool is_hwc = true);
430
+
431
+ /// \brief Destructor.
432
+ ~CutOut() override = default;
433
+
434
+ protected:
435
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
436
+ /// \return Shared pointer to TensorOperation object.
437
+ std::shared_ptr<TensorOperation> Parse() override;
438
+
439
+ private:
440
+ struct Data;
441
+ std::shared_ptr<Data> data_;
442
+ };
443
+
444
+ /// \brief Decode the input video.
445
+ class DATASET_API DecodeVideo final : public TensorTransform {
446
+ public:
447
+ /// \brief Constructor. It will decode a vector containing a raw video tensor into a vector containing two tensors.
448
+ /// The raw video tensor in the input vector should be a 1D array of UINT8.
449
+ /// The first tensor in the output vector is a visual tensor with shape <T, H, W, C> and type DE_UINT8. Pixel
450
+ /// order is RGB. The second tensor in the output vector is an audio tensor with shape <C, L>.
451
+ /// \par Example
452
+ /// \code
453
+ /// /* Read video file into tensor */
454
+ /// mindspore::MSTensor video;
455
+ /// ASSERT_OK(mindspore::dataset::vision::ReadFile("/path/to/video/file", &video));
456
+ /// std::vector<mindspore::MSTensor> input_tensor;
457
+ /// std::vector<mindspore::MSTensor> output_tensor;
458
+ /// input_tensor.push_back(video);
459
+ /// auto decode_video = vision::DecodeVideo();
460
+ /// auto transform = Execute(decode_video);
461
+ /// Status rc = transform(input_tensor, &output_tensor);
462
+ /// \endcode
463
+ DecodeVideo();
464
+
465
+ /// \brief Destructor.
466
+ ~DecodeVideo() = default;
467
+
468
+ protected:
469
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
470
+ /// \return Shared pointer to TensorOperation object.
471
+ std::shared_ptr<TensorOperation> Parse() override;
472
+ };
473
+
474
+ /// \brief Encode the image as JPEG data.
475
+ /// \param[in] image The image to be encoded.
476
+ /// \param[out] output The encoded JPEG data.
477
+ /// \param[in] quality The quality for the output tensor, in range of [1, 100]. Default: 75.
478
+ /// \return The status code.
479
+ Status DATASET_API EncodeJpeg(const mindspore::MSTensor &image, mindspore::MSTensor *output, int quality = 75);
480
+
481
+ /// \brief Encode the image as PNG data.
482
+ /// \param[in] image The image to be encoded.
483
+ /// \param[out] output The encoded PNG data.
484
+ /// \param[in] compression_level The compression_level for encoding, in range of [0, 9]. Default: 6.
485
+ /// \return The status code.
486
+ Status DATASET_API EncodePng(const mindspore::MSTensor &image, mindspore::MSTensor *output, int compression_level = 6);
487
+
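Unlike the classes above, EncodeJpeg and EncodePng are free functions, so they are called directly rather than passed to Map. The following is a minimal usage sketch, not part of the header itself: it assumes this header is included, that `image` already holds a decoded HWC uint8 image, and that quality 90 and compression level 9 are merely illustrative values within the documented ranges.

    mindspore::MSTensor image;       // assumed to already contain decoded HWC uint8 pixel data
    mindspore::MSTensor jpeg_bytes;  // receives the encoded JPEG data
    mindspore::MSTensor png_bytes;   // receives the encoded PNG data
    // Quality must be in [1, 100]; 75 is the default.
    auto rc = mindspore::dataset::vision::EncodeJpeg(image, &jpeg_bytes, 90);
    if (rc.IsOk()) {
      // Compression level must be in [0, 9]; 6 is the default.
      rc = mindspore::dataset::vision::EncodePng(image, &png_bytes, 9);
    }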
488
+ /// \brief Apply histogram equalization on the input image.
489
+ class DATASET_API Equalize final : public TensorTransform {
490
+ public:
491
+ /// \brief Constructor.
492
+ /// \par Example
493
+ /// \code
494
+ /// /* dataset is an instance of Dataset object */
495
+ /// dataset = dataset->Map({std::make_shared<vision::Decode>(),
496
+ /// std::make_shared<vision::Equalize>()}, // operations
497
+ /// {"image"}); // input columns
498
+ /// \endcode
499
+ Equalize();
500
+
501
+ /// \brief Destructor.
502
+ ~Equalize() override = default;
503
+
504
+ protected:
505
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
506
+ /// \return Shared pointer to TensorOperation object.
507
+ std::shared_ptr<TensorOperation> Parse() override;
508
+ };
509
+
510
+ /// \brief Erase the input image with given value.
511
+ class DATASET_API Erase final : public TensorTransform {
512
+ public:
513
+ /// \brief Constructor.
514
+ /// \param[in] top Vertical ordinate of the upper left corner of erased region.
515
+ /// \param[in] left Horizontal ordinate of the upper left corner of erased region.
516
+ /// \param[in] height Height of erased region.
517
+ /// \param[in] width Width of erased region.
518
+ /// \param[in] value Pixel value used to pad the erased area.
519
+ /// If a single integer is provided, it will be used for all RGB channels.
520
+ /// If a sequence of length 3 is provided, it will be used for R, G, B channels respectively. Default: 0.
521
+ /// \param[in] inplace Whether to apply the erase in place. Default: false.
522
+ /// \par Example
523
+ /// \code
524
+ /// /* dataset is an instance of Dataset object */
525
+ /// dataset = dataset->Map({std::make_shared<vision::Decode>(),
526
+ /// std::make_shared<vision::Erase>(10, 10, 10, 10)}, // operations
527
+ /// {"image"}); // input columns
528
+ /// \endcode
529
+ Erase(int32_t top, int32_t left, int32_t height, int32_t width, const std::vector<float> &value = {0., 0., 0.},
530
+ bool inplace = false);
531
+
532
+ /// \brief Destructor.
533
+ ~Erase() override = default;
534
+
535
+ protected:
536
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
537
+ /// \return Shared pointer to TensorOperation object.
538
+ std::shared_ptr<TensorOperation> Parse() override;
539
+
540
+ private:
541
+ struct Data;
542
+ std::shared_ptr<Data> data_;
543
+ };
544
+
545
+ /// \brief Get the number of input image channels.
546
+ /// \param[in] image Tensor of the image.
547
+ /// \param[out] channels Channels of the image.
548
+ /// \return The status code.
549
+ Status DATASET_API GetImageNumChannels(const mindspore::MSTensor &image, dsize_t *channels);
550
+
551
+ /// \brief Get the size of input image.
552
+ /// \param[in] image Tensor of the image.
553
+ /// \param[out] size Size of the image as [height, width].
554
+ /// \return The status code.
555
+ Status DATASET_API GetImageSize(const mindspore::MSTensor &image, std::vector<dsize_t> *size);
556
+
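GetImageNumChannels and GetImageSize follow the same free-function pattern. A minimal sketch under the same assumptions (`image` is a decoded image tensor; `dsize_t` is the dataset index type used by this header):

    dsize_t channels = 0;
    std::vector<dsize_t> size;  // filled as [height, width] on success
    auto rc = mindspore::dataset::vision::GetImageNumChannels(image, &channels);
    if (rc.IsOk()) {
      rc = mindspore::dataset::vision::GetImageSize(image, &size);
    }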
557
+ /// \brief Flip the input image horizontally.
558
+ class DATASET_API HorizontalFlip final : public TensorTransform {
559
+ public:
560
+ /// \brief Constructor.
561
+ /// \par Example
562
+ /// \code
563
+ /// /* dataset is an instance of Dataset object */
564
+ /// dataset = dataset->Map({std::make_shared<vision::Decode>(),
565
+ /// std::make_shared<vision::HorizontalFlip>()}, // operations
566
+ /// {"image"}); // input columns
567
+ /// \endcode
568
+ HorizontalFlip();
569
+
570
+ /// \brief Destructor.
571
+ ~HorizontalFlip() override = default;
572
+
573
+ protected:
574
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
575
+ /// \return Shared pointer to TensorOperation object.
576
+ std::shared_ptr<TensorOperation> Parse() override;
577
+ };
578
+
579
+ /// \brief Apply invert on the input image in RGB mode.
580
+ class DATASET_API Invert final : public TensorTransform {
581
+ public:
582
+ /// \brief Constructor.
583
+ /// \par Example
584
+ /// \code
585
+ /// /* dataset is an instance of Dataset object */
586
+ /// dataset = dataset->Map({std::make_shared<vision::Decode>(),
587
+ /// std::make_shared<vision::Invert>()}, // operations
588
+ /// {"image"}); // input columns
589
+ /// \endcode
590
+ Invert();
591
+
592
+ /// \brief Destructor.
593
+ ~Invert() override = default;
594
+
595
+ protected:
596
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
597
+ /// \return Shared pointer to TensorOperation object.
598
+ std::shared_ptr<TensorOperation> Parse() override;
599
+ };
600
+
601
+ /// \brief Apply MixUp transformation on an input batch of images and labels. The labels must be in
602
+ /// one-hot format and Batch must be called before calling this function.
603
+ class DATASET_API MixUpBatch final : public TensorTransform {
604
+ public:
605
+ /// \brief Constructor.
606
+ /// \param[in] alpha The hyperparameter of the beta distribution (default = 1.0).
607
+ /// \par Example
608
+ /// \code
609
+ /// /* dataset is an instance of Dataset object */
610
+ /// dataset = dataset->Batch(5);
611
+ /// dataset = dataset->Map({std::make_shared<vision::MixUpBatch>()}, // operations
612
+ /// {"image"}); // input columns
613
+ /// \endcode
614
+ explicit MixUpBatch(float alpha = 1.0);
615
+
616
+ /// \brief Destructor.
617
+ ~MixUpBatch() override = default;
618
+
619
+ protected:
620
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
621
+ /// \return Shared pointer to TensorOperation object.
622
+ std::shared_ptr<TensorOperation> Parse() override;
623
+
624
+ private:
625
+ struct Data;
626
+ std::shared_ptr<Data> data_;
627
+ };
628
+
629
+ /// \brief Normalize the input image with respect to mean and standard deviation and pad an extra
630
+ /// channel with value zero.
631
+ class DATASET_API NormalizePad final : public TensorTransform {
632
+ public:
633
+ /// \brief Constructor.
634
+ /// \param[in] mean A vector of mean values for each channel, with respect to channel order.
635
+ /// The mean values must be in range [0.0, 255.0].
636
+ /// \param[in] std A vector of standard deviations for each channel, with respect to channel order.
637
+ /// The standard deviation values must be in range (0.0, 255.0].
638
+ /// \param[in] dtype The output datatype of Tensor.
639
+ /// The standard deviation values must be "float32" or "float16"(default = "float32").
640
+ /// \param[in] is_hwc A boolean to indicate whether the input image is in HWC format (true) or CHW
641
+ /// format (false) (default = true).
642
+ /// \par Example
643
+ /// \code
644
+ /// /* Define operations */
645
+ /// auto decode_op = vision::Decode();
646
+ /// auto normalize_pad_op = vision::NormalizePad({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0});
647
+ ///
648
+ /// /* dataset is an instance of Dataset object */
649
+ /// dataset = dataset->Map({decode_op, normalize_pad_op}, // operations
650
+ /// {"image"}); // input columns
651
+ /// \endcode
652
+ NormalizePad(const std::vector<float> &mean, const std::vector<float> &std, const std::string &dtype = "float32",
653
+ bool is_hwc = true)
654
+ : NormalizePad(mean, std, StringToChar(dtype), is_hwc) {}
655
+
656
+ NormalizePad(const std::vector<float> &mean, const std::vector<float> &std, const std::vector<char> &dtype,
657
+ bool is_hwc = true);
658
+
659
+ /// \brief Destructor.
660
+ ~NormalizePad() override = default;
661
+
662
+ protected:
663
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
664
+ /// \return Shared pointer to TensorOperation object.
665
+ std::shared_ptr<TensorOperation> Parse() override;
666
+
667
+ private:
668
+ struct Data;
669
+ std::shared_ptr<Data> data_;
670
+ };
671
+
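The pair of NormalizePad constructors above illustrates the dual-ABI convention used throughout this header: the std::string overload is an inline wrapper that forwards the dtype through StringToChar (from include/api/dual_abi_helper.h, included at the top of the file) to the std::vector<char> overload. A minimal sketch of a typical call, assuming the same namespace shorthand as the examples in the doc comments and reusing their example mean/std values:

    // The std::string overload simply forwards "float16" through StringToChar()
    // to the std::vector<char> overload, so callers normally just pass a string literal.
    auto normalize_pad_op = vision::NormalizePad({121.0, 115.0, 100.0}, {70.0, 68.0, 71.0}, "float16");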
672
+ /// \brief Pad the image to a fixed size.
673
+ class DATASET_API PadToSize final : public TensorTransform {
674
+ public:
675
+ /// \brief Constructor.
676
+ /// \param[in] size A two element vector representing the target size to pad, in order of [height, width].
677
+ /// \param[in] offset A two element vector representing the lengths to pad on the top and left,
678
+ /// in order of [top, left]. Default: {}, which means to pad symmetrically, keeping the original image in the center.
679
+ /// \param[in] fill_value A vector representing the pixel intensity of the borders. Only valid if the
680
+ /// padding_mode is BorderType.kConstant. If 1 value is provided, it is used for all RGB channels.
681
+ /// If 3 values are provided, they are used to fill the R, G, B channels respectively. Default: {0}.
682
+ /// \param[in] padding_mode The method of padding, which can be one of BorderType.kConstant, BorderType.kEdge,
683
+ /// BorderType.kReflect or BorderType.kSymmetric. Default: BorderType.kConstant.
684
+ /// - BorderType.kConstant, pads with a constant value.
685
+ /// - BorderType.kEdge, pads with the last value at the edge of the image.
686
+ /// - BorderType.kReflect, pads with reflection of the image omitting the last value on the edge.
687
+ /// - BorderType.kSymmetric, pads with reflection of the image repeating the last value on the edge.
688
+ /// \par Example
689
+ /// \code
690
+ /// /* Define operations */
691
+ /// auto decode_op = vision::Decode();
692
+ /// auto pad_to_size_op = vision::PadToSize({256, 256}, {10, 20}, {255, 255, 255});
693
+ ///
694
+ /// /* dataset is an instance of Dataset object */
695
+ /// dataset = dataset->Map({decode_op, pad_to_size_op}, // operations
696
+ /// {"image"}); // input columns
697
+ /// \endcode
698
+ explicit PadToSize(const std::vector<int32_t> &size, const std::vector<int32_t> &offset = {},
699
+ const std::vector<uint8_t> &fill_value = {0}, BorderType padding_mode = BorderType::kConstant);
700
+
701
+ /// \brief Destructor.
702
+ ~PadToSize() override = default;
703
+
704
+ protected:
705
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
706
+ /// \return Shared pointer to TensorOperation object.
707
+ std::shared_ptr<TensorOperation> Parse() override;
708
+
709
+ private:
710
+ struct Data;
711
+ std::shared_ptr<Data> data_;
712
+ };
713
+
714
+ /// \brief Perform perspective transform on the image.
715
+ class DATASET_API Perspective final : public TensorTransform {
716
+ public:
717
+ /// \brief Constructor.
718
+ /// \param[in] start_points List containing four lists of two integers corresponding to four
719
+ /// corners [top-left, top-right, bottom-right, bottom-left] of the original image.
720
+ /// \param[in] end_points List containing four lists of two integers corresponding to four
721
+ /// corners [top-left, top-right, bottom-right, bottom-left] of the transformed image.
722
+ /// \param[in] interpolation An enum for the mode of interpolation. Default: InterpolationMode::kLinear.
723
+ /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation.
724
+ /// - InterpolationMode::kLinear, Interpolation method is bilinear interpolation.
725
+ /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation.
726
+ /// - InterpolationMode::kArea, Interpolation method is pixel area interpolation.
727
+ /// - InterpolationMode::kCubicPil, Interpolation method is bicubic interpolation as implemented in Pillow.
728
+ /// \par Example
729
+ /// \code
730
+ /// /* Define operations */
731
+ /// auto decode_op = vision::Decode();
732
+ /// std::vector<std::vector<int32_t>> start_points = {{0, 0}, {1, 0}, {1, 1}, {0, 1}};
733
+ /// std::vector<std::vector<int32_t>> end_points = {{0, 2}, {2, 0}, {2, 2}, {0, 2}};
734
+ /// auto perspective_op = vision::Perspective(start_points, end_points, InterpolationMode::kLinear);
735
+ ///
736
+ /// /* dataset is an instance of Dataset object */
737
+ /// dataset = dataset->Map({decode_op, perspective_op}, // operations
738
+ /// {"image"}); // input columns
739
+ /// \endcode
740
+ Perspective(const std::vector<std::vector<int32_t>> &start_points,
741
+ const std::vector<std::vector<int32_t>> &end_points, InterpolationMode interpolation);
742
+
743
+ /// \brief Destructor.
744
+ ~Perspective() override = default;
745
+
746
+ protected:
747
+ /// \brief Function to convert TensorTransform object into a TensorOperation object.
748
+ /// \return Shared pointer to TensorOperation object.
749
+ std::shared_ptr<TensorOperation> Parse() override;
750
+
751
+ private:
752
+ struct Data;
753
+ std::shared_ptr<Data> data_;
754
+ };
755
+
756
+ /// \brief Posterize an image by reducing the number of bits for each color channel.
757
+ class DATASET_API Posterize final : public TensorTransform {
758
+ public:
759
+ /// \brief Constructor.
760
+ /// \param[in] bits The number of bits to keep for each channel,
761
+ /// which should be in the range [0, 8].
762
+ /// \par Example
763
+ /// \code
764
+ /// /* Define operations */
765
+ /// auto decode_op = vision::Decode();
766
+ /// auto posterize_op = vision::Posterize(8);
767
+ ///
768
+ /// /* dataset is an instance of Dataset object */
769
+ /// dataset = dataset->Map({decode_op, posterize_op}, // operations
770
+ /// {"image"}); // input columns
771
+ /// \endcode
772
+ explicit Posterize(uint8_t bits);
773
+
774
+ /// \brief Destructor.
775
+ ~Posterize() override = default;
776
+
777
+ protected:
778
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
779
+ /// \return Shared pointer to TensorOperation object.
780
+ std::shared_ptr<TensorOperation> Parse() override;
781
+
782
+ private:
783
+ struct Data;
784
+ std::shared_ptr<Data> data_;
785
+ };
786
+
787
+ /// \brief Apply RandAugment data augmentation method.
788
+ class DATASET_API RandAugment final : public TensorTransform {
789
+ public:
790
+ /// \brief Constructor.
791
+ /// \param[in] num_ops Number of augmentation transformations to apply sequentially. Default: 2.
792
+ /// \param[in] magnitude Magnitude for all the transformations. Default: 9.
793
+ /// \param[in] num_magnitude_bins The number of different magnitude values. Default: 31.
794
+ /// \param[in] interpolation An enum for the mode of interpolation. Default: InterpolationMode::kNearestNeighbour.
795
+ /// - InterpolationMode::kLinear, Interpolation method is bilinear interpolation.
796
+ /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation.
797
+ /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation.
798
+ /// \param[in] fill_value A vector representing the pixel intensity of the borders. Default: {0, 0, 0}.
799
+ /// \par Example
800
+ /// \code
801
+ /// /* Define operations */
802
+ /// auto decode_op = vision::Decode();
803
+ /// auto rand_augment_op = vision::RandAugment();
804
+ /// /* dataset is an instance of Dataset object */
805
+ /// dataset = dataset->Map({decode_op, rand_augment_op}, // operations
806
+ /// {"image"}); // input columns
807
+ /// \endcode
808
+ explicit RandAugment(int32_t num_ops = 2, int32_t magnitude = 9, int32_t num_magnitude_bins = 31,
809
+ InterpolationMode interpolation = InterpolationMode::kNearestNeighbour,
810
+ const std::vector<uint8_t> &fill_value = {0, 0, 0});
811
+
812
+ /// \brief Destructor.
813
+ ~RandAugment() override = default;
814
+
815
+ protected:
816
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
817
+ /// \return Shared pointer to TensorOperation object.
818
+ std::shared_ptr<TensorOperation> Parse() override;
819
+
820
+ private:
821
+ struct Data;
822
+ std::shared_ptr<Data> data_;
823
+ };
824
+
825
+ /// \brief Automatically adjust the contrast of the image with a given probability.
826
+ class DATASET_API RandomAutoContrast final : public TensorTransform {
827
+ public:
828
+ /// \brief Constructor.
829
+ /// \param[in] cutoff Percent of the lightest and darkest pixels to be cut off from
830
+ /// the histogram of the input image. The value must be in range of [0.0, 50.0) (default=0.0).
831
+ /// \param[in] ignore The background pixel values to be ignored, each of which must be
832
+ /// in range of [0, 255] (default={}).
833
+ /// \param[in] prob A float representing the probability of AutoContrast, which must be
834
+ /// in range of [0, 1] (default=0.5).
835
+ /// \par Example
836
+ /// \code
837
+ /// /* Define operations */
838
+ /// auto decode_op = vision::Decode();
839
+ /// auto random_auto_contrast_op = vision::RandomAutoContrast(5.0);
840
+ ///
841
+ /// /* dataset is an instance of Dataset object */
842
+ /// dataset = dataset->Map({decode_op, random_auto_contrast_op}, // operations
843
+ /// {"image"}); // input columns
844
+ /// \endcode
845
+ explicit RandomAutoContrast(float cutoff = 0.0, const std::vector<uint32_t> &ignore = {}, float prob = 0.5);
846
+
847
+ /// \brief Destructor.
848
+ ~RandomAutoContrast() override = default;
849
+
850
+ protected:
851
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
852
+ /// \return Shared pointer to TensorOperation object.
853
+ std::shared_ptr<TensorOperation> Parse() override;
854
+
855
+ private:
856
+ struct Data;
857
+ std::shared_ptr<Data> data_;
858
+ };
859
+
860
+ /// \brief Randomly adjust the sharpness of the input image with a given probability.
861
+ class DATASET_API RandomAdjustSharpness final : public TensorTransform {
862
+ public:
863
+ /// \brief Constructor.
864
+ /// \param[in] degree A float representing sharpness adjustment degree, which must be non-negative.
865
+ /// \param[in] prob A float representing the probability of the image being sharpness adjusted, which
866
+ /// must be in range of [0, 1] (default=0.5).
867
+ /// \par Example
868
+ /// \code
869
+ /// /* Define operations */
870
+ /// auto decode_op = vision::Decode();
871
+ /// auto random_adjust_sharpness_op = vision::RandomAdjustSharpness(30.0);
872
+ ///
873
+ /// /* dataset is an instance of Dataset object */
874
+ /// dataset = dataset->Map({decode_op, random_adjust_sharpness_op}, // operations
875
+ /// {"image"}); // input columns
876
+ /// \endcode
877
+ explicit RandomAdjustSharpness(float degree, float prob = 0.5);
878
+
879
+ /// \brief Destructor.
880
+ ~RandomAdjustSharpness() override = default;
881
+
882
+ protected:
883
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
884
+ /// \return Shared pointer to TensorOperation object.
885
+ std::shared_ptr<TensorOperation> Parse() override;
886
+
887
+ private:
888
+ struct Data;
889
+ std::shared_ptr<Data> data_;
890
+ };
891
+
892
+ /// \brief Blend an image with its grayscale version with random weights
893
+ /// t and 1 - t generated from a given range. If the range is trivial
894
+ /// then the weights are deterministic and t equals the bound of the interval.
895
+ class DATASET_API RandomColor final : public TensorTransform {
896
+ public:
897
+ /// \brief Constructor.
898
+ /// \param[in] t_lb Lower bound random weights.
899
+ /// \param[in] t_ub Upper bound random weights.
900
+ /// \par Example
901
+ /// \code
902
+ /// /* Define operations */
903
+ /// auto decode_op = vision::Decode();
904
+ /// auto random_color_op = vision::RandomColor(5.0, 50.0);
905
+ ///
906
+ /// /* dataset is an instance of Dataset object */
907
+ /// dataset = dataset->Map({decode_op, random_color_op}, // operations
908
+ /// {"image"}); // input columns
909
+ /// \endcode
910
+ RandomColor(float t_lb, float t_ub);
911
+
912
+ /// \brief Destructor.
913
+ ~RandomColor() override = default;
914
+
915
+ protected:
916
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
917
+ /// \return Shared pointer to TensorOperation object.
918
+ std::shared_ptr<TensorOperation> Parse() override;
919
+
920
+ private:
921
+ struct Data;
922
+ std::shared_ptr<Data> data_;
923
+ };
924
+
925
+ /// \brief Randomly adjust the brightness, contrast, saturation, and hue of the input image.
926
+ class DATASET_API RandomColorAdjust final : public TensorTransform {
927
+ public:
928
+ /// \brief Constructor.
929
+ /// \param[in] brightness Brightness adjustment factor. Must be a vector of one or two values
930
+ /// if it is a vector of two values, it needs to be in the form of [min, max] (Default={1, 1}).
931
+ /// \param[in] contrast Contrast adjustment factor. Must be a vector of one or two values
932
+ /// if it is a vector of two values, it needs to be in the form of [min, max] (Default={1, 1}).
933
+ /// \param[in] saturation Saturation adjustment factor. Must be a vector of one or two values
934
+ /// if it is a vector of two values, it needs to be in the form of [min, max] (Default={1, 1}).
935
+ /// \param[in] hue Hue adjustment factor. Must be a vector of one or two values
936
+ /// if it is a vector of two values, it must be in the form of [min, max] where -0.5 <= min <= max <= 0.5
937
+ /// (Default={0, 0}).
938
+ /// \par Example
939
+ /// \code
940
+ /// /* Define operations */
941
+ /// auto decode_op = vision::Decode();
942
+ /// auto random_color_adjust_op = vision::RandomColorAdjust({1.0, 5.0}, {10.0, 20.0}, {40.0, 40.0});
943
+ ///
944
+ /// /* dataset is an instance of Dataset object */
945
+ /// dataset = dataset->Map({decode_op, random_color_adjust_op}, // operations
946
+ /// {"image"}); // input columns
947
+ /// \endcode
948
+ explicit RandomColorAdjust(const std::vector<float> &brightness = {1.0, 1.0},
949
+ const std::vector<float> &contrast = {1.0, 1.0},
950
+ const std::vector<float> &saturation = {1.0, 1.0},
951
+ const std::vector<float> &hue = {0.0, 0.0});
952
+
953
+ /// \brief Destructor.
954
+ ~RandomColorAdjust() override = default;
955
+
956
+ protected:
957
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
958
+ /// \return Shared pointer to TensorOperation object.
959
+ std::shared_ptr<TensorOperation> Parse() override;
960
+
961
+ private:
962
+ struct Data;
963
+ std::shared_ptr<Data> data_;
964
+ };
965
+
966
+ /// \brief Crop the input image at a random location.
967
+ class DATASET_API RandomCrop final : public TensorTransform {
968
+ public:
969
+ /// \brief Constructor.
970
+ /// \param[in] size A vector representing the output size of the cropped image.
971
+ /// If the size is a single value, a square crop of size (size, size) is returned.
972
+ /// If the size has 2 values, it should be (height, width).
973
+ /// \param[in] padding A vector representing the number of pixels to pad the image.
974
+ /// If the vector has one value, it pads all sides of the image with that value.
975
+ /// If the vector has two values, it pads left and right with the first and
976
+ /// top and bottom with the second value.
977
+ /// If the vector has four values, it pads left, top, right, and bottom with
978
+ /// those values respectively.
979
+ /// \param[in] pad_if_needed A boolean indicating whether to pad the image
980
+ /// if either side is smaller than the given output size.
981
+ /// \param[in] fill_value A vector representing the pixel intensity of the borders if the padding_mode is
982
+ /// BorderType::kConstant. If 1 value is provided, it is used for all RGB channels.
983
+ /// If 3 values are provided, it is used to fill R, G, B channels respectively.
984
+ /// \param[in] padding_mode The method of padding (default=BorderType::kConstant). It can be any of
985
+ /// [BorderType::kConstant, BorderType::kEdge, BorderType::kReflect, BorderType::kSymmetric].
986
+ /// - BorderType::kConstant, Fill the border with constant values.
987
+ /// - BorderType::kEdge, Fill the border with the last value on the edge.
988
+ /// - BorderType::kReflect, Reflect the values on the edge omitting the last value of edge.
989
+ /// - BorderType::kSymmetric, Reflect the values on the edge repeating the last value of edge.
990
+ /// \note If more than one image is input, make sure that all the images have the same size.
991
+ /// \par Example
992
+ /// \code
993
+ /// /* Define operations */
994
+ /// auto decode_op = vision::Decode();
995
+ /// auto random_crop_op = vision::RandomCrop({255, 255}, {10, 10, 10, 10});
996
+ ///
997
+ /// /* dataset is an instance of Dataset object */
998
+ /// dataset = dataset->Map({decode_op, random_crop_op}, // operations
999
+ /// {"image"}); // input columns
1000
+ /// \endcode
1001
+ explicit RandomCrop(const std::vector<int32_t> &size, const std::vector<int32_t> &padding = {0, 0, 0, 0},
1002
+ bool pad_if_needed = false, const std::vector<uint8_t> &fill_value = {0, 0, 0},
1003
+ BorderType padding_mode = BorderType::kConstant);
1004
+
1005
+ /// \brief Destructor.
1006
+ ~RandomCrop() override = default;
1007
+
1008
+ protected:
1009
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1010
+ /// \return Shared pointer to TensorOperation object.
1011
+ std::shared_ptr<TensorOperation> Parse() override;
1012
+
1013
+ private:
1014
+ struct Data;
1015
+ std::shared_ptr<Data> data_;
1016
+ };
1017
+
1018
+ /// \brief Equivalent to RandomResizedCrop TensorTransform, but crop the image before decoding.
1019
+ class DATASET_API RandomCropDecodeResize final : public TensorTransform {
1020
+ public:
1021
+ /// \brief Constructor.
1022
+ /// \param[in] size A vector representing the output size of the cropped image.
1023
+ /// If the size is a single value, a square crop of size (size, size) is returned.
1024
+ /// If the size has 2 values, it should be (height, width).
1025
+ /// \param[in] scale Range [min, max) of respective size of the
1026
+ /// original size to be cropped (default=(0.08, 1.0)).
1027
+ /// \param[in] ratio Range [min, max) of aspect ratio to be
1028
+ /// cropped (default=(3. / 4., 4. / 3.)).
1029
+ /// \param[in] interpolation An enum for the mode of interpolation.
1030
+ /// - InterpolationMode::kLinear, Interpolation method is bilinear interpolation.
1031
+ /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation.
1032
+ /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation.
1033
+ /// - InterpolationMode::kArea, Interpolation method is pixel area interpolation.
1034
+ /// - InterpolationMode::kCubicPil, Interpolation method is bicubic interpolation like implemented in pillow.
1035
+ /// \param[in] max_attempts The maximum number of attempts to propose a valid crop_area (default=10).
1036
+ /// If exceeded, fall back to use center_crop instead.
1037
+ /// \par Example
1038
+ /// \code
1039
+ /// /* Define operations */
1040
+ /// auto random_op = vision::RandomCropDecodeResize({255, 255}, {0.1, 0.5});
1041
+ ///
1042
+ /// /* dataset is an instance of Dataset object */
1043
+ /// dataset = dataset->Map({random_op}, // operations
1044
+ /// {"image"}); // input columns
1045
+ /// \endcode
1046
+ explicit RandomCropDecodeResize(const std::vector<int32_t> &size, const std::vector<float> &scale = {0.08, 1.0},
1047
+ const std::vector<float> &ratio = {3. / 4., 4. / 3.},
1048
+ InterpolationMode interpolation = InterpolationMode::kLinear,
1049
+ int32_t max_attempts = 10);
1050
+
1051
+ /// \brief Destructor.
1052
+ ~RandomCropDecodeResize() override = default;
1053
+
1054
+ protected:
1055
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1056
+ /// \return Shared pointer to TensorOperation object.
1057
+ std::shared_ptr<TensorOperation> Parse() override;
1058
+
1059
+ private:
1060
+ struct Data;
1061
+ std::shared_ptr<Data> data_;
1062
+ };
1063
+
1064
+ /// \brief Crop the input image at a random location and adjust bounding boxes accordingly.
1065
+ /// If the cropped area is out of the bbox, the returned bbox will be empty.
1066
+ class DATASET_API RandomCropWithBBox final : public TensorTransform {
1067
+ public:
1068
+ /// \brief Constructor.
1069
+ /// \param[in] size A vector representing the output size of the cropped image.
1070
+ /// If the size is a single value, a square crop of size (size, size) is returned.
1071
+ /// If the size has 2 values, it should be (height, width).
1072
+ /// \param[in] padding A vector representing the number of pixels to pad the image
1073
+ /// If the vector has one value, it pads all sides of the image with that value.
1074
+ /// If the vector has two values, it pads left and right with the first and
1075
+ /// top and bottom with the second value.
1076
+ /// If the vector has four values, it pads left, top, right, and bottom with
1077
+ /// those values respectively.
1078
+ /// \param[in] pad_if_needed A boolean indicating whether to pad the image
1079
+ /// if either side is smaller than the given output size.
1080
+ /// \param[in] fill_value A vector representing the pixel intensity of the borders. Only valid
1081
+ /// if the padding_mode is BorderType::kConstant. If 1 value is provided, it is used for all
1082
+ /// RGB channels. If 3 values are provided, it is used to fill R, G, B channels respectively.
1083
+ /// \param[in] padding_mode The method of padding (default=BorderType::kConstant). It can be any of
1084
+ /// [BorderType::kConstant, BorderType::kEdge, BorderType::kReflect, BorderType::kSymmetric].
1085
+ /// - BorderType::kConstant, Fill the border with constant values.
1086
+ /// - BorderType::kEdge, Fill the border with the last value on the edge.
1087
+ /// - BorderType::kReflect, Reflect the values on the edge omitting the last value of edge.
1088
+ /// - BorderType::kSymmetric, Reflect the values on the edge repeating the last value of edge.
1089
+ /// \par Example
1090
+ /// \code
1091
+ /// /* Define operations */
1092
+ /// auto random_op = vision::RandomCropWithBBox({224, 224}, {0, 0, 0, 0});
1093
+ ///
1094
+ /// /* dataset is an instance of Dataset object */
1095
+ /// dataset = dataset->Map({random_op}, // operations
1096
+ /// {"image", "bbox"}); // input columns
1097
+ /// \endcode
1098
+ explicit RandomCropWithBBox(const std::vector<int32_t> &size, const std::vector<int32_t> &padding = {0, 0, 0, 0},
1099
+ bool pad_if_needed = false, const std::vector<uint8_t> &fill_value = {0, 0, 0},
1100
+ BorderType padding_mode = BorderType::kConstant);
1101
+
1102
+ /// \brief Destructor.
1103
+ ~RandomCropWithBBox() override = default;
1104
+
1105
+ protected:
1106
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1107
+ /// \return Shared pointer to TensorOperation object.
1108
+ std::shared_ptr<TensorOperation> Parse() override;
1109
+
1110
+ private:
1111
+ struct Data;
1112
+ std::shared_ptr<Data> data_;
1113
+ };
1114
+
1115
+ /// \brief Randomly apply histogram equalization on the input image with a given probability.
1116
+ class DATASET_API RandomEqualize final : public TensorTransform {
1117
+ public:
1118
+ /// \brief Constructor.
1119
+ /// \param[in] prob A float representing the probability of equalization, which
1120
+ /// must be in range of [0, 1] (default=0.5).
1121
+ /// \par Example
1122
+ /// \code
1123
+ /// /* Define operations */
1124
+ /// auto decode_op = vision::Decode();
1125
+ /// auto random_op = vision::RandomEqualize(0.5);
1126
+ ///
1127
+ /// /* dataset is an instance of Dataset object */
1128
+ /// dataset = dataset->Map({decode_op, random_op}, // operations
1129
+ /// {"image"}); // input columns
1130
+ /// \endcode
1131
+ explicit RandomEqualize(float prob = 0.5);
1132
+
1133
+ /// \brief Destructor.
1134
+ ~RandomEqualize() override = default;
1135
+
1136
+ protected:
1137
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1138
+ /// \return Shared pointer to TensorOperation object.
1139
+ std::shared_ptr<TensorOperation> Parse() override;
1140
+
1141
+ private:
1142
+ struct Data;
1143
+ std::shared_ptr<Data> data_;
1144
+ };
1145
+
1146
+ /// \brief Randomly flip the input image horizontally with a given probability.
1147
+ class DATASET_API RandomHorizontalFlip final : public TensorTransform {
1148
+ public:
1149
+ /// \brief Constructor.
1150
+ /// \param[in] prob A float representing the probability of flip.
1151
+ /// \par Example
1152
+ /// \code
1153
+ /// /* Define operations */
1154
+ /// auto decode_op = vision::Decode();
1155
+ /// auto random_op = vision::RandomHorizontalFlip(0.8);
1156
+ ///
1157
+ /// /* dataset is an instance of Dataset object */
1158
+ /// dataset = dataset->Map({decode_op, random_op}, // operations
1159
+ /// {"image"}); // input columns
1160
+ /// \endcode
1161
+ explicit RandomHorizontalFlip(float prob = 0.5);
1162
+
1163
+ /// \brief Destructor.
1164
+ ~RandomHorizontalFlip() override = default;
1165
+
1166
+ protected:
1167
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1168
+ /// \return Shared pointer to TensorOperation object.
1169
+ std::shared_ptr<TensorOperation> Parse() override;
1170
+
1171
+ private:
1172
+ struct Data;
1173
+ std::shared_ptr<Data> data_;
1174
+ };
1175
+
1176
+ /// \brief Randomly flip the input image horizontally with a given probability and adjust bounding boxes accordingly.
1177
+ class DATASET_API RandomHorizontalFlipWithBBox final : public TensorTransform {
1178
+ public:
1179
+ /// \brief Constructor.
1180
+ /// \param[in] prob A float representing the probability of flip.
1181
+ /// \par Example
1182
+ /// \code
1183
+ /// /* Define operations */
1184
+ /// auto random_op = vision::RandomHorizontalFlipWithBBox(1.0);
1185
+ ///
1186
+ /// /* dataset is an instance of Dataset object */
1187
+ /// dataset = dataset->Map({random_op}, // operations
1188
+ /// {"image", "bbox"}); // input columns
1189
+ /// \endcode
1190
+ explicit RandomHorizontalFlipWithBBox(float prob = 0.5);
1191
+
1192
+ /// \brief Destructor.
1193
+ ~RandomHorizontalFlipWithBBox() override = default;
1194
+
1195
+ protected:
1196
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1197
+ /// \return Shared pointer to TensorOperation object.
1198
+ std::shared_ptr<TensorOperation> Parse() override;
1199
+
1200
+ private:
1201
+ struct Data;
1202
+ std::shared_ptr<Data> data_;
1203
+ };
1204
+
1205
+ /// \brief Randomly invert the input image with a given probability.
1206
+ class DATASET_API RandomInvert final : public TensorTransform {
1207
+ public:
1208
+ /// \brief Constructor.
1209
+ /// \param[in] prob A float representing the probability of the image being inverted, which
1210
+ /// must be in range of [0, 1] (default=0.5).
1211
+ /// \par Example
1212
+ /// \code
1213
+ /// /* Define operations */
1214
+ /// auto decode_op = vision::Decode();
1215
+ /// auto random_op = vision::RandomInvert(0.8);
1216
+ ///
1217
+ /// /* dataset is an instance of Dataset object */
1218
+ /// dataset = dataset->Map({decode_op, random_op}, // operations
1219
+ /// {"image"}); // input columns
1220
+ /// \endcode
1221
+ explicit RandomInvert(float prob = 0.5);
1222
+
1223
+ /// \brief Destructor.
1224
+ ~RandomInvert() override = default;
1225
+
1226
+ protected:
1227
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1228
+ /// \return Shared pointer to TensorOperation object.
1229
+ std::shared_ptr<TensorOperation> Parse() override;
1230
+
1231
+ private:
1232
+ struct Data;
1233
+ std::shared_ptr<Data> data_;
1234
+ };
1235
+
1236
+ /// \brief Add AlexNet-style PCA-based noise to an image.
1237
+ class DATASET_API RandomLighting final : public TensorTransform {
1238
+ public:
1239
+ /// \brief Constructor.
1240
+ /// \param[in] alpha A float representing the intensity of the image (default=0.05).
1241
+ /// \par Example
1242
+ /// \code
1243
+ /// /* Define operations */
1244
+ /// auto decode_op = vision::Decode();
1245
+ /// auto random_op = vision::RandomLighting(0.1);
1246
+ ///
1247
+ /// /* dataset is an instance of Dataset object */
1248
+ /// dataset = dataset->Map({decode_op, random_op}, // operations
1249
+ /// {"image"}); // input columns
1250
+ /// \endcode
1251
+ explicit RandomLighting(float alpha = 0.05);
1252
+
1253
+ /// \brief Destructor.
1254
+ ~RandomLighting() override = default;
1255
+
1256
+ protected:
1257
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1258
+ /// \return Shared pointer to TensorOperation object.
1259
+ std::shared_ptr<TensorOperation> Parse() override;
1260
+
1261
+ private:
1262
+ struct Data;
1263
+ std::shared_ptr<Data> data_;
1264
+ };
1265
+
1266
+ /// \brief Reduce the number of bits for each color channel randomly.
1267
+ class DATASET_API RandomPosterize final : public TensorTransform {
1268
+ public:
1269
+ /// \brief Constructor.
1270
+ /// \param[in] bit_range Range of bits used to randomly posterize the image, given as a
1271
+ /// uint8_t vector representing the minimum and maximum bit values, each in range of [1, 8] (Default={4, 8}).
1272
+ /// \par Example
1273
+ /// \code
1274
+ /// /* Define operations */
1275
+ /// auto decode_op = vision::Decode();
1276
+ /// auto random_op = vision::RandomPosterize({4, 8});
1277
+ ///
1278
+ /// /* dataset is an instance of Dataset object */
1279
+ /// dataset = dataset->Map({decode_op, random_op}, // operations
1280
+ /// {"image"}); // input columns
1281
+ /// \endcode
1282
+ explicit RandomPosterize(const std::vector<uint8_t> &bit_range = {4, 8});
1283
+
1284
+ /// \brief Destructor.
1285
+ ~RandomPosterize() override = default;
1286
+
1287
+ protected:
1288
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1289
+ /// \return Shared pointer to TensorOperation object.
1290
+ std::shared_ptr<TensorOperation> Parse() override;
1291
+
1292
+ private:
1293
+ struct Data;
1294
+ std::shared_ptr<Data> data_;
1295
+ };
1296
+
1297
+ /// \brief Resize the input image using a randomly selected interpolation mode.
1298
+ class DATASET_API RandomResize final : public TensorTransform {
1299
+ public:
1300
+ /// \brief Constructor.
1301
+ /// \param[in] size A vector representing the output size of the resized image.
1302
+ /// If the size is a single value, the smaller edge of the image will be resized to this value with
1303
+ /// the same image aspect ratio. If the size has 2 values, it should be (height, width).
1304
+ /// \par Example
1305
+ /// \code
1306
+ /// /* Define operations */
1307
+ /// auto decode_op = vision::Decode();
1308
+ /// auto random_op = vision::RandomResize({32, 32});
1309
+ ///
1310
+ /// /* dataset is an instance of Dataset object */
1311
+ /// dataset = dataset->Map({decode_op, random_op}, // operations
1312
+ /// {"image"}); // input columns
1313
+ /// \endcode
1314
+ explicit RandomResize(const std::vector<int32_t> &size);
1315
+
1316
+ /// \brief Destructor.
1317
+ ~RandomResize() override = default;
1318
+
1319
+ protected:
1320
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1321
+ /// \return Shared pointer to TensorOperation object.
1322
+ std::shared_ptr<TensorOperation> Parse() override;
1323
+
1324
+ private:
1325
+ struct Data;
1326
+ std::shared_ptr<Data> data_;
1327
+ };
1328
+
1329
+ /// \brief Resize the input image using a randomly selected interpolation mode and adjust
1330
+ /// bounding boxes accordingly.
1331
+ class DATASET_API RandomResizeWithBBox final : public TensorTransform {
1332
+ public:
1333
+ /// \brief Constructor.
1334
+ /// \param[in] size A vector representing the output size of the resized image.
1335
+ /// If the size is a single value, the smaller edge of the image will be resized to this value with
1336
+ /// the same image aspect ratio. If the size has 2 values, it should be (height, width).
1337
+ /// \par Example
1338
+ /// \code
1339
+ /// /* Define operations */
1340
+ /// auto random_op = vision::RandomResizeWithBBox({50, 50});
1341
+ ///
1342
+ /// /* dataset is an instance of Dataset object */
1343
+ /// dataset = dataset->Map({random_op}, // operations
1344
+ /// {"image", "bbox"}); // input columns
1345
+ /// \endcode
1346
+ explicit RandomResizeWithBBox(const std::vector<int32_t> &size);
1347
+
1348
+ /// \brief Destructor.
1349
+ ~RandomResizeWithBBox() override = default;
1350
+
1351
+ protected:
1352
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1353
+ /// \return Shared pointer to TensorOperation object.
1354
+ std::shared_ptr<TensorOperation> Parse() override;
1355
+
1356
+ private:
1357
+ struct Data;
1358
+ std::shared_ptr<Data> data_;
1359
+ };
1360
+
1361
+ /// \brief Crop the input image to a random size and aspect ratio.
1362
+ class DATASET_API RandomResizedCrop final : public TensorTransform {
1363
+ public:
1364
+ /// \brief Constructor.
1365
+ /// \param[in] size A vector representing the output size of the cropped image.
1366
+ /// If the size is a single value, a square crop of size (size, size) is returned.
1367
+ /// If the size has 2 values, it should be (height, width).
1368
+ /// \param[in] scale Range [min, max) of respective size of the original
1369
+ /// size to be cropped (default=(0.08, 1.0)).
1370
+ /// \param[in] ratio Range [min, max) of aspect ratio to be cropped
1371
+ /// (default=(3. / 4., 4. / 3.)).
1372
+ /// \param[in] interpolation Image interpolation mode (default=InterpolationMode::kLinear).
1373
+ /// - InterpolationMode::kLinear, Interpolation method is bilinear interpolation.
1374
+ /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation.
1375
+ /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation.
1376
+ /// - InterpolationMode::kArea, Interpolation method is pixel area interpolation.
1377
+ /// - InterpolationMode::kCubicPil, Interpolation method is bicubic interpolation like implemented in pillow.
1378
+ /// \param[in] max_attempts The maximum number of attempts to propose a valid
1379
+ /// crop_area (default=10). If exceeded, fall back to use center_crop instead.
1380
+ /// \note If more than one image is input, make sure that all the images have the same size.
1381
+ /// \par Example
1382
+ /// \code
1383
+ /// /* Define operations */
1384
+ /// auto decode_op = vision::Decode();
1385
+ /// auto random_op = vision::RandomResizedCrop({32, 32}, {0.08, 1.0});
1386
+ ///
1387
+ /// /* dataset is an instance of Dataset object */
1388
+ /// dataset = dataset->Map({decode_op, random_op}, // operations
1389
+ /// {"image"}); // input columns
1390
+ /// \endcode
1391
+ explicit RandomResizedCrop(const std::vector<int32_t> &size, const std::vector<float> &scale = {0.08, 1.0},
1392
+ const std::vector<float> &ratio = {3. / 4., 4. / 3.},
1393
+ InterpolationMode interpolation = InterpolationMode::kLinear, int32_t max_attempts = 10);
1394
+
1395
+ /// \brief Destructor.
1396
+ ~RandomResizedCrop() override = default;
1397
+
1398
+ protected:
1399
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1400
+ /// \return Shared pointer to TensorOperation object.
1401
+ std::shared_ptr<TensorOperation> Parse() override;
1402
+
1403
+ private:
1404
+ struct Data;
1405
+ std::shared_ptr<Data> data_;
1406
+ };
1407
+
1408
+ /// \brief Crop the input image to a random size and aspect ratio.
1409
+ /// If the cropped area is out of the bbox, the returned bbox will be empty.
1410
+ class DATASET_API RandomResizedCropWithBBox final : public TensorTransform {
1411
+ public:
1412
+ /// \brief Constructor.
1413
+ /// \param[in] size A vector representing the output size of the cropped image.
1414
+ /// If the size is a single value, a square crop of size (size, size) is returned.
1415
+ /// If the size has 2 values, it should be (height, width).
1416
+ /// \param[in] scale Range [min, max) of respective size of the original
1417
+ /// size to be cropped (default=(0.08, 1.0)).
1418
+ /// \param[in] ratio Range [min, max) of aspect ratio to be cropped
1419
+ /// (default=(3. / 4., 4. / 3.)).
1420
+ /// \param[in] interpolation Image interpolation mode (default=InterpolationMode::kLinear).
1421
+ /// - InterpolationMode::kLinear, Interpolation method is bilinear interpolation.
1422
+ /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation.
1423
+ /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation.
1424
+ /// - InterpolationMode::kArea, Interpolation method is pixel area interpolation.
1425
+ /// - InterpolationMode::kCubicPil, Interpolation method is bicubic interpolation like implemented in pillow.
1426
+ /// \param[in] max_attempts The maximum number of attempts to propose a valid
1427
+ /// crop_area (default=10). If exceeded, fall back to use center_crop instead.
1428
+ /// \par Example
1429
+ /// \code
1430
+ /// /* Define operations */
1431
+ /// auto random_op = vision::RandomResizedCropWithBBox({50, 50}, {0.05, 0.5}, {0.2, 0.4},
1432
+ /// InterpolationMode::kCubic);
1433
+ ///
1434
+ /// /* dataset is an instance of Dataset object */
1435
+ /// dataset = dataset->Map({random_op}, // operations
1436
+ /// {"image", "bbox"}); // input columns
1437
+ /// \endcode
1438
+ explicit RandomResizedCropWithBBox(const std::vector<int32_t> &size, const std::vector<float> &scale = {0.08, 1.0},
1439
+ const std::vector<float> &ratio = {3. / 4., 4. / 3.},
1440
+ InterpolationMode interpolation = InterpolationMode::kLinear,
1441
+ int32_t max_attempts = 10);
1442
+
1443
+ /// \brief Destructor.
1444
+ ~RandomResizedCropWithBBox() override = default;
1445
+
1446
+ protected:
1447
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1448
+ /// \return Shared pointer to TensorOperation object.
1449
+ std::shared_ptr<TensorOperation> Parse() override;
1450
+
1451
+ private:
1452
+ struct Data;
1453
+ std::shared_ptr<Data> data_;
1454
+ };
1455
+
1456
+ /// \brief Rotate the image according to parameters.
1457
+ class DATASET_API RandomRotation final : public TensorTransform {
1458
+ public:
1459
+ /// \brief Constructor.
1460
+ /// \param[in] degrees A float vector of size 2, representing the starting and ending degrees.
1461
+ /// \param[in] resample An enum for the mode of interpolation.
1462
+ /// - InterpolationMode::kLinear, Interpolation method is bilinear interpolation.
1463
+ /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation.
1464
+ /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation.
1465
+ /// - InterpolationMode::kArea, Interpolation method is pixel area interpolation.
1466
+ /// - InterpolationMode::kCubicPil, Interpolation method is bicubic interpolation like implemented in pillow.
1467
+ /// \param[in] expand A boolean representing whether the image is expanded after rotation.
1468
+ /// \param[in] center A float vector of size 2 or empty, representing the x and y center of rotation
1469
+ /// or the center of the image.
1470
+ /// \param[in] fill_value A vector representing the value to fill the area outside the transform
1471
+ /// in the output image. If 1 value is provided, it is used for all RGB channels.
1472
+ /// If 3 values are provided, it is used to fill R, G, B channels respectively.
1473
+ /// \par Example
1474
+ /// \code
1475
+ /// /* Define operations */
1476
+ /// auto decode_op = vision::Decode();
1477
+ /// auto random_op = vision::RandomRotation({30, 60}, InterpolationMode::kNearestNeighbour);
1478
+ ///
1479
+ /// /* dataset is an instance of Dataset object */
1480
+ /// dataset = dataset->Map({decode_op, random_op}, // operations
1481
+ /// {"image"}); // input columns
1482
+ /// \endcode
1483
+ explicit RandomRotation(const std::vector<float> &degrees,
1484
+ InterpolationMode resample = InterpolationMode::kNearestNeighbour, bool expand = false,
1485
+ const std::vector<float> &center = {}, const std::vector<uint8_t> &fill_value = {0, 0, 0});
1486
+
1487
+ /// \brief Destructor.
1488
+ ~RandomRotation() override = default;
1489
+
1490
+ protected:
1491
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1492
+ /// \return Shared pointer to TensorOperation object.
1493
+ std::shared_ptr<TensorOperation> Parse() override;
1494
+
1495
+ private:
1496
+ struct Data;
1497
+ std::shared_ptr<Data> data_;
1498
+ };
1499
+
1500
+ /// \brief Choose a random sub-policy from a list to be applied on the input image. A sub-policy is a list of tuples
1501
+ /// (operation, prob), where operation is a TensorTransform operation and prob is the probability that this
1502
+ /// operation will be applied. Once a sub-policy is selected, each operation within the sub-policy will be
1503
+ /// applied in sequence according to its probability.
1504
+ class DATASET_API RandomSelectSubpolicy final : public TensorTransform {
1505
+ public:
1506
+ /// \brief Constructor.
1507
+ /// \param[in] policy Vector of sub-policies to choose from, in which the TensorTransform objects are raw pointers.
1508
+ /// \par Example
1509
+ /// \code
1510
+ /// /* Define operations */
1511
+ /// auto invert_op(new vision::Invert());
1512
+ /// auto equalize_op(new vision::Equalize());
1513
+ ///
1514
+ /// std::vector<std::pair<TensorTransform *, double>> policy = {{invert_op, 0.5}, {equalize_op, 0.4}};
1515
+ /// vision::RandomSelectSubpolicy random_select_subpolicy_op = vision::RandomSelectSubpolicy({policy});
1516
+ ///
1517
+ /// /* dataset is an instance of Dataset object */
1518
+ /// dataset = dataset->Map({random_select_subpolicy_op}, // operations
1519
+ /// {"image"}); // input columns
1520
+ /// \endcode
1521
+ explicit RandomSelectSubpolicy(const std::vector<std::vector<std::pair<TensorTransform *, double>>> &policy);
1522
+
1523
+ /// \brief Constructor.
1524
+ /// \param[in] policy Vector of sub-policies to choose from, in which the TensorTransform objects are shared pointers.
1525
+ /// \par Example
1526
+ /// \code
1527
+ /// /* Define operations */
1528
+ /// std::shared_ptr<TensorTransform> invert_op(new vision::Invert());
1529
+ /// std::shared_ptr<TensorTransform> equalize_op(new vision::Equalize());
1530
+ /// std::shared_ptr<TensorTransform> resize_op(new vision::Resize({15, 15}));
1531
+ ///
1532
+ /// auto random_select_subpolicy_op = vision::RandomSelectSubpolicy({
1533
+ /// {{invert_op, 0.5}, {equalize_op, 0.4}},
1534
+ /// {{resize_op, 0.1}}
1535
+ /// });
1536
+ ///
1537
+ /// /* dataset is an instance of Dataset object */
1538
+ /// dataset = dataset->Map({random_select_subpolicy_op}, // operations
1539
+ /// {"image"}); // input columns
1540
+ /// \endcode
1541
+ explicit RandomSelectSubpolicy(
1542
+ const std::vector<std::vector<std::pair<std::shared_ptr<TensorTransform>, double>>> &policy);
1543
+
1544
+ /// \brief Constructor.
1545
+ /// \param[in] policy Vector of sub-policies to choose from, in which the TensorTransform objects are passed by reference.
1546
+ /// \par Example
1547
+ /// \code
1548
+ /// /* Define operations */
1549
+ /// vision::Invert invert_op = vision::Invert();
1550
+ /// vision::Equalize equalize_op = vision::Equalize();
1551
+ /// vision::Resize resize_op = vision::Resize({15, 15});
1552
+ ///
1553
+ /// auto random_select_subpolicy_op = vision::RandomSelectSubpolicy({
1554
+ /// {{invert_op, 0.5}, {equalize_op, 0.4}},
1555
+ /// {{resize_op, 0.1}}
1556
+ /// });
1557
+ ///
1558
+ /// /* dataset is an instance of Dataset object */
1559
+ /// dataset = dataset->Map({random_select_subpolicy_op}, // operations
1560
+ /// {"image"}); // input columns
1561
+ /// \endcode
1562
+ explicit RandomSelectSubpolicy(
1563
+ const std::vector<std::vector<std::pair<std::reference_wrapper<TensorTransform>, double>>> &policy);
1564
+
1565
+ /// \brief Destructor.
1566
+ ~RandomSelectSubpolicy() override = default;
1567
+
1568
+ protected:
1569
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1570
+ /// \return Shared pointer to TensorOperation object.
1571
+ std::shared_ptr<TensorOperation> Parse() override;
1572
+
1573
+ private:
1574
+ struct Data;
1575
+ std::shared_ptr<Data> data_;
1576
+ };
1577
+
1578
+ /// \brief Adjust the sharpness of the input image by a fixed or random degree.
1579
+ class DATASET_API RandomSharpness final : public TensorTransform {
1580
+ public:
1581
+ /// \brief Constructor.
1582
+ /// \param[in] degrees A float vector of size 2, representing the range of random sharpness
1583
+ /// adjustment degrees. It should be in (min, max) format. If min=max, then it is a
1584
+ /// single fixed magnitude operation (default = (0.1, 1.9)).
1585
+ /// \par Example
1586
+ /// \code
1587
+ /// /* Define operations */
1588
+ /// auto decode_op = vision::Decode();
1589
+ /// auto random_op = vision::RandomSharpness({0.1, 1.5});
1590
+ ///
1591
+ /// /* dataset is an instance of Dataset object */
1592
+ /// dataset = dataset->Map({decode_op, random_op}, // operations
1593
+ /// {"image"}); // input columns
1594
+ /// \endcode
1595
+ explicit RandomSharpness(const std::vector<float> &degrees = {0.1, 1.9});
1596
+
1597
+ /// \brief Destructor.
1598
+ ~RandomSharpness() override = default;
1599
+
1600
+ protected:
1601
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1602
+ /// \return Shared pointer to TensorOperation object.
1603
+ std::shared_ptr<TensorOperation> Parse() override;
1604
+
1605
+ private:
1606
+ struct Data;
1607
+ std::shared_ptr<Data> data_;
1608
+ };
1609
+
1610
+ /// \brief Invert pixels randomly within a specified range.
1611
+ class DATASET_API RandomSolarize final : public TensorTransform {
1612
+ public:
1613
+ /// \brief Constructor.
1614
+ /// \param[in] threshold A vector with two elements specifying the pixel range to invert.
1615
+ /// Threshold values should always be in (min, max) format.
1616
+ /// If min=max, all pixels above min (which equals max) will be inverted.
1617
+ /// \par Example
1618
+ /// \code
1619
+ /// /* Define operations */
1620
+ /// auto decode_op = vision::Decode();
1621
+ /// auto random_op = vision::RandomSolarize({0, 255});
1622
+ ///
1623
+ /// /* dataset is an instance of Dataset object */
1624
+ /// dataset = dataset->Map({decode_op, random_op}, // operations
1625
+ /// {"image"}); // input columns
1626
+ /// \endcode
1627
+ explicit RandomSolarize(const std::vector<uint8_t> &threshold = {0, 255});
1628
+
1629
+ /// \brief Destructor.
1630
+ ~RandomSolarize() override = default;
1631
+
1632
+ protected:
1633
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1634
+ /// \return Shared pointer to TensorOperation object.
1635
+ std::shared_ptr<TensorOperation> Parse() override;
1636
+
1637
+ private:
1638
+ struct Data;
1639
+ std::shared_ptr<Data> data_;
1640
+ };
1641
+
1642
+ /// \brief Randomly flip the input image vertically with a given probability.
1643
+ class DATASET_API RandomVerticalFlip final : public TensorTransform {
1644
+ public:
1645
+ /// \brief Constructor.
1646
+ /// \param[in] prob A float representing the probability of flip.
1647
+ /// \par Example
1648
+ /// \code
1649
+ /// /* Define operations */
1650
+ /// auto decode_op = vision::Decode();
1651
+ /// auto random_op = vision::RandomVerticalFlip();
1652
+ ///
1653
+ /// /* dataset is an instance of Dataset object */
1654
+ /// dataset = dataset->Map({decode_op, random_op}, // operations
1655
+ /// {"image"}); // input columns
1656
+ /// \endcode
1657
+ explicit RandomVerticalFlip(float prob = 0.5);
1658
+
1659
+ /// \brief Destructor.
1660
+ ~RandomVerticalFlip() override = default;
1661
+
1662
+ protected:
1663
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1664
+ /// \return Shared pointer to TensorOperation object.
1665
+ std::shared_ptr<TensorOperation> Parse() override;
1666
+
1667
+ private:
1668
+ struct Data;
1669
+ std::shared_ptr<Data> data_;
1670
+ };
1671
+
1672
+ /// \brief Randomly flip the input image vertically with a given probability and adjust bounding boxes accordingly.
1673
+ class DATASET_API RandomVerticalFlipWithBBox final : public TensorTransform {
1674
+ public:
1675
+ /// \brief Constructor.
1676
+ /// \param[in] prob A float representing the probability of flip.
1677
+ /// \par Example
1678
+ /// \code
1679
+ /// /* Define operations */
1680
+ /// auto random_op = vision::RandomVerticalFlipWithBBox();
1681
+ ///
1682
+ /// /* dataset is an instance of Dataset object */
1683
+ /// dataset = dataset->Map({random_op}, // operations
1684
+ /// {"image", "bbox"}); // input columns
1685
+ /// \endcode
1686
+ explicit RandomVerticalFlipWithBBox(float prob = 0.5);
1687
+
1688
+ /// \brief Destructor.
1689
+ ~RandomVerticalFlipWithBBox() override = default;
1690
+
1691
+ protected:
1692
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1693
+ /// \return Shared pointer to TensorOperation object.
1694
+ std::shared_ptr<TensorOperation> Parse() override;
1695
+
1696
+ private:
1697
+ struct Data;
1698
+ std::shared_ptr<Data> data_;
1699
+ };
1700
+
1701
+ /// \brief Reads a file in binary mode.
1702
+ /// \param[in] filename The path to the file to be read.
1703
+ /// \param[out] output The binary data.
1704
+ /// \return The status code.
1705
+ Status DATASET_API ReadFile(const std::string &filename, mindspore::MSTensor *output);
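+
+ // A minimal usage sketch for ReadFile, assuming "image.bin" is an existing file path:
+ //   mindspore::MSTensor raw_bytes;
+ //   Status rc = vision::ReadFile("image.bin", &raw_bytes);
+ //   if (!rc.IsOk()) { /* handle the read failure before using raw_bytes */ }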
1706
+
1707
+ /// \brief Read an image file and decode it into one- or three-channel data.
1708
+ /// \param[in] filename The path to the file to be read.
1709
+ /// \param[out] output The Tensor data.
1710
+ /// \param[in] mode The read mode used for optionally converting the image, can be one of
1711
+ /// [ImageReadMode::kUNCHANGED, ImageReadMode::kGRAYSCALE, ImageReadMode::kCOLOR]. Default:
1712
+ /// ImageReadMode::kUNCHANGED.
1713
+ /// - ImageReadMode::kUNCHANGED, keep the output in the original format.
1714
+ /// - ImageReadMode::kGRAYSCALE, convert the output into one-channel grayscale data.
1715
+ /// - ImageReadMode::kCOLOR, convert the output into three-channel RGB color data.
1716
+ /// \return The status code.
1717
+ Status DATASET_API ReadImage(const std::string &filename, mindspore::MSTensor *output,
1718
+ ImageReadMode mode = ImageReadMode::kUNCHANGED);
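+
+ // A minimal usage sketch for ReadImage, assuming "image.jpg" is an existing image path:
+ //   mindspore::MSTensor image;
+ //   Status rc = vision::ReadImage("image.jpg", &image, ImageReadMode::kCOLOR);
+ //   if (rc.IsOk()) { /* image now holds three-channel RGB data */ }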
1719
+
1720
+ /// \brief Read the video, audio and metadata from a video file. It supports AVI, H264, H265, MOV, MP4 and WMV file formats.
1721
+ /// \param[in] filename The path to the video file to be read.
1722
+ /// \param[out] video_output The video frames of the video file.
1723
+ /// \param[out] audio_output The audio frames of the video file.
1724
+ /// \param[out] metadata_output The metadata contains video_fps, audio_fps.
1725
+ /// \param[in] start_pts The start presentation timestamp of the video. Default: 0.0.
1726
+ /// \param[in] end_pts The end presentation timestamp of the video. Default: 2147483647.0.
1727
+ /// \param[in] pts_unit The unit for the timestamps, can be one of ["pts", "sec"]. Default: "pts".
1728
+ /// \return The status code.
1729
+ Status DATASET_API ReadVideo(const std::string &filename, mindspore::MSTensor *video_output,
1730
+ mindspore::MSTensor *audio_output, std::map<std::string, std::string> *metadata_output,
1731
+ float start_pts = 0.0, float end_pts = 2147483647.0, const std::string &pts_unit = "pts");
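+
+ // A minimal usage sketch for ReadVideo, assuming "video.mp4" is an existing video path:
+ //   mindspore::MSTensor video_frames;
+ //   mindspore::MSTensor audio_frames;
+ //   std::map<std::string, std::string> metadata;
+ //   Status rc = vision::ReadVideo("video.mp4", &video_frames, &audio_frames, &metadata);
+ //   if (rc.IsOk()) { /* metadata holds the "video_fps" and "audio_fps" entries */ }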
1732
+
1733
+ /// \brief Read the timestamps and frame rate of a video file. It supports AVI, H264, H265, MOV, MP4, WMV files.
1734
+ /// \param[in] filename The path to the video file to be read.
1735
+ /// \param[out] output The tuple(video_timestamps, video_fps) of the video.
1736
+ /// \param[in] pts_unit The unit for the timestamps, can be one of ["pts", "sec"]. Default: "pts".
1737
+ /// \return The status code.
1738
+ Status DATASET_API ReadVideoTimestamps(const std::string &filename, std::tuple<std::vector<float>, float> *output,
1739
+ const std::string &pts_unit = "pts");
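+
+ // A minimal usage sketch for ReadVideoTimestamps, assuming "video.mp4" is an existing video path:
+ //   std::tuple<std::vector<float>, float> timestamps_and_fps;
+ //   Status rc = vision::ReadVideoTimestamps("video.mp4", &timestamps_and_fps, "pts");
+ //   if (rc.IsOk()) { /* std::get<0> holds per-frame timestamps, std::get<1> holds the frame rate */ }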
1740
+
1741
+ /// \brief Crop the given image and resize the cropped region to the specified size.
1742
+ class DATASET_API ResizedCrop final : public TensorTransform {
1743
+ public:
1744
+ /// \brief Constructor.
1745
+ /// \param[in] top Vertical coordinate of the upper-left corner of the crop region.
1746
+ /// \param[in] left Horizontal coordinate of the upper-left corner of the crop region.
1747
+ /// \param[in] height Height of cropped image.
1748
+ /// \param[in] width Width of cropped image.
1749
+ /// \param[in] size A vector representing the output size of the image.
1750
+ /// If the size is a single value, a square image of size (size, size) is returned.
1751
+ /// If the size has 2 values, it should be (height, width).
1752
+ /// \param[in] interpolation Image interpolation mode. Default: InterpolationMode::kLinear.
1753
+ /// - InterpolationMode::kLinear, Interpolation method is bilinear interpolation.
1754
+ /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation.
1755
+ /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation.
1756
+ /// - InterpolationMode::kArea, Interpolation method is pixel area interpolation.
1757
+ /// - InterpolationMode::kCubicPil, Interpolation method is bicubic interpolation like implemented in pillow.
1758
+ /// \note If more than one image is input, make sure that all the images have the same size.
1759
+ /// \par Example
1760
+ /// \code
1761
+ /// /* Define operations */
1762
+ /// auto decode_op = vision::Decode();
1763
+ /// auto resized_crop_op = vision::ResizedCrop(128, 128, 256, 256, {128, 128});
1764
+ ///
1765
+ /// /* dataset is an instance of Dataset object */
1766
+ /// dataset = dataset->Map({decode_op, resized_crop_op}, // operations
1767
+ /// {"image"}); // input columns
1768
+ /// \endcode
1769
+ ResizedCrop(int32_t top, int32_t left, int32_t height, int32_t width, const std::vector<int32_t> &size,
1770
+ InterpolationMode interpolation = InterpolationMode::kLinear);
1771
+
1772
+ /// \brief Destructor.
1773
+ ~ResizedCrop() override = default;
1774
+
1775
+ protected:
1776
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1777
+ /// \return Shared pointer to TensorOperation object.
1778
+ std::shared_ptr<TensorOperation> Parse() override;
1779
+
1780
+ private:
1781
+ struct Data;
1782
+ std::shared_ptr<Data> data_;
1783
+ };
1784
+
1785
+ /// \brief Resize the input image to the given size and adjust bounding boxes accordingly.
1786
+ class DATASET_API ResizeWithBBox final : public TensorTransform {
1787
+ public:
1788
+ /// \brief Constructor.
1789
+ /// \param[in] size The output size of the resized image.
1790
+ /// If the size is an integer, the smaller edge of the image will be resized to this value with the same image aspect
1791
+ /// ratio. If the size is a sequence of length 2, it should be (height, width).
1792
+ /// \param[in] interpolation An enum for the mode of interpolation (default=InterpolationMode::kLinear).
1793
+ /// - InterpolationMode::kLinear, Interpolation method is bilinear interpolation.
1794
+ /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation.
1795
+ /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation.
1796
+ /// - InterpolationMode::kArea, Interpolation method is pixel area interpolation.
1797
+ /// - InterpolationMode::kCubicPil, Interpolation method is bicubic interpolation like implemented in pillow.
1798
+ /// \par Example
1799
+ /// \code
1800
+ /// /* Define operations */
1801
+ /// auto random_op = vision::ResizeWithBBox({100, 100}, InterpolationMode::kNearestNeighbour);
1802
+ ///
1803
+ /// /* dataset is an instance of Dataset object */
1804
+ /// dataset = dataset->Map({random_op}, // operations
1805
+ /// {"image", "bbox"}); // input columns
1806
+ /// \endcode
1807
+ explicit ResizeWithBBox(const std::vector<int32_t> &size,
1808
+ InterpolationMode interpolation = InterpolationMode::kLinear);
1809
+
1810
+ /// \brief Destructor.
1811
+ ~ResizeWithBBox() override = default;
1812
+
1813
+ protected:
1814
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1815
+ /// \return Shared pointer to TensorOperation object.
1816
+ std::shared_ptr<TensorOperation> Parse() override;
1817
+
1818
+ private:
1819
+ struct Data;
1820
+ std::shared_ptr<Data> data_;
1821
+ };
1822
+
1823
+ /// \brief Change the format of the input tensor from 4-channel RGBA to 3-channel BGR.
1824
+ class DATASET_API RGBA2BGR final : public TensorTransform {
1825
+ public:
1826
+ /// \brief Constructor.
1827
+ /// \par Example
1828
+ /// \code
1829
+ /// /* Define operations */
1830
+ /// auto decode_op = vision::Decode();
1831
+ /// auto rgba2bgr_op = vision::RGBA2BGR();
1832
+ ///
1833
+ /// /* dataset is an instance of Dataset object */
1834
+ /// dataset = dataset->Map({decode_op, rgba2bgr_op}, // operations
1835
+ /// {"image"}); // input columns
1836
+ /// \endcode
1837
+ RGBA2BGR();
1838
+
1839
+ /// \brief Destructor.
1840
+ ~RGBA2BGR() override = default;
1841
+
1842
+ protected:
1843
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1844
+ /// \return Shared pointer to TensorOperation object.
1845
+ std::shared_ptr<TensorOperation> Parse() override;
1846
+ };
1847
+
1848
+ /// \brief Change the input 4-channel RGBA tensor to a 3-channel RGB tensor.
1849
+ class DATASET_API RGBA2RGB final : public TensorTransform {
1850
+ public:
1851
+ /// \brief Constructor.
1852
+ /// \par Example
1853
+ /// \code
1854
+ /// /* Define operations */
1855
+ /// auto decode_op = vision::Decode();
1856
+ /// auto rgba2rgb_op = vision::RGBA2RGB();
1857
+ ///
1858
+ /// /* dataset is an instance of Dataset object */
1859
+ /// dataset = dataset->Map({decode_op, rgba2rgb_op}, // operations
1860
+ /// {"image"}); // input columns
1861
+ /// \endcode
1862
+ RGBA2RGB();
1863
+
1864
+ /// \brief Destructor.
1865
+ ~RGBA2RGB() override = default;
1866
+
1867
+ protected:
1868
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1869
+ /// \return Shared pointer to TensorOperation object.
1870
+ std::shared_ptr<TensorOperation> Parse() override;
1871
+ };
1872
+
1873
+ /// \brief Slice the tensor into multiple patches in the horizontal and vertical directions.
1874
+ class DATASET_API SlicePatches final : public TensorTransform {
1875
+ public:
1876
+ /// \brief Constructor.
1877
+ /// \param[in] num_height The number of patches in vertical direction (default=1).
1878
+ /// \param[in] num_width The number of patches in horizontal direction (default=1).
1879
+ /// \param[in] slice_mode An enum for the mode of slice (default=SliceMode::kPad).
1880
+ /// \param[in] fill_value A value representing the pixel to fill the padding area in right and
1881
+ /// bottom borders if slice_mode is kPad, so that the padded tensor can be sliced evenly into patches (default=0).
1882
+ /// \note The typical usage scenario is a tensor with large height and width. The tensor remains unchanged
1883
+ /// if both num_height and num_width are set to 1, and the number of output tensors is equal to num_height*num_width.
1884
+ /// \par Example
1885
+ /// \code
1886
+ /// /* Define operations */
1887
+ /// auto decode_op = vision::Decode();
1888
+ /// auto slice_patch_op = vision::SlicePatches(255, 255);
1889
+ ///
1890
+ /// /* dataset is an instance of Dataset object */
1891
+ /// dataset = dataset->Map({decode_op, slice_patch_op}, // operations
1892
+ /// {"image"}); // input columns
1893
+ /// \endcode
1894
+ explicit SlicePatches(int32_t num_height = 1, int32_t num_width = 1, SliceMode slice_mode = SliceMode::kPad,
1895
+ uint8_t fill_value = 0);
1896
+
1897
+ /// \brief Destructor.
1898
+ ~SlicePatches() override = default;
1899
+
1900
+ protected:
1901
+ /// \brief Function to convert TensorTransform object into a TensorOperation object.
1902
+ /// \return Shared pointer to TensorOperation object.
1903
+ std::shared_ptr<TensorOperation> Parse() override;
1904
+
1905
+ private:
1906
+ struct Data;
1907
+ std::shared_ptr<Data> data_;
1908
+ };
1909
+
1910
+ /// \brief Invert pixels within a specified range.
1911
+ class DATASET_API Solarize final : public TensorTransform {
1912
+ public:
1913
+ /// \brief Constructor.
1914
+ /// \param[in] threshold A vector with two elements specifying the pixel range to invert.
1915
+ /// Threshold values should always be in (min, max) format.
1916
+ /// If min=max, all pixels above min (which equals max) will be inverted.
1917
+ /// \par Example
1918
+ /// \code
1919
+ /// /* Define operations */
1920
+ /// auto decode_op = vision::Decode();
1921
+ /// auto solarize_op = vision::Solarize({0, 255});
1922
+ ///
1923
+ /// /* dataset is an instance of Dataset object */
1924
+ /// dataset = dataset->Map({decode_op, solarize_op}, // operations
1925
+ /// {"image"}); // input columns
1926
+ /// \endcode
1927
+ explicit Solarize(const std::vector<float> &threshold);
1928
+
1929
+ /// \brief Destructor.
1930
+ ~Solarize() override = default;
1931
+
1932
+ protected:
1933
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
1934
+ /// \return Shared pointer to TensorOperation object.
1935
+ std::shared_ptr<TensorOperation> Parse() override;
1936
+
1937
+ private:
1938
+ struct Data;
1939
+ std::shared_ptr<Data> data_;
1940
+ };
1941
+
1942
+ /// \brief Divide the pixel values by 255 and convert from HWC format to CHW format with required datatype.
1943
+ class DATASET_API ToTensor final : public TensorTransform {
1944
+ public:
1945
+ /// \brief Constructor.
1946
+ /// \param[in] output_type The type of the output tensor, specified as mindspore::DataType or String
1947
+ /// (default=mindspore::DataType::kNumberTypeFloat32).
1948
+ /// \par Example
1949
+ /// \code
1950
+ /// /* Define operations */
1951
+ /// auto to_tensor_op = vision::ToTensor();
1952
+ ///
1953
+ /// /* dataset is an instance of Dataset object */
1954
+ /// dataset = dataset->Map({to_tensor_op}, // operations
1955
+ /// {"image"}); // input columns
1956
+ /// \endcode
1957
+ ToTensor();
+ explicit ToTensor(std::string output_type);
+ explicit ToTensor(mindspore::DataType output_type);
+
+ /// \brief Destructor.
+ ~ToTensor() override = default;
+
+ protected:
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
+ /// \return Shared pointer to TensorOperation object.
+ std::shared_ptr<TensorOperation> Parse() override;
+
+ private:
+ struct Data;
+ std::shared_ptr<Data> data_;
+ };
+
+ /// \brief Dataset-independent data augmentation with TrivialAugment Wide.
+ class DATASET_API TrivialAugmentWide final : public TensorTransform {
+ public:
+ /// \brief Constructor.
+ /// \param[in] num_magnitude_bins The number of different magnitude values. Default: 31.
+ /// \param[in] interpolation An enum for the mode of interpolation. Default: InterpolationMode::kNearestNeighbour.
+ /// - InterpolationMode::kNearestNeighbour, Interpolation method is nearest-neighbor interpolation.
+ /// - InterpolationMode::kLinear, Interpolation method is bilinear interpolation.
+ /// - InterpolationMode::kCubic, Interpolation method is bicubic interpolation.
+ /// - InterpolationMode::kArea, Interpolation method is pixel area interpolation.
+ /// \param[in] fill_value A vector representing the pixel intensity of the borders. Default: {0, 0, 0}.
+ /// \par Example
+ /// \code
+ /// /* Define operations */
+ /// auto decode_op = vision::Decode();
+ /// auto trivial_augment_wide_op = vision::TrivialAugmentWide();
+ ///
+ /// /* dataset is an instance of Dataset object */
+ /// dataset = dataset->Map({decode_op, trivial_augment_wide_op}, // operations
+ /// {"image"}); // input columns
+ /// \endcode
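+ /// \code
+ /// /* Editorial sketch, not part of the original header: non-default arguments,
+ ///    using bilinear interpolation and a hypothetical gray fill value. */
+ /// auto trivial_augment_linear_op =
+ ///     vision::TrivialAugmentWide(31, InterpolationMode::kLinear, {128, 128, 128});
+ /// \endcode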
+ explicit TrivialAugmentWide(int32_t num_magnitude_bins = 31,
+ InterpolationMode interpolation = InterpolationMode::kNearestNeighbour,
+ const std::vector<uint8_t> &fill_value = {0, 0, 0});
+
+ /// \brief Destructor.
+ ~TrivialAugmentWide() override = default;
+
+ protected:
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
+ /// \return Shared pointer to TensorOperation object.
+ std::shared_ptr<TensorOperation> Parse() override;
+
+ private:
+ struct Data;
+ std::shared_ptr<Data> data_;
+ };
+
+ /// \brief Randomly apply transformations, selected from the input transform list, to the input tensor.
+ class DATASET_API UniformAugment final : public TensorTransform {
+ public:
+ /// \brief Constructor.
+ /// \param[in] transforms A vector of raw pointers to TensorTransform operations.
+ /// \param[in] num_ops An integer representing the number of operations to be selected and applied.
+ /// \par Example
+ /// \code
+ /// /* Define operations */
+ /// auto resize_op(new vision::Resize({30, 30}));
+ /// auto random_crop_op(new vision::RandomCrop({28, 28}));
+ /// auto center_crop_op(new vision::CenterCrop({16, 16}));
+ /// auto uniform_op(new vision::UniformAugment({random_crop_op, center_crop_op}, 2));
+ ///
+ /// /* dataset is an instance of Dataset object */
+ /// dataset = dataset->Map({resize_op, uniform_op}, // operations
+ /// {"image"}); // input columns
+ /// \endcode
+ explicit UniformAugment(const std::vector<TensorTransform *> &transforms, int32_t num_ops = 2);
+
+ /// \brief Constructor.
+ /// \param[in] transforms A vector of shared pointers to TensorTransform operations.
+ /// \param[in] num_ops An integer representing the number of operations to be selected and applied.
+ /// \par Example
+ /// \code
+ /// /* Define operations */
+ /// std::shared_ptr<TensorTransform> resize_op(new vision::Resize({30, 30}));
+ /// std::shared_ptr<TensorTransform> random_crop_op(new vision::RandomCrop({28, 28}));
+ /// std::shared_ptr<TensorTransform> center_crop_op(new vision::CenterCrop({16, 16}));
+ /// std::shared_ptr<TensorTransform> uniform_op(new vision::UniformAugment({random_crop_op, center_crop_op}, 2));
+ ///
+ /// /* dataset is an instance of Dataset object */
+ /// dataset = dataset->Map({resize_op, uniform_op}, // operations
+ /// {"image"}); // input columns
+ /// \endcode
+ explicit UniformAugment(const std::vector<std::shared_ptr<TensorTransform>> &transforms, int32_t num_ops = 2);
+
+ /// \brief Constructor.
+ /// \param[in] transforms A vector of TensorTransform objects, passed by reference.
+ /// \param[in] num_ops An integer representing the number of operations to be selected and applied.
+ /// \par Example
+ /// \code
+ /// /* Define operations */
+ /// vision::Resize resize_op = vision::Resize({30, 30});
+ /// vision::RandomCrop random_crop_op = vision::RandomCrop({28, 28});
+ /// vision::CenterCrop center_crop_op = vision::CenterCrop({16, 16});
+ /// vision::UniformAugment uniform_op = vision::UniformAugment({random_crop_op, center_crop_op}, 2);
+ ///
+ /// /* dataset is an instance of Dataset object */
+ /// dataset = dataset->Map({resize_op, uniform_op}, // operations
+ /// {"image"}); // input columns
+ /// \endcode
+ explicit UniformAugment(const std::vector<std::reference_wrapper<TensorTransform>> &transforms, int32_t num_ops = 2);
+
+ /// \brief Destructor.
+ ~UniformAugment() override = default;
+
+ protected:
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
+ /// \return Shared pointer to TensorOperation object.
+ std::shared_ptr<TensorOperation> Parse() override;
+
+ private:
+ struct Data;
+ std::shared_ptr<Data> data_;
+ };
+
+ /// \brief Flip the input image vertically.
+ class DATASET_API VerticalFlip final : public TensorTransform {
+ public:
+ /// \brief Constructor.
+ /// \par Example
+ /// \code
+ /// /* Define operations */
+ /// auto decode_op = vision::Decode();
+ /// auto flip_op = vision::VerticalFlip();
+ ///
+ /// /* dataset is an instance of Dataset object */
+ /// dataset = dataset->Map({decode_op, flip_op}, // operations
+ /// {"image"}); // input columns
+ /// \endcode
+ VerticalFlip();
+
+ /// \brief Destructor.
+ ~VerticalFlip() override = default;
+
+ protected:
+ /// \brief The function to convert a TensorTransform object into a TensorOperation object.
+ /// \return Shared pointer to TensorOperation object.
+ std::shared_ptr<TensorOperation> Parse() override;
+ };
+
+ /// \brief Write one-dimensional uint8 data to a file in binary mode.
+ /// \param[in] filename The path to the file to be written.
+ /// \param[in] data The tensor data.
+ /// \return The status code.
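+ /// \par Example
+ /// \code
+ /// /* Editorial sketch, not part of the original header: "vector.bin" and `data`
+ ///    are hypothetical; `data` is assumed to be a one-dimensional uint8 MSTensor. */
+ /// Status rc = WriteFile("vector.bin", data);
+ /// \endcode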
+ Status DATASET_API WriteFile(const std::string &filename, const mindspore::MSTensor &data);
+
+ /// \brief Write the image data into a JPEG file.
+ /// \param[in] filename The path to the file to be written.
+ /// \param[in] image The data tensor.
+ /// \param[in] quality The quality of the JPEG file, in the range [1, 100]. Default: 75.
+ /// \return The status code.
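+ /// \par Example
+ /// \code
+ /// /* Editorial sketch, not part of the original header: "output.jpg" and `image`
+ ///    are hypothetical; `image` is assumed to hold decoded image data. */
+ /// Status rc = WriteJpeg("output.jpg", image, 90);
+ /// \endcode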
+ Status DATASET_API WriteJpeg(const std::string &filename, const mindspore::MSTensor &image, int quality = 75);
+
+ /// \brief Write the image into a PNG file.
+ /// \param[in] filename The path to the file to be written.
+ /// \param[in] image The data tensor.
+ /// \param[in] compression_level The compression level of the PNG file, in the range [0, 9]. Default: 6.
+ /// \return The status code.
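+ /// \par Example
+ /// \code
+ /// /* Editorial sketch, not part of the original header: "output.png" and `image`
+ ///    are hypothetical; `image` is assumed to hold decoded image data. */
+ /// Status rc = WritePng("output.png", image, 6);
+ /// \endcode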
+ Status DATASET_API WritePng(const std::string &filename, const mindspore::MSTensor &image, int compression_level = 6);
+ } // namespace vision
+ } // namespace dataset
+ } // namespace mindspore
+ #endif // MINDSPORE_CCSRC_MINDDATA_DATASET_INCLUDE_DATASET_VISION_H_