paddlex 2.0.0rc4__py3-none-any.whl → 3.0.0b2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (1087)
  1. paddlex/.version +1 -0
  2. paddlex/__init__.py +51 -18
  3. paddlex/__main__.py +40 -0
  4. paddlex/configs/anomaly_detection/STFPM.yaml +41 -0
  5. paddlex/configs/doc_text_orientation/PP-LCNet_x1_0_doc_ori.yaml +41 -0
  6. paddlex/configs/face_detection/BlazeFace-FPN-SSH.yaml +40 -0
  7. paddlex/configs/face_detection/BlazeFace.yaml +40 -0
  8. paddlex/configs/face_detection/PP-YOLOE_plus-S_face.yaml +40 -0
  9. paddlex/configs/face_detection/PicoDet_LCNet_x2_5_face.yaml +40 -0
  10. paddlex/configs/face_recognition/MobileFaceNet.yaml +44 -0
  11. paddlex/configs/face_recognition/ResNet50_face.yaml +44 -0
  12. paddlex/configs/formula_recognition/LaTeX_OCR_rec.yaml +40 -0
  13. paddlex/configs/general_recognition/PP-ShiTuV2_rec.yaml +42 -0
  14. paddlex/configs/general_recognition/PP-ShiTuV2_rec_CLIP_vit_base.yaml +42 -0
  15. paddlex/configs/general_recognition/PP-ShiTuV2_rec_CLIP_vit_large.yaml +41 -0
  16. paddlex/configs/human_detection/PP-YOLOE-L_human.yaml +42 -0
  17. paddlex/configs/human_detection/PP-YOLOE-S_human.yaml +42 -0
  18. paddlex/configs/image_classification/CLIP_vit_base_patch16_224.yaml +41 -0
  19. paddlex/configs/image_classification/CLIP_vit_large_patch14_224.yaml +41 -0
  20. paddlex/configs/image_classification/ConvNeXt_base_224.yaml +41 -0
  21. paddlex/configs/image_classification/ConvNeXt_base_384.yaml +41 -0
  22. paddlex/configs/image_classification/ConvNeXt_large_224.yaml +41 -0
  23. paddlex/configs/image_classification/ConvNeXt_large_384.yaml +41 -0
  24. paddlex/configs/image_classification/ConvNeXt_small.yaml +41 -0
  25. paddlex/configs/image_classification/ConvNeXt_tiny.yaml +41 -0
  26. paddlex/configs/image_classification/FasterNet-L.yaml +40 -0
  27. paddlex/configs/image_classification/FasterNet-M.yaml +40 -0
  28. paddlex/configs/image_classification/FasterNet-S.yaml +40 -0
  29. paddlex/configs/image_classification/FasterNet-T0.yaml +40 -0
  30. paddlex/configs/image_classification/FasterNet-T1.yaml +40 -0
  31. paddlex/configs/image_classification/FasterNet-T2.yaml +40 -0
  32. paddlex/configs/image_classification/MobileNetV1_x0_25.yaml +41 -0
  33. paddlex/configs/image_classification/MobileNetV1_x0_5.yaml +41 -0
  34. paddlex/configs/image_classification/MobileNetV1_x0_75.yaml +41 -0
  35. paddlex/configs/image_classification/MobileNetV1_x1_0.yaml +41 -0
  36. paddlex/configs/image_classification/MobileNetV2_x0_25.yaml +41 -0
  37. paddlex/configs/image_classification/MobileNetV2_x0_5.yaml +41 -0
  38. paddlex/configs/image_classification/MobileNetV2_x1_0.yaml +41 -0
  39. paddlex/configs/image_classification/MobileNetV2_x1_5.yaml +41 -0
  40. paddlex/configs/image_classification/MobileNetV2_x2_0.yaml +41 -0
  41. paddlex/configs/image_classification/MobileNetV3_large_x0_35.yaml +41 -0
  42. paddlex/configs/image_classification/MobileNetV3_large_x0_5.yaml +41 -0
  43. paddlex/configs/image_classification/MobileNetV3_large_x0_75.yaml +41 -0
  44. paddlex/configs/image_classification/MobileNetV3_large_x1_0.yaml +41 -0
  45. paddlex/configs/image_classification/MobileNetV3_large_x1_25.yaml +41 -0
  46. paddlex/configs/image_classification/MobileNetV3_small_x0_35.yaml +41 -0
  47. paddlex/configs/image_classification/MobileNetV3_small_x0_5.yaml +41 -0
  48. paddlex/configs/image_classification/MobileNetV3_small_x0_75.yaml +41 -0
  49. paddlex/configs/image_classification/MobileNetV3_small_x1_0.yaml +41 -0
  50. paddlex/configs/image_classification/MobileNetV3_small_x1_25.yaml +41 -0
  51. paddlex/configs/image_classification/MobileNetV4_conv_large.yaml +41 -0
  52. paddlex/configs/image_classification/MobileNetV4_conv_medium.yaml +41 -0
  53. paddlex/configs/image_classification/MobileNetV4_conv_small.yaml +41 -0
  54. paddlex/configs/image_classification/MobileNetV4_hybrid_large.yaml +41 -0
  55. paddlex/configs/image_classification/MobileNetV4_hybrid_medium.yaml +41 -0
  56. paddlex/configs/image_classification/PP-HGNetV2-B0.yaml +41 -0
  57. paddlex/configs/image_classification/PP-HGNetV2-B1.yaml +41 -0
  58. paddlex/configs/image_classification/PP-HGNetV2-B2.yaml +41 -0
  59. paddlex/configs/image_classification/PP-HGNetV2-B3.yaml +41 -0
  60. paddlex/configs/image_classification/PP-HGNetV2-B4.yaml +41 -0
  61. paddlex/configs/image_classification/PP-HGNetV2-B5.yaml +41 -0
  62. paddlex/configs/image_classification/PP-HGNetV2-B6.yaml +41 -0
  63. paddlex/configs/image_classification/PP-HGNet_base.yaml +41 -0
  64. paddlex/configs/image_classification/PP-HGNet_small.yaml +41 -0
  65. paddlex/configs/image_classification/PP-HGNet_tiny.yaml +41 -0
  66. paddlex/configs/image_classification/PP-LCNetV2_base.yaml +41 -0
  67. paddlex/configs/image_classification/PP-LCNetV2_large.yaml +41 -0
  68. paddlex/configs/image_classification/PP-LCNetV2_small.yaml +41 -0
  69. paddlex/configs/image_classification/PP-LCNet_x0_25.yaml +41 -0
  70. paddlex/configs/image_classification/PP-LCNet_x0_35.yaml +41 -0
  71. paddlex/configs/image_classification/PP-LCNet_x0_5.yaml +41 -0
  72. paddlex/configs/image_classification/PP-LCNet_x0_75.yaml +41 -0
  73. paddlex/configs/image_classification/PP-LCNet_x1_0.yaml +41 -0
  74. paddlex/configs/image_classification/PP-LCNet_x1_5.yaml +41 -0
  75. paddlex/configs/image_classification/PP-LCNet_x2_0.yaml +41 -0
  76. paddlex/configs/image_classification/PP-LCNet_x2_5.yaml +41 -0
  77. paddlex/configs/image_classification/ResNet101.yaml +41 -0
  78. paddlex/configs/image_classification/ResNet101_vd.yaml +41 -0
  79. paddlex/configs/image_classification/ResNet152.yaml +41 -0
  80. paddlex/configs/image_classification/ResNet152_vd.yaml +41 -0
  81. paddlex/configs/image_classification/ResNet18.yaml +41 -0
  82. paddlex/configs/image_classification/ResNet18_vd.yaml +41 -0
  83. paddlex/configs/image_classification/ResNet200_vd.yaml +41 -0
  84. paddlex/configs/image_classification/ResNet34.yaml +41 -0
  85. paddlex/configs/image_classification/ResNet34_vd.yaml +41 -0
  86. paddlex/configs/image_classification/ResNet50.yaml +41 -0
  87. paddlex/configs/image_classification/ResNet50_vd.yaml +41 -0
  88. paddlex/configs/image_classification/StarNet-S1.yaml +41 -0
  89. paddlex/configs/image_classification/StarNet-S2.yaml +41 -0
  90. paddlex/configs/image_classification/StarNet-S3.yaml +41 -0
  91. paddlex/configs/image_classification/StarNet-S4.yaml +41 -0
  92. paddlex/configs/image_classification/SwinTransformer_base_patch4_window12_384.yaml +41 -0
  93. paddlex/configs/image_classification/SwinTransformer_base_patch4_window7_224.yaml +41 -0
  94. paddlex/configs/image_classification/SwinTransformer_large_patch4_window12_384.yaml +41 -0
  95. paddlex/configs/image_classification/SwinTransformer_large_patch4_window7_224.yaml +41 -0
  96. paddlex/configs/image_classification/SwinTransformer_small_patch4_window7_224.yaml +41 -0
  97. paddlex/configs/image_classification/SwinTransformer_tiny_patch4_window7_224.yaml +41 -0
  98. paddlex/configs/image_unwarping/UVDoc.yaml +12 -0
  99. paddlex/configs/instance_segmentation/Cascade-MaskRCNN-ResNet50-FPN.yaml +40 -0
  100. paddlex/configs/instance_segmentation/Cascade-MaskRCNN-ResNet50-vd-SSLDv2-FPN.yaml +40 -0
  101. paddlex/configs/instance_segmentation/Mask-RT-DETR-H.yaml +40 -0
  102. paddlex/configs/instance_segmentation/Mask-RT-DETR-L.yaml +40 -0
  103. paddlex/configs/instance_segmentation/Mask-RT-DETR-M.yaml +40 -0
  104. paddlex/configs/instance_segmentation/Mask-RT-DETR-S.yaml +40 -0
  105. paddlex/configs/instance_segmentation/Mask-RT-DETR-X.yaml +40 -0
  106. paddlex/configs/instance_segmentation/MaskRCNN-ResNeXt101-vd-FPN.yaml +39 -0
  107. paddlex/configs/instance_segmentation/MaskRCNN-ResNet101-FPN.yaml +40 -0
  108. paddlex/configs/instance_segmentation/MaskRCNN-ResNet101-vd-FPN.yaml +40 -0
  109. paddlex/configs/instance_segmentation/MaskRCNN-ResNet50-FPN.yaml +40 -0
  110. paddlex/configs/instance_segmentation/MaskRCNN-ResNet50-vd-FPN.yaml +40 -0
  111. paddlex/configs/instance_segmentation/MaskRCNN-ResNet50.yaml +40 -0
  112. paddlex/configs/instance_segmentation/PP-YOLOE_seg-S.yaml +40 -0
  113. paddlex/configs/instance_segmentation/SOLOv2.yaml +40 -0
  114. paddlex/configs/mainbody_detection/PP-ShiTuV2_det.yaml +41 -0
  115. paddlex/configs/multilabel_classification/CLIP_vit_base_patch16_448_ML.yaml +41 -0
  116. paddlex/configs/multilabel_classification/PP-HGNetV2-B0_ML.yaml +41 -0
  117. paddlex/configs/multilabel_classification/PP-HGNetV2-B4_ML.yaml +41 -0
  118. paddlex/configs/multilabel_classification/PP-HGNetV2-B6_ML.yaml +41 -0
  119. paddlex/configs/multilabel_classification/PP-LCNet_x1_0_ML.yaml +41 -0
  120. paddlex/configs/multilabel_classification/ResNet50_ML.yaml +41 -0
  121. paddlex/configs/object_detection/Cascade-FasterRCNN-ResNet50-FPN.yaml +41 -0
  122. paddlex/configs/object_detection/Cascade-FasterRCNN-ResNet50-vd-SSLDv2-FPN.yaml +42 -0
  123. paddlex/configs/object_detection/CenterNet-DLA-34.yaml +41 -0
  124. paddlex/configs/object_detection/CenterNet-ResNet50.yaml +41 -0
  125. paddlex/configs/object_detection/DETR-R50.yaml +42 -0
  126. paddlex/configs/object_detection/FCOS-ResNet50.yaml +41 -0
  127. paddlex/configs/object_detection/FasterRCNN-ResNeXt101-vd-FPN.yaml +42 -0
  128. paddlex/configs/object_detection/FasterRCNN-ResNet101-FPN.yaml +42 -0
  129. paddlex/configs/object_detection/FasterRCNN-ResNet101.yaml +42 -0
  130. paddlex/configs/object_detection/FasterRCNN-ResNet34-FPN.yaml +42 -0
  131. paddlex/configs/object_detection/FasterRCNN-ResNet50-FPN.yaml +42 -0
  132. paddlex/configs/object_detection/FasterRCNN-ResNet50-vd-FPN.yaml +42 -0
  133. paddlex/configs/object_detection/FasterRCNN-ResNet50-vd-SSLDv2-FPN.yaml +42 -0
  134. paddlex/configs/object_detection/FasterRCNN-ResNet50.yaml +42 -0
  135. paddlex/configs/object_detection/FasterRCNN-Swin-Tiny-FPN.yaml +42 -0
  136. paddlex/configs/object_detection/PP-YOLOE_plus-L.yaml +40 -0
  137. paddlex/configs/object_detection/PP-YOLOE_plus-M.yaml +40 -0
  138. paddlex/configs/object_detection/PP-YOLOE_plus-S.yaml +40 -0
  139. paddlex/configs/object_detection/PP-YOLOE_plus-X.yaml +40 -0
  140. paddlex/configs/object_detection/PicoDet-L.yaml +40 -0
  141. paddlex/configs/object_detection/PicoDet-M.yaml +42 -0
  142. paddlex/configs/object_detection/PicoDet-S.yaml +40 -0
  143. paddlex/configs/object_detection/PicoDet-XS.yaml +42 -0
  144. paddlex/configs/object_detection/RT-DETR-H.yaml +40 -0
  145. paddlex/configs/object_detection/RT-DETR-L.yaml +40 -0
  146. paddlex/configs/object_detection/RT-DETR-R18.yaml +40 -0
  147. paddlex/configs/object_detection/RT-DETR-R50.yaml +40 -0
  148. paddlex/configs/object_detection/RT-DETR-X.yaml +40 -0
  149. paddlex/configs/object_detection/YOLOX-L.yaml +40 -0
  150. paddlex/configs/object_detection/YOLOX-M.yaml +40 -0
  151. paddlex/configs/object_detection/YOLOX-N.yaml +40 -0
  152. paddlex/configs/object_detection/YOLOX-S.yaml +40 -0
  153. paddlex/configs/object_detection/YOLOX-T.yaml +40 -0
  154. paddlex/configs/object_detection/YOLOX-X.yaml +40 -0
  155. paddlex/configs/object_detection/YOLOv3-DarkNet53.yaml +40 -0
  156. paddlex/configs/object_detection/YOLOv3-MobileNetV3.yaml +40 -0
  157. paddlex/configs/object_detection/YOLOv3-ResNet50_vd_DCN.yaml +40 -0
  158. paddlex/configs/pedestrian_attribute/PP-LCNet_x1_0_pedestrian_attribute.yaml +41 -0
  159. paddlex/configs/semantic_segmentation/Deeplabv3-R101.yaml +40 -0
  160. paddlex/configs/semantic_segmentation/Deeplabv3-R50.yaml +40 -0
  161. paddlex/configs/semantic_segmentation/Deeplabv3_Plus-R101.yaml +40 -0
  162. paddlex/configs/semantic_segmentation/Deeplabv3_Plus-R50.yaml +40 -0
  163. paddlex/configs/semantic_segmentation/OCRNet_HRNet-W18.yaml +40 -0
  164. paddlex/configs/semantic_segmentation/OCRNet_HRNet-W48.yaml +40 -0
  165. paddlex/configs/semantic_segmentation/PP-LiteSeg-B.yaml +41 -0
  166. paddlex/configs/semantic_segmentation/PP-LiteSeg-T.yaml +40 -0
  167. paddlex/configs/semantic_segmentation/SeaFormer_base.yaml +40 -0
  168. paddlex/configs/semantic_segmentation/SeaFormer_large.yaml +40 -0
  169. paddlex/configs/semantic_segmentation/SeaFormer_small.yaml +40 -0
  170. paddlex/configs/semantic_segmentation/SeaFormer_tiny.yaml +40 -0
  171. paddlex/configs/semantic_segmentation/SegFormer-B0.yaml +40 -0
  172. paddlex/configs/semantic_segmentation/SegFormer-B1.yaml +40 -0
  173. paddlex/configs/semantic_segmentation/SegFormer-B2.yaml +40 -0
  174. paddlex/configs/semantic_segmentation/SegFormer-B3.yaml +40 -0
  175. paddlex/configs/semantic_segmentation/SegFormer-B4.yaml +40 -0
  176. paddlex/configs/semantic_segmentation/SegFormer-B5.yaml +40 -0
  177. paddlex/configs/small_object_detection/PP-YOLOE_plus_SOD-L.yaml +42 -0
  178. paddlex/configs/small_object_detection/PP-YOLOE_plus_SOD-S.yaml +42 -0
  179. paddlex/configs/small_object_detection/PP-YOLOE_plus_SOD-largesize-L.yaml +42 -0
  180. paddlex/configs/structure_analysis/PicoDet-L_layout_17cls.yaml +40 -0
  181. paddlex/configs/structure_analysis/PicoDet-L_layout_3cls.yaml +40 -0
  182. paddlex/configs/structure_analysis/PicoDet-S_layout_17cls.yaml +40 -0
  183. paddlex/configs/structure_analysis/PicoDet-S_layout_3cls.yaml +40 -0
  184. paddlex/configs/structure_analysis/PicoDet_layout_1x.yaml +40 -0
  185. paddlex/configs/structure_analysis/PicoDet_layout_1x_table.yaml +40 -0
  186. paddlex/configs/structure_analysis/RT-DETR-H_layout_17cls.yaml +40 -0
  187. paddlex/configs/structure_analysis/RT-DETR-H_layout_3cls.yaml +40 -0
  188. paddlex/configs/table_recognition/SLANet.yaml +39 -0
  189. paddlex/configs/table_recognition/SLANet_plus.yaml +39 -0
  190. paddlex/configs/text_detection/PP-OCRv4_mobile_det.yaml +40 -0
  191. paddlex/configs/text_detection/PP-OCRv4_server_det.yaml +40 -0
  192. paddlex/configs/text_detection_seal/PP-OCRv4_mobile_seal_det.yaml +40 -0
  193. paddlex/configs/text_detection_seal/PP-OCRv4_server_seal_det.yaml +40 -0
  194. paddlex/configs/text_recognition/PP-OCRv4_mobile_rec.yaml +39 -0
  195. paddlex/configs/text_recognition/PP-OCRv4_server_rec.yaml +39 -0
  196. paddlex/configs/text_recognition/ch_RepSVTR_rec.yaml +39 -0
  197. paddlex/configs/text_recognition/ch_SVTRv2_rec.yaml +39 -0
  198. paddlex/configs/ts_anomaly_detection/AutoEncoder_ad.yaml +37 -0
  199. paddlex/configs/ts_anomaly_detection/DLinear_ad.yaml +37 -0
  200. paddlex/configs/ts_anomaly_detection/Nonstationary_ad.yaml +37 -0
  201. paddlex/configs/ts_anomaly_detection/PatchTST_ad.yaml +37 -0
  202. paddlex/configs/ts_anomaly_detection/TimesNet_ad.yaml +37 -0
  203. paddlex/configs/ts_classification/TimesNet_cls.yaml +37 -0
  204. paddlex/configs/ts_forecast/DLinear.yaml +38 -0
  205. paddlex/configs/ts_forecast/NLinear.yaml +38 -0
  206. paddlex/configs/ts_forecast/Nonstationary.yaml +38 -0
  207. paddlex/configs/ts_forecast/PatchTST.yaml +38 -0
  208. paddlex/configs/ts_forecast/RLinear.yaml +38 -0
  209. paddlex/configs/ts_forecast/TiDE.yaml +38 -0
  210. paddlex/configs/ts_forecast/TimesNet.yaml +38 -0
  211. paddlex/configs/vehicle_attribute/PP-LCNet_x1_0_vehicle_attribute.yaml +41 -0
  212. paddlex/configs/vehicle_detection/PP-YOLOE-L_vehicle.yaml +41 -0
  213. paddlex/configs/vehicle_detection/PP-YOLOE-S_vehicle.yaml +42 -0
  214. paddlex/engine.py +54 -0
  215. paddlex/inference/__init__.py +17 -0
  216. paddlex/inference/components/__init__.py +18 -0
  217. paddlex/inference/components/base.py +292 -0
  218. paddlex/inference/components/llm/__init__.py +25 -0
  219. paddlex/inference/components/llm/base.py +65 -0
  220. paddlex/inference/components/llm/erniebot.py +212 -0
  221. paddlex/inference/components/paddle_predictor/__init__.py +20 -0
  222. paddlex/inference/components/paddle_predictor/predictor.py +332 -0
  223. paddlex/inference/components/retrieval/__init__.py +15 -0
  224. paddlex/inference/components/retrieval/faiss.py +359 -0
  225. paddlex/inference/components/task_related/__init__.py +33 -0
  226. paddlex/inference/components/task_related/clas.py +124 -0
  227. paddlex/inference/components/task_related/det.py +284 -0
  228. paddlex/inference/components/task_related/instance_seg.py +89 -0
  229. paddlex/inference/components/task_related/seal_det_warp.py +940 -0
  230. paddlex/inference/components/task_related/seg.py +40 -0
  231. paddlex/inference/components/task_related/table_rec.py +191 -0
  232. paddlex/inference/components/task_related/text_det.py +895 -0
  233. paddlex/inference/components/task_related/text_rec.py +353 -0
  234. paddlex/inference/components/task_related/warp.py +43 -0
  235. paddlex/inference/components/transforms/__init__.py +16 -0
  236. paddlex/inference/components/transforms/image/__init__.py +15 -0
  237. paddlex/inference/components/transforms/image/common.py +598 -0
  238. paddlex/inference/components/transforms/image/funcs.py +58 -0
  239. paddlex/inference/components/transforms/read_data.py +67 -0
  240. paddlex/inference/components/transforms/ts/__init__.py +15 -0
  241. paddlex/inference/components/transforms/ts/common.py +393 -0
  242. paddlex/inference/components/transforms/ts/funcs.py +424 -0
  243. paddlex/inference/models/__init__.py +106 -0
  244. paddlex/inference/models/anomaly_detection.py +87 -0
  245. paddlex/inference/models/base/__init__.py +16 -0
  246. paddlex/inference/models/base/base_predictor.py +76 -0
  247. paddlex/inference/models/base/basic_predictor.py +122 -0
  248. paddlex/inference/models/face_recognition.py +21 -0
  249. paddlex/inference/models/formula_recognition.py +55 -0
  250. paddlex/inference/models/general_recognition.py +99 -0
  251. paddlex/inference/models/image_classification.py +101 -0
  252. paddlex/inference/models/image_unwarping.py +43 -0
  253. paddlex/inference/models/instance_segmentation.py +66 -0
  254. paddlex/inference/models/multilabel_classification.py +33 -0
  255. paddlex/inference/models/object_detection.py +129 -0
  256. paddlex/inference/models/semantic_segmentation.py +86 -0
  257. paddlex/inference/models/table_recognition.py +106 -0
  258. paddlex/inference/models/text_detection.py +105 -0
  259. paddlex/inference/models/text_recognition.py +78 -0
  260. paddlex/inference/models/ts_ad.py +68 -0
  261. paddlex/inference/models/ts_cls.py +57 -0
  262. paddlex/inference/models/ts_fc.py +73 -0
  263. paddlex/inference/pipelines/__init__.py +127 -0
  264. paddlex/inference/pipelines/attribute_recognition.py +92 -0
  265. paddlex/inference/pipelines/base.py +86 -0
  266. paddlex/inference/pipelines/face_recognition.py +49 -0
  267. paddlex/inference/pipelines/formula_recognition.py +102 -0
  268. paddlex/inference/pipelines/layout_parsing/__init__.py +15 -0
  269. paddlex/inference/pipelines/layout_parsing/layout_parsing.py +362 -0
  270. paddlex/inference/pipelines/ocr.py +80 -0
  271. paddlex/inference/pipelines/pp_shitu_v2.py +152 -0
  272. paddlex/inference/pipelines/ppchatocrv3/__init__.py +15 -0
  273. paddlex/inference/pipelines/ppchatocrv3/ch_prompt.yaml +14 -0
  274. paddlex/inference/pipelines/ppchatocrv3/ppchatocrv3.py +717 -0
  275. paddlex/inference/pipelines/ppchatocrv3/utils.py +168 -0
  276. paddlex/inference/pipelines/seal_recognition.py +152 -0
  277. paddlex/inference/pipelines/serving/__init__.py +17 -0
  278. paddlex/inference/pipelines/serving/_pipeline_apps/__init__.py +205 -0
  279. paddlex/inference/pipelines/serving/_pipeline_apps/anomaly_detection.py +80 -0
  280. paddlex/inference/pipelines/serving/_pipeline_apps/face_recognition.py +317 -0
  281. paddlex/inference/pipelines/serving/_pipeline_apps/formula_recognition.py +119 -0
  282. paddlex/inference/pipelines/serving/_pipeline_apps/image_classification.py +101 -0
  283. paddlex/inference/pipelines/serving/_pipeline_apps/instance_segmentation.py +112 -0
  284. paddlex/inference/pipelines/serving/_pipeline_apps/layout_parsing.py +205 -0
  285. paddlex/inference/pipelines/serving/_pipeline_apps/multi_label_image_classification.py +90 -0
  286. paddlex/inference/pipelines/serving/_pipeline_apps/object_detection.py +90 -0
  287. paddlex/inference/pipelines/serving/_pipeline_apps/ocr.py +98 -0
  288. paddlex/inference/pipelines/serving/_pipeline_apps/pedestrian_attribute_recognition.py +102 -0
  289. paddlex/inference/pipelines/serving/_pipeline_apps/pp_shitu_v2.py +319 -0
  290. paddlex/inference/pipelines/serving/_pipeline_apps/ppchatocrv3.py +445 -0
  291. paddlex/inference/pipelines/serving/_pipeline_apps/seal_recognition.py +110 -0
  292. paddlex/inference/pipelines/serving/_pipeline_apps/semantic_segmentation.py +82 -0
  293. paddlex/inference/pipelines/serving/_pipeline_apps/small_object_detection.py +92 -0
  294. paddlex/inference/pipelines/serving/_pipeline_apps/table_recognition.py +110 -0
  295. paddlex/inference/pipelines/serving/_pipeline_apps/ts_ad.py +68 -0
  296. paddlex/inference/pipelines/serving/_pipeline_apps/ts_cls.py +68 -0
  297. paddlex/inference/pipelines/serving/_pipeline_apps/ts_fc.py +68 -0
  298. paddlex/inference/pipelines/serving/_pipeline_apps/vehicle_attribute_recognition.py +102 -0
  299. paddlex/inference/pipelines/serving/app.py +164 -0
  300. paddlex/inference/pipelines/serving/models.py +30 -0
  301. paddlex/inference/pipelines/serving/server.py +25 -0
  302. paddlex/inference/pipelines/serving/storage.py +161 -0
  303. paddlex/inference/pipelines/serving/utils.py +190 -0
  304. paddlex/inference/pipelines/single_model_pipeline.py +76 -0
  305. paddlex/inference/pipelines/table_recognition/__init__.py +15 -0
  306. paddlex/inference/pipelines/table_recognition/table_recognition.py +193 -0
  307. paddlex/inference/pipelines/table_recognition/utils.py +457 -0
  308. paddlex/inference/results/__init__.py +31 -0
  309. paddlex/inference/results/attribute_rec.py +89 -0
  310. paddlex/inference/results/base.py +43 -0
  311. paddlex/inference/results/chat_ocr.py +158 -0
  312. paddlex/inference/results/clas.py +133 -0
  313. paddlex/inference/results/det.py +86 -0
  314. paddlex/inference/results/face_rec.py +34 -0
  315. paddlex/inference/results/formula_rec.py +363 -0
  316. paddlex/inference/results/instance_seg.py +152 -0
  317. paddlex/inference/results/ocr.py +157 -0
  318. paddlex/inference/results/seal_rec.py +50 -0
  319. paddlex/inference/results/seg.py +72 -0
  320. paddlex/inference/results/shitu.py +35 -0
  321. paddlex/inference/results/table_rec.py +109 -0
  322. paddlex/inference/results/text_det.py +33 -0
  323. paddlex/inference/results/text_rec.py +66 -0
  324. paddlex/inference/results/ts.py +37 -0
  325. paddlex/inference/results/utils/__init__.py +13 -0
  326. paddlex/inference/results/utils/mixin.py +204 -0
  327. paddlex/inference/results/warp.py +31 -0
  328. paddlex/inference/utils/__init__.py +13 -0
  329. paddlex/inference/utils/benchmark.py +214 -0
  330. paddlex/inference/utils/color_map.py +123 -0
  331. paddlex/inference/utils/get_pipeline_path.py +26 -0
  332. paddlex/inference/utils/io/__init__.py +33 -0
  333. paddlex/inference/utils/io/readers.py +353 -0
  334. paddlex/inference/utils/io/style.py +374 -0
  335. paddlex/inference/utils/io/tablepyxl.py +149 -0
  336. paddlex/inference/utils/io/writers.py +376 -0
  337. paddlex/inference/utils/new_ir_blacklist.py +22 -0
  338. paddlex/inference/utils/official_models.py +286 -0
  339. paddlex/inference/utils/pp_option.py +236 -0
  340. paddlex/inference/utils/process_hook.py +54 -0
  341. paddlex/model.py +106 -0
  342. paddlex/modules/__init__.py +105 -0
  343. paddlex/modules/anomaly_detection/__init__.py +18 -0
  344. paddlex/modules/anomaly_detection/dataset_checker/__init__.py +95 -0
  345. paddlex/modules/anomaly_detection/dataset_checker/dataset_src/__init__.py +19 -0
  346. paddlex/modules/anomaly_detection/dataset_checker/dataset_src/analyse_dataset.py +79 -0
  347. paddlex/modules/anomaly_detection/dataset_checker/dataset_src/check_dataset.py +87 -0
  348. paddlex/modules/anomaly_detection/dataset_checker/dataset_src/convert_dataset.py +230 -0
  349. paddlex/modules/anomaly_detection/dataset_checker/dataset_src/split_dataset.py +87 -0
  350. paddlex/modules/anomaly_detection/dataset_checker/dataset_src/utils/__init__.py +13 -0
  351. paddlex/modules/anomaly_detection/dataset_checker/dataset_src/utils/visualizer.py +71 -0
  352. paddlex/modules/anomaly_detection/evaluator.py +58 -0
  353. paddlex/modules/anomaly_detection/exportor.py +22 -0
  354. paddlex/modules/anomaly_detection/model_list.py +16 -0
  355. paddlex/modules/anomaly_detection/trainer.py +71 -0
  356. paddlex/modules/base/__init__.py +18 -0
  357. paddlex/modules/base/build_model.py +34 -0
  358. paddlex/modules/base/dataset_checker/__init__.py +16 -0
  359. paddlex/modules/base/dataset_checker/dataset_checker.py +164 -0
  360. paddlex/modules/base/dataset_checker/utils.py +110 -0
  361. paddlex/modules/base/evaluator.py +154 -0
  362. paddlex/modules/base/exportor.py +121 -0
  363. paddlex/modules/base/trainer.py +111 -0
  364. paddlex/modules/face_recognition/__init__.py +18 -0
  365. paddlex/modules/face_recognition/dataset_checker/__init__.py +71 -0
  366. paddlex/modules/face_recognition/dataset_checker/dataset_src/__init__.py +16 -0
  367. paddlex/modules/face_recognition/dataset_checker/dataset_src/check_dataset.py +174 -0
  368. paddlex/modules/face_recognition/dataset_checker/dataset_src/utils/__init__.py +13 -0
  369. paddlex/modules/face_recognition/dataset_checker/dataset_src/utils/visualizer.py +156 -0
  370. paddlex/modules/face_recognition/evaluator.py +52 -0
  371. paddlex/modules/face_recognition/exportor.py +22 -0
  372. paddlex/modules/face_recognition/model_list.py +15 -0
  373. paddlex/modules/face_recognition/trainer.py +97 -0
  374. paddlex/modules/formula_recognition/__init__.py +13 -0
  375. paddlex/modules/formula_recognition/model_list.py +17 -0
  376. paddlex/modules/general_recognition/__init__.py +18 -0
  377. paddlex/modules/general_recognition/dataset_checker/__init__.py +107 -0
  378. paddlex/modules/general_recognition/dataset_checker/dataset_src/__init__.py +19 -0
  379. paddlex/modules/general_recognition/dataset_checker/dataset_src/analyse_dataset.py +98 -0
  380. paddlex/modules/general_recognition/dataset_checker/dataset_src/check_dataset.py +100 -0
  381. paddlex/modules/general_recognition/dataset_checker/dataset_src/convert_dataset.py +99 -0
  382. paddlex/modules/general_recognition/dataset_checker/dataset_src/split_dataset.py +82 -0
  383. paddlex/modules/general_recognition/dataset_checker/dataset_src/utils/__init__.py +13 -0
  384. paddlex/modules/general_recognition/dataset_checker/dataset_src/utils/visualizer.py +150 -0
  385. paddlex/modules/general_recognition/evaluator.py +31 -0
  386. paddlex/modules/general_recognition/exportor.py +22 -0
  387. paddlex/modules/general_recognition/model_list.py +19 -0
  388. paddlex/modules/general_recognition/trainer.py +52 -0
  389. paddlex/modules/image_classification/__init__.py +18 -0
  390. paddlex/modules/image_classification/dataset_checker/__init__.py +104 -0
  391. paddlex/modules/image_classification/dataset_checker/dataset_src/__init__.py +19 -0
  392. paddlex/modules/image_classification/dataset_checker/dataset_src/analyse_dataset.py +93 -0
  393. paddlex/modules/image_classification/dataset_checker/dataset_src/check_dataset.py +131 -0
  394. paddlex/modules/image_classification/dataset_checker/dataset_src/convert_dataset.py +51 -0
  395. paddlex/modules/image_classification/dataset_checker/dataset_src/split_dataset.py +81 -0
  396. paddlex/modules/image_classification/dataset_checker/dataset_src/utils/__init__.py +13 -0
  397. paddlex/modules/image_classification/dataset_checker/dataset_src/utils/visualizer.py +156 -0
  398. paddlex/modules/image_classification/evaluator.py +43 -0
  399. paddlex/modules/image_classification/exportor.py +22 -0
  400. paddlex/modules/image_classification/model_list.py +97 -0
  401. paddlex/modules/image_classification/trainer.py +82 -0
  402. paddlex/modules/image_unwarping/__init__.py +13 -0
  403. paddlex/modules/image_unwarping/model_list.py +17 -0
  404. paddlex/modules/instance_segmentation/__init__.py +18 -0
  405. paddlex/modules/instance_segmentation/dataset_checker/__init__.py +93 -0
  406. paddlex/modules/instance_segmentation/dataset_checker/dataset_src/__init__.py +19 -0
  407. paddlex/modules/instance_segmentation/dataset_checker/dataset_src/analyse_dataset.py +78 -0
  408. paddlex/modules/instance_segmentation/dataset_checker/dataset_src/check_dataset.py +92 -0
  409. paddlex/modules/instance_segmentation/dataset_checker/dataset_src/convert_dataset.py +241 -0
  410. paddlex/modules/instance_segmentation/dataset_checker/dataset_src/split_dataset.py +119 -0
  411. paddlex/modules/instance_segmentation/dataset_checker/dataset_src/utils/__init__.py +13 -0
  412. paddlex/modules/instance_segmentation/dataset_checker/dataset_src/utils/visualizer.py +221 -0
  413. paddlex/modules/instance_segmentation/evaluator.py +32 -0
  414. paddlex/modules/instance_segmentation/exportor.py +22 -0
  415. paddlex/modules/instance_segmentation/model_list.py +33 -0
  416. paddlex/modules/instance_segmentation/trainer.py +31 -0
  417. paddlex/modules/multilabel_classification/__init__.py +18 -0
  418. paddlex/modules/multilabel_classification/dataset_checker/__init__.py +106 -0
  419. paddlex/modules/multilabel_classification/dataset_checker/dataset_src/__init__.py +19 -0
  420. paddlex/modules/multilabel_classification/dataset_checker/dataset_src/analyse_dataset.py +95 -0
  421. paddlex/modules/multilabel_classification/dataset_checker/dataset_src/check_dataset.py +131 -0
  422. paddlex/modules/multilabel_classification/dataset_checker/dataset_src/convert_dataset.py +117 -0
  423. paddlex/modules/multilabel_classification/dataset_checker/dataset_src/split_dataset.py +81 -0
  424. paddlex/modules/multilabel_classification/dataset_checker/dataset_src/utils/__init__.py +13 -0
  425. paddlex/modules/multilabel_classification/dataset_checker/dataset_src/utils/visualizer.py +153 -0
  426. paddlex/modules/multilabel_classification/evaluator.py +43 -0
  427. paddlex/modules/multilabel_classification/exportor.py +22 -0
  428. paddlex/modules/multilabel_classification/model_list.py +24 -0
  429. paddlex/modules/multilabel_classification/trainer.py +85 -0
  430. paddlex/modules/object_detection/__init__.py +18 -0
  431. paddlex/modules/object_detection/dataset_checker/__init__.py +115 -0
  432. paddlex/modules/object_detection/dataset_checker/dataset_src/__init__.py +19 -0
  433. paddlex/modules/object_detection/dataset_checker/dataset_src/analyse_dataset.py +80 -0
  434. paddlex/modules/object_detection/dataset_checker/dataset_src/check_dataset.py +86 -0
  435. paddlex/modules/object_detection/dataset_checker/dataset_src/convert_dataset.py +433 -0
  436. paddlex/modules/object_detection/dataset_checker/dataset_src/split_dataset.py +119 -0
  437. paddlex/modules/object_detection/dataset_checker/dataset_src/utils/__init__.py +13 -0
  438. paddlex/modules/object_detection/dataset_checker/dataset_src/utils/visualizer.py +192 -0
  439. paddlex/modules/object_detection/evaluator.py +41 -0
  440. paddlex/modules/object_detection/exportor.py +22 -0
  441. paddlex/modules/object_detection/model_list.py +74 -0
  442. paddlex/modules/object_detection/trainer.py +85 -0
  443. paddlex/modules/semantic_segmentation/__init__.py +18 -0
  444. paddlex/modules/semantic_segmentation/dataset_checker/__init__.py +95 -0
  445. paddlex/modules/semantic_segmentation/dataset_checker/dataset_src/__init__.py +19 -0
  446. paddlex/modules/semantic_segmentation/dataset_checker/dataset_src/analyse_dataset.py +73 -0
  447. paddlex/modules/semantic_segmentation/dataset_checker/dataset_src/check_dataset.py +80 -0
  448. paddlex/modules/semantic_segmentation/dataset_checker/dataset_src/convert_dataset.py +162 -0
  449. paddlex/modules/semantic_segmentation/dataset_checker/dataset_src/split_dataset.py +87 -0
  450. paddlex/modules/semantic_segmentation/dataset_checker/dataset_src/utils/__init__.py +13 -0
  451. paddlex/modules/semantic_segmentation/dataset_checker/dataset_src/utils/visualizer.py +71 -0
  452. paddlex/modules/semantic_segmentation/evaluator.py +58 -0
  453. paddlex/modules/semantic_segmentation/exportor.py +22 -0
  454. paddlex/modules/semantic_segmentation/model_list.py +35 -0
  455. paddlex/modules/semantic_segmentation/trainer.py +71 -0
  456. paddlex/modules/table_recognition/__init__.py +18 -0
  457. paddlex/modules/table_recognition/dataset_checker/__init__.py +83 -0
  458. paddlex/modules/table_recognition/dataset_checker/dataset_src/__init__.py +18 -0
  459. paddlex/modules/table_recognition/dataset_checker/dataset_src/analyse_dataset.py +58 -0
  460. paddlex/modules/table_recognition/dataset_checker/dataset_src/check_dataset.py +87 -0
  461. paddlex/modules/table_recognition/dataset_checker/dataset_src/split_dataset.py +79 -0
  462. paddlex/modules/table_recognition/evaluator.py +43 -0
  463. paddlex/modules/table_recognition/exportor.py +22 -0
  464. paddlex/modules/table_recognition/model_list.py +19 -0
  465. paddlex/modules/table_recognition/trainer.py +70 -0
  466. paddlex/modules/text_detection/__init__.py +18 -0
  467. paddlex/modules/text_detection/dataset_checker/__init__.py +94 -0
  468. paddlex/modules/text_detection/dataset_checker/dataset_src/__init__.py +18 -0
  469. paddlex/modules/text_detection/dataset_checker/dataset_src/analyse_dataset.py +217 -0
  470. paddlex/modules/text_detection/dataset_checker/dataset_src/check_dataset.py +96 -0
  471. paddlex/modules/text_detection/dataset_checker/dataset_src/split_dataset.py +140 -0
  472. paddlex/modules/text_detection/evaluator.py +41 -0
  473. paddlex/modules/text_detection/exportor.py +22 -0
  474. paddlex/modules/text_detection/model_list.py +22 -0
  475. paddlex/modules/text_detection/trainer.py +68 -0
  476. paddlex/modules/text_recognition/__init__.py +18 -0
  477. paddlex/modules/text_recognition/dataset_checker/__init__.py +114 -0
  478. paddlex/modules/text_recognition/dataset_checker/dataset_src/__init__.py +19 -0
  479. paddlex/modules/text_recognition/dataset_checker/dataset_src/analyse_dataset.py +161 -0
  480. paddlex/modules/text_recognition/dataset_checker/dataset_src/check_dataset.py +97 -0
  481. paddlex/modules/text_recognition/dataset_checker/dataset_src/convert_dataset.py +94 -0
  482. paddlex/modules/text_recognition/dataset_checker/dataset_src/split_dataset.py +81 -0
  483. paddlex/modules/text_recognition/evaluator.py +63 -0
  484. paddlex/modules/text_recognition/exportor.py +25 -0
  485. paddlex/modules/text_recognition/model_list.py +20 -0
  486. paddlex/modules/text_recognition/trainer.py +105 -0
  487. paddlex/modules/ts_anomaly_detection/__init__.py +19 -0
  488. paddlex/modules/ts_anomaly_detection/dataset_checker/__init__.py +97 -0
  489. paddlex/modules/ts_anomaly_detection/dataset_checker/dataset_src/__init__.py +19 -0
  490. paddlex/modules/ts_anomaly_detection/dataset_checker/dataset_src/analyse_dataset.py +27 -0
  491. paddlex/modules/ts_anomaly_detection/dataset_checker/dataset_src/check_dataset.py +64 -0
  492. paddlex/modules/ts_anomaly_detection/dataset_checker/dataset_src/convert_dataset.py +78 -0
  493. paddlex/modules/ts_anomaly_detection/dataset_checker/dataset_src/split_dataset.py +63 -0
  494. paddlex/modules/ts_anomaly_detection/evaluator.py +67 -0
  495. paddlex/modules/ts_anomaly_detection/exportor.py +45 -0
  496. paddlex/modules/ts_anomaly_detection/model_list.py +22 -0
  497. paddlex/modules/ts_anomaly_detection/trainer.py +97 -0
  498. paddlex/modules/ts_classification/__init__.py +19 -0
  499. paddlex/modules/ts_classification/dataset_checker/__init__.py +97 -0
  500. paddlex/modules/ts_classification/dataset_checker/dataset_src/__init__.py +19 -0
  501. paddlex/modules/ts_classification/dataset_checker/dataset_src/analyse_dataset.py +74 -0
  502. paddlex/modules/ts_classification/dataset_checker/dataset_src/check_dataset.py +64 -0
  503. paddlex/modules/ts_classification/dataset_checker/dataset_src/convert_dataset.py +78 -0
  504. paddlex/modules/ts_classification/dataset_checker/dataset_src/split_dataset.py +88 -0
  505. paddlex/modules/ts_classification/evaluator.py +66 -0
  506. paddlex/modules/ts_classification/exportor.py +45 -0
  507. paddlex/modules/ts_classification/model_list.py +18 -0
  508. paddlex/modules/ts_classification/trainer.py +92 -0
  509. paddlex/modules/ts_forecast/__init__.py +19 -0
  510. paddlex/modules/ts_forecast/dataset_checker/__init__.py +97 -0
  511. paddlex/modules/ts_forecast/dataset_checker/dataset_src/__init__.py +19 -0
  512. paddlex/modules/ts_forecast/dataset_checker/dataset_src/analyse_dataset.py +27 -0
  513. paddlex/modules/ts_forecast/dataset_checker/dataset_src/check_dataset.py +64 -0
  514. paddlex/modules/ts_forecast/dataset_checker/dataset_src/convert_dataset.py +77 -0
  515. paddlex/modules/ts_forecast/dataset_checker/dataset_src/split_dataset.py +63 -0
  516. paddlex/modules/ts_forecast/evaluator.py +66 -0
  517. paddlex/modules/ts_forecast/exportor.py +45 -0
  518. paddlex/modules/ts_forecast/model_list.py +24 -0
  519. paddlex/modules/ts_forecast/trainer.py +92 -0
  520. paddlex/paddlex_cli.py +197 -0
  521. paddlex/pipelines/OCR.yaml +8 -0
  522. paddlex/pipelines/PP-ChatOCRv3-doc.yaml +27 -0
  523. paddlex/pipelines/PP-ShiTuV2.yaml +13 -0
  524. paddlex/pipelines/anomaly_detection.yaml +7 -0
  525. paddlex/pipelines/face_recognition.yaml +13 -0
  526. paddlex/pipelines/formula_recognition.yaml +8 -0
  527. paddlex/pipelines/image_classification.yaml +7 -0
  528. paddlex/pipelines/instance_segmentation.yaml +7 -0
  529. paddlex/pipelines/layout_parsing.yaml +14 -0
  530. paddlex/pipelines/multi_label_image_classification.yaml +7 -0
  531. paddlex/pipelines/object_detection.yaml +7 -0
  532. paddlex/pipelines/pedestrian_attribute_recognition.yaml +7 -0
  533. paddlex/pipelines/seal_recognition.yaml +10 -0
  534. paddlex/pipelines/semantic_segmentation.yaml +7 -0
  535. paddlex/pipelines/small_object_detection.yaml +7 -0
  536. paddlex/pipelines/table_recognition.yaml +12 -0
  537. paddlex/pipelines/ts_ad.yaml +7 -0
  538. paddlex/pipelines/ts_cls.yaml +7 -0
  539. paddlex/pipelines/ts_fc.yaml +7 -0
  540. paddlex/pipelines/vehicle_attribute_recognition.yaml +7 -0
  541. paddlex/repo_apis/PaddleClas_api/__init__.py +17 -0
  542. paddlex/repo_apis/PaddleClas_api/cls/__init__.py +19 -0
  543. paddlex/repo_apis/PaddleClas_api/cls/config.py +594 -0
  544. paddlex/repo_apis/PaddleClas_api/cls/model.py +349 -0
  545. paddlex/repo_apis/PaddleClas_api/cls/register.py +890 -0
  546. paddlex/repo_apis/PaddleClas_api/cls/runner.py +219 -0
  547. paddlex/repo_apis/PaddleClas_api/shitu_rec/__init__.py +18 -0
  548. paddlex/repo_apis/PaddleClas_api/shitu_rec/config.py +141 -0
  549. paddlex/repo_apis/PaddleClas_api/shitu_rec/model.py +23 -0
  550. paddlex/repo_apis/PaddleClas_api/shitu_rec/register.py +68 -0
  551. paddlex/repo_apis/PaddleClas_api/shitu_rec/runner.py +55 -0
  552. paddlex/repo_apis/PaddleDetection_api/__init__.py +17 -0
  553. paddlex/repo_apis/PaddleDetection_api/config_helper.py +280 -0
  554. paddlex/repo_apis/PaddleDetection_api/instance_seg/__init__.py +18 -0
  555. paddlex/repo_apis/PaddleDetection_api/instance_seg/config.py +454 -0
  556. paddlex/repo_apis/PaddleDetection_api/instance_seg/model.py +397 -0
  557. paddlex/repo_apis/PaddleDetection_api/instance_seg/register.py +263 -0
  558. paddlex/repo_apis/PaddleDetection_api/instance_seg/runner.py +226 -0
  559. paddlex/repo_apis/PaddleDetection_api/object_det/__init__.py +19 -0
  560. paddlex/repo_apis/PaddleDetection_api/object_det/config.py +517 -0
  561. paddlex/repo_apis/PaddleDetection_api/object_det/model.py +424 -0
  562. paddlex/repo_apis/PaddleDetection_api/object_det/official_categories.py +139 -0
  563. paddlex/repo_apis/PaddleDetection_api/object_det/register.py +927 -0
  564. paddlex/repo_apis/PaddleDetection_api/object_det/runner.py +226 -0
  565. paddlex/repo_apis/PaddleNLP_api/__init__.py +13 -0
  566. paddlex/repo_apis/PaddleOCR_api/__init__.py +20 -0
  567. paddlex/repo_apis/PaddleOCR_api/config_utils.py +53 -0
  568. paddlex/repo_apis/PaddleOCR_api/table_rec/__init__.py +16 -0
  569. paddlex/repo_apis/PaddleOCR_api/table_rec/config.py +64 -0
  570. paddlex/repo_apis/PaddleOCR_api/table_rec/model.py +126 -0
  571. paddlex/repo_apis/PaddleOCR_api/table_rec/register.py +53 -0
  572. paddlex/repo_apis/PaddleOCR_api/table_rec/runner.py +51 -0
  573. paddlex/repo_apis/PaddleOCR_api/text_det/__init__.py +16 -0
  574. paddlex/repo_apis/PaddleOCR_api/text_det/config.py +62 -0
  575. paddlex/repo_apis/PaddleOCR_api/text_det/model.py +72 -0
  576. paddlex/repo_apis/PaddleOCR_api/text_det/register.py +72 -0
  577. paddlex/repo_apis/PaddleOCR_api/text_det/runner.py +53 -0
  578. paddlex/repo_apis/PaddleOCR_api/text_rec/__init__.py +16 -0
  579. paddlex/repo_apis/PaddleOCR_api/text_rec/config.py +542 -0
  580. paddlex/repo_apis/PaddleOCR_api/text_rec/model.py +396 -0
  581. paddlex/repo_apis/PaddleOCR_api/text_rec/register.py +80 -0
  582. paddlex/repo_apis/PaddleOCR_api/text_rec/runner.py +240 -0
  583. paddlex/repo_apis/PaddleSeg_api/__init__.py +16 -0
  584. paddlex/repo_apis/PaddleSeg_api/base_seg_config.py +134 -0
  585. paddlex/repo_apis/PaddleSeg_api/seg/__init__.py +16 -0
  586. paddlex/repo_apis/PaddleSeg_api/seg/config.py +177 -0
  587. paddlex/repo_apis/PaddleSeg_api/seg/model.py +481 -0
  588. paddlex/repo_apis/PaddleSeg_api/seg/register.py +253 -0
  589. paddlex/repo_apis/PaddleSeg_api/seg/runner.py +262 -0
  590. paddlex/repo_apis/PaddleTS_api/__init__.py +19 -0
  591. paddlex/repo_apis/PaddleTS_api/ts_ad/__init__.py +16 -0
  592. paddlex/repo_apis/PaddleTS_api/ts_ad/config.py +89 -0
  593. paddlex/repo_apis/PaddleTS_api/ts_ad/register.py +146 -0
  594. paddlex/repo_apis/PaddleTS_api/ts_ad/runner.py +158 -0
  595. paddlex/repo_apis/PaddleTS_api/ts_base/__init__.py +13 -0
  596. paddlex/repo_apis/PaddleTS_api/ts_base/config.py +222 -0
  597. paddlex/repo_apis/PaddleTS_api/ts_base/model.py +272 -0
  598. paddlex/repo_apis/PaddleTS_api/ts_base/runner.py +158 -0
  599. paddlex/repo_apis/PaddleTS_api/ts_cls/__init__.py +16 -0
  600. paddlex/repo_apis/PaddleTS_api/ts_cls/config.py +73 -0
  601. paddlex/repo_apis/PaddleTS_api/ts_cls/register.py +59 -0
  602. paddlex/repo_apis/PaddleTS_api/ts_cls/runner.py +158 -0
  603. paddlex/repo_apis/PaddleTS_api/ts_fc/__init__.py +16 -0
  604. paddlex/repo_apis/PaddleTS_api/ts_fc/config.py +137 -0
  605. paddlex/repo_apis/PaddleTS_api/ts_fc/register.py +186 -0
  606. paddlex/repo_apis/__init__.py +13 -0
  607. paddlex/repo_apis/base/__init__.py +23 -0
  608. paddlex/repo_apis/base/config.py +238 -0
  609. paddlex/repo_apis/base/model.py +571 -0
  610. paddlex/repo_apis/base/register.py +135 -0
  611. paddlex/repo_apis/base/runner.py +390 -0
  612. paddlex/repo_apis/base/utils/__init__.py +13 -0
  613. paddlex/repo_apis/base/utils/arg.py +64 -0
  614. paddlex/repo_apis/base/utils/subprocess.py +107 -0
  615. paddlex/repo_manager/__init__.py +24 -0
  616. paddlex/repo_manager/core.py +271 -0
  617. paddlex/repo_manager/meta.py +143 -0
  618. paddlex/repo_manager/repo.py +396 -0
  619. paddlex/repo_manager/requirements.txt +18 -0
  620. paddlex/repo_manager/utils.py +298 -0
  621. paddlex/utils/__init__.py +1 -12
  622. paddlex/utils/cache.py +148 -0
  623. paddlex/utils/config.py +214 -0
  624. paddlex/utils/device.py +103 -0
  625. paddlex/utils/download.py +168 -182
  626. paddlex/utils/errors/__init__.py +17 -0
  627. paddlex/utils/errors/dataset_checker.py +78 -0
  628. paddlex/utils/errors/others.py +152 -0
  629. paddlex/utils/file_interface.py +212 -0
  630. paddlex/utils/flags.py +61 -0
  631. paddlex/utils/fonts/PingFang-SC-Regular.ttf +0 -0
  632. paddlex/utils/fonts/__init__.py +24 -0
  633. paddlex/utils/func_register.py +41 -0
  634. paddlex/utils/interactive_get_pipeline.py +55 -0
  635. paddlex/utils/lazy_loader.py +66 -0
  636. paddlex/utils/logging.py +132 -33
  637. paddlex/utils/misc.py +201 -0
  638. paddlex/utils/result_saver.py +59 -0
  639. paddlex/utils/subclass_register.py +101 -0
  640. paddlex/version.py +54 -0
  641. paddlex-3.0.0b2.dist-info/LICENSE +169 -0
  642. paddlex-3.0.0b2.dist-info/METADATA +760 -0
  643. paddlex-3.0.0b2.dist-info/RECORD +646 -0
  644. paddlex-3.0.0b2.dist-info/WHEEL +5 -0
  645. paddlex-3.0.0b2.dist-info/entry_points.txt +2 -0
  646. paddlex-3.0.0b2.dist-info/top_level.txt +1 -0
  647. PaddleClas/__init__.py +0 -16
  648. PaddleClas/paddleclas.py +0 -375
  649. PaddleClas/ppcls/__init__.py +0 -20
  650. PaddleClas/ppcls/data/__init__.py +0 -15
  651. PaddleClas/ppcls/data/imaug/__init__.py +0 -94
  652. PaddleClas/ppcls/data/imaug/autoaugment.py +0 -264
  653. PaddleClas/ppcls/data/imaug/batch_operators.py +0 -117
  654. PaddleClas/ppcls/data/imaug/cutout.py +0 -41
  655. PaddleClas/ppcls/data/imaug/fmix.py +0 -217
  656. PaddleClas/ppcls/data/imaug/grid.py +0 -89
  657. PaddleClas/ppcls/data/imaug/hide_and_seek.py +0 -44
  658. PaddleClas/ppcls/data/imaug/operators.py +0 -244
  659. PaddleClas/ppcls/data/imaug/randaugment.py +0 -106
  660. PaddleClas/ppcls/data/imaug/random_erasing.py +0 -55
  661. PaddleClas/ppcls/data/reader.py +0 -318
  662. PaddleClas/ppcls/modeling/__init__.py +0 -20
  663. PaddleClas/ppcls/modeling/architectures/__init__.py +0 -51
  664. PaddleClas/ppcls/modeling/architectures/alexnet.py +0 -132
  665. PaddleClas/ppcls/modeling/architectures/darknet.py +0 -161
  666. PaddleClas/ppcls/modeling/architectures/densenet.py +0 -308
  667. PaddleClas/ppcls/modeling/architectures/distillation_models.py +0 -65
  668. PaddleClas/ppcls/modeling/architectures/distilled_vision_transformer.py +0 -196
  669. PaddleClas/ppcls/modeling/architectures/dpn.py +0 -425
  670. PaddleClas/ppcls/modeling/architectures/efficientnet.py +0 -901
  671. PaddleClas/ppcls/modeling/architectures/ghostnet.py +0 -331
  672. PaddleClas/ppcls/modeling/architectures/googlenet.py +0 -207
  673. PaddleClas/ppcls/modeling/architectures/hrnet.py +0 -742
  674. PaddleClas/ppcls/modeling/architectures/inception_v3.py +0 -481
  675. PaddleClas/ppcls/modeling/architectures/inception_v4.py +0 -455
  676. PaddleClas/ppcls/modeling/architectures/mixnet.py +0 -782
  677. PaddleClas/ppcls/modeling/architectures/mobilenet_v1.py +0 -266
  678. PaddleClas/ppcls/modeling/architectures/mobilenet_v2.py +0 -248
  679. PaddleClas/ppcls/modeling/architectures/mobilenet_v3.py +0 -359
  680. PaddleClas/ppcls/modeling/architectures/regnet.py +0 -383
  681. PaddleClas/ppcls/modeling/architectures/repvgg.py +0 -339
  682. PaddleClas/ppcls/modeling/architectures/res2net.py +0 -272
  683. PaddleClas/ppcls/modeling/architectures/res2net_vd.py +0 -295
  684. PaddleClas/ppcls/modeling/architectures/resnest.py +0 -705
  685. PaddleClas/ppcls/modeling/architectures/resnet.py +0 -316
  686. PaddleClas/ppcls/modeling/architectures/resnet_vc.py +0 -309
  687. PaddleClas/ppcls/modeling/architectures/resnet_vd.py +0 -354
  688. PaddleClas/ppcls/modeling/architectures/resnext.py +0 -253
  689. PaddleClas/ppcls/modeling/architectures/resnext101_wsl.py +0 -447
  690. PaddleClas/ppcls/modeling/architectures/resnext_vd.py +0 -266
  691. PaddleClas/ppcls/modeling/architectures/rexnet.py +0 -240
  692. PaddleClas/ppcls/modeling/architectures/se_resnet_vd.py +0 -378
  693. PaddleClas/ppcls/modeling/architectures/se_resnext.py +0 -290
  694. PaddleClas/ppcls/modeling/architectures/se_resnext_vd.py +0 -285
  695. PaddleClas/ppcls/modeling/architectures/shufflenet_v2.py +0 -320
  696. PaddleClas/ppcls/modeling/architectures/squeezenet.py +0 -154
  697. PaddleClas/ppcls/modeling/architectures/vgg.py +0 -152
  698. PaddleClas/ppcls/modeling/architectures/vision_transformer.py +0 -402
  699. PaddleClas/ppcls/modeling/architectures/xception.py +0 -345
  700. PaddleClas/ppcls/modeling/architectures/xception_deeplab.py +0 -386
  701. PaddleClas/ppcls/modeling/loss.py +0 -154
  702. PaddleClas/ppcls/modeling/utils.py +0 -53
  703. PaddleClas/ppcls/optimizer/__init__.py +0 -19
  704. PaddleClas/ppcls/optimizer/learning_rate.py +0 -159
  705. PaddleClas/ppcls/optimizer/optimizer.py +0 -165
  706. PaddleClas/ppcls/utils/__init__.py +0 -27
  707. PaddleClas/ppcls/utils/check.py +0 -151
  708. PaddleClas/ppcls/utils/config.py +0 -201
  709. PaddleClas/ppcls/utils/logger.py +0 -120
  710. PaddleClas/ppcls/utils/metrics.py +0 -107
  711. PaddleClas/ppcls/utils/misc.py +0 -62
  712. PaddleClas/ppcls/utils/model_zoo.py +0 -213
  713. PaddleClas/ppcls/utils/save_load.py +0 -163
  714. PaddleClas/setup.py +0 -55
  715. PaddleClas/tools/__init__.py +0 -15
  716. PaddleClas/tools/download.py +0 -50
  717. PaddleClas/tools/ema.py +0 -58
  718. PaddleClas/tools/eval.py +0 -112
  719. PaddleClas/tools/export_model.py +0 -85
  720. PaddleClas/tools/export_serving_model.py +0 -76
  721. PaddleClas/tools/infer/__init__.py +0 -16
  722. PaddleClas/tools/infer/infer.py +0 -94
  723. PaddleClas/tools/infer/predict.py +0 -117
  724. PaddleClas/tools/infer/utils.py +0 -233
  725. PaddleClas/tools/program.py +0 -444
  726. PaddleClas/tools/test_hubserving.py +0 -113
  727. PaddleClas/tools/train.py +0 -141
  728. paddlex/cls.py +0 -76
  729. paddlex/command.py +0 -215
  730. paddlex/cv/__init__.py +0 -17
  731. paddlex/cv/datasets/__init__.py +0 -18
  732. paddlex/cv/datasets/coco.py +0 -169
  733. paddlex/cv/datasets/imagenet.py +0 -88
  734. paddlex/cv/datasets/seg_dataset.py +0 -91
  735. paddlex/cv/datasets/voc.py +0 -301
  736. paddlex/cv/models/__init__.py +0 -18
  737. paddlex/cv/models/base.py +0 -623
  738. paddlex/cv/models/classifier.py +0 -814
  739. paddlex/cv/models/detector.py +0 -1747
  740. paddlex/cv/models/load_model.py +0 -126
  741. paddlex/cv/models/segmenter.py +0 -673
  742. paddlex/cv/models/slim/__init__.py +0 -13
  743. paddlex/cv/models/slim/prune.py +0 -55
  744. paddlex/cv/models/utils/__init__.py +0 -13
  745. paddlex/cv/models/utils/det_metrics/__init__.py +0 -15
  746. paddlex/cv/models/utils/det_metrics/coco_utils.py +0 -217
  747. paddlex/cv/models/utils/det_metrics/metrics.py +0 -220
  748. paddlex/cv/models/utils/ema.py +0 -48
  749. paddlex/cv/models/utils/seg_metrics.py +0 -62
  750. paddlex/cv/models/utils/visualize.py +0 -394
  751. paddlex/cv/transforms/__init__.py +0 -46
  752. paddlex/cv/transforms/batch_operators.py +0 -286
  753. paddlex/cv/transforms/box_utils.py +0 -41
  754. paddlex/cv/transforms/functions.py +0 -193
  755. paddlex/cv/transforms/operators.py +0 -1402
  756. paddlex/det.py +0 -43
  757. paddlex/paddleseg/__init__.py +0 -17
  758. paddlex/paddleseg/core/__init__.py +0 -20
  759. paddlex/paddleseg/core/infer.py +0 -289
  760. paddlex/paddleseg/core/predict.py +0 -145
  761. paddlex/paddleseg/core/train.py +0 -258
  762. paddlex/paddleseg/core/val.py +0 -172
  763. paddlex/paddleseg/cvlibs/__init__.py +0 -17
  764. paddlex/paddleseg/cvlibs/callbacks.py +0 -279
  765. paddlex/paddleseg/cvlibs/config.py +0 -359
  766. paddlex/paddleseg/cvlibs/manager.py +0 -142
  767. paddlex/paddleseg/cvlibs/param_init.py +0 -91
  768. paddlex/paddleseg/datasets/__init__.py +0 -21
  769. paddlex/paddleseg/datasets/ade.py +0 -112
  770. paddlex/paddleseg/datasets/cityscapes.py +0 -86
  771. paddlex/paddleseg/datasets/cocostuff.py +0 -79
  772. paddlex/paddleseg/datasets/dataset.py +0 -164
  773. paddlex/paddleseg/datasets/mini_deep_globe_road_extraction.py +0 -95
  774. paddlex/paddleseg/datasets/optic_disc_seg.py +0 -97
  775. paddlex/paddleseg/datasets/pascal_context.py +0 -80
  776. paddlex/paddleseg/datasets/voc.py +0 -113
  777. paddlex/paddleseg/models/__init__.py +0 -39
  778. paddlex/paddleseg/models/ann.py +0 -436
  779. paddlex/paddleseg/models/attention_unet.py +0 -189
  780. paddlex/paddleseg/models/backbones/__init__.py +0 -18
  781. paddlex/paddleseg/models/backbones/hrnet.py +0 -815
  782. paddlex/paddleseg/models/backbones/mobilenetv3.py +0 -365
  783. paddlex/paddleseg/models/backbones/resnet_vd.py +0 -364
  784. paddlex/paddleseg/models/backbones/xception_deeplab.py +0 -415
  785. paddlex/paddleseg/models/bisenet.py +0 -311
  786. paddlex/paddleseg/models/danet.py +0 -220
  787. paddlex/paddleseg/models/decoupled_segnet.py +0 -233
  788. paddlex/paddleseg/models/deeplab.py +0 -258
  789. paddlex/paddleseg/models/dnlnet.py +0 -231
  790. paddlex/paddleseg/models/emanet.py +0 -219
  791. paddlex/paddleseg/models/fast_scnn.py +0 -318
  792. paddlex/paddleseg/models/fcn.py +0 -135
  793. paddlex/paddleseg/models/gcnet.py +0 -223
  794. paddlex/paddleseg/models/gscnn.py +0 -357
  795. paddlex/paddleseg/models/hardnet.py +0 -309
  796. paddlex/paddleseg/models/isanet.py +0 -202
  797. paddlex/paddleseg/models/layers/__init__.py +0 -19
  798. paddlex/paddleseg/models/layers/activation.py +0 -73
  799. paddlex/paddleseg/models/layers/attention.py +0 -146
  800. paddlex/paddleseg/models/layers/layer_libs.py +0 -168
  801. paddlex/paddleseg/models/layers/nonlocal2d.py +0 -155
  802. paddlex/paddleseg/models/layers/pyramid_pool.py +0 -182
  803. paddlex/paddleseg/models/losses/__init__.py +0 -27
  804. paddlex/paddleseg/models/losses/binary_cross_entropy_loss.py +0 -174
  805. paddlex/paddleseg/models/losses/bootstrapped_cross_entropy.py +0 -73
  806. paddlex/paddleseg/models/losses/cross_entropy_loss.py +0 -94
  807. paddlex/paddleseg/models/losses/decoupledsegnet_relax_boundary_loss.py +0 -129
  808. paddlex/paddleseg/models/losses/dice_loss.py +0 -61
  809. paddlex/paddleseg/models/losses/edge_attention_loss.py +0 -78
  810. paddlex/paddleseg/models/losses/gscnn_dual_task_loss.py +0 -141
  811. paddlex/paddleseg/models/losses/l1_loss.py +0 -76
  812. paddlex/paddleseg/models/losses/lovasz_loss.py +0 -222
  813. paddlex/paddleseg/models/losses/mean_square_error_loss.py +0 -65
  814. paddlex/paddleseg/models/losses/mixed_loss.py +0 -58
  815. paddlex/paddleseg/models/losses/ohem_cross_entropy_loss.py +0 -99
  816. paddlex/paddleseg/models/losses/ohem_edge_attention_loss.py +0 -114
  817. paddlex/paddleseg/models/ocrnet.py +0 -248
  818. paddlex/paddleseg/models/pspnet.py +0 -147
  819. paddlex/paddleseg/models/sfnet.py +0 -236
  820. paddlex/paddleseg/models/shufflenet_slim.py +0 -268
  821. paddlex/paddleseg/models/u2net.py +0 -574
  822. paddlex/paddleseg/models/unet.py +0 -155
  823. paddlex/paddleseg/models/unet_3plus.py +0 -316
  824. paddlex/paddleseg/models/unet_plusplus.py +0 -237
  825. paddlex/paddleseg/transforms/__init__.py +0 -16
  826. paddlex/paddleseg/transforms/functional.py +0 -161
  827. paddlex/paddleseg/transforms/transforms.py +0 -937
  828. paddlex/paddleseg/utils/__init__.py +0 -22
  829. paddlex/paddleseg/utils/config_check.py +0 -60
  830. paddlex/paddleseg/utils/download.py +0 -163
  831. paddlex/paddleseg/utils/env/__init__.py +0 -16
  832. paddlex/paddleseg/utils/env/seg_env.py +0 -56
  833. paddlex/paddleseg/utils/env/sys_env.py +0 -122
  834. paddlex/paddleseg/utils/logger.py +0 -48
  835. paddlex/paddleseg/utils/metrics.py +0 -146
  836. paddlex/paddleseg/utils/progbar.py +0 -212
  837. paddlex/paddleseg/utils/timer.py +0 -53
  838. paddlex/paddleseg/utils/utils.py +0 -120
  839. paddlex/paddleseg/utils/visualize.py +0 -90
  840. paddlex/ppcls/__init__.py +0 -20
  841. paddlex/ppcls/data/__init__.py +0 -15
  842. paddlex/ppcls/data/imaug/__init__.py +0 -94
  843. paddlex/ppcls/data/imaug/autoaugment.py +0 -264
  844. paddlex/ppcls/data/imaug/batch_operators.py +0 -117
  845. paddlex/ppcls/data/imaug/cutout.py +0 -41
  846. paddlex/ppcls/data/imaug/fmix.py +0 -217
  847. paddlex/ppcls/data/imaug/grid.py +0 -89
  848. paddlex/ppcls/data/imaug/hide_and_seek.py +0 -44
  849. paddlex/ppcls/data/imaug/operators.py +0 -256
  850. paddlex/ppcls/data/imaug/randaugment.py +0 -106
  851. paddlex/ppcls/data/imaug/random_erasing.py +0 -55
  852. paddlex/ppcls/data/reader.py +0 -318
  853. paddlex/ppcls/modeling/__init__.py +0 -20
  854. paddlex/ppcls/modeling/architectures/__init__.py +0 -51
  855. paddlex/ppcls/modeling/architectures/alexnet.py +0 -132
  856. paddlex/ppcls/modeling/architectures/darknet.py +0 -161
  857. paddlex/ppcls/modeling/architectures/densenet.py +0 -308
  858. paddlex/ppcls/modeling/architectures/distillation_models.py +0 -65
  859. paddlex/ppcls/modeling/architectures/distilled_vision_transformer.py +0 -196
  860. paddlex/ppcls/modeling/architectures/dpn.py +0 -425
  861. paddlex/ppcls/modeling/architectures/efficientnet.py +0 -901
  862. paddlex/ppcls/modeling/architectures/ghostnet.py +0 -331
  863. paddlex/ppcls/modeling/architectures/googlenet.py +0 -207
  864. paddlex/ppcls/modeling/architectures/hrnet.py +0 -742
  865. paddlex/ppcls/modeling/architectures/inception_v3.py +0 -541
  866. paddlex/ppcls/modeling/architectures/inception_v4.py +0 -455
  867. paddlex/ppcls/modeling/architectures/mixnet.py +0 -782
  868. paddlex/ppcls/modeling/architectures/mobilenet_v1.py +0 -266
  869. paddlex/ppcls/modeling/architectures/mobilenet_v2.py +0 -248
  870. paddlex/ppcls/modeling/architectures/mobilenet_v3.py +0 -359
  871. paddlex/ppcls/modeling/architectures/regnet.py +0 -383
  872. paddlex/ppcls/modeling/architectures/repvgg.py +0 -339
  873. paddlex/ppcls/modeling/architectures/res2net.py +0 -272
  874. paddlex/ppcls/modeling/architectures/res2net_vd.py +0 -295
  875. paddlex/ppcls/modeling/architectures/resnest.py +0 -705
  876. paddlex/ppcls/modeling/architectures/resnet.py +0 -317
  877. paddlex/ppcls/modeling/architectures/resnet_vc.py +0 -309
  878. paddlex/ppcls/modeling/architectures/resnet_vd.py +0 -354
  879. paddlex/ppcls/modeling/architectures/resnext.py +0 -259
  880. paddlex/ppcls/modeling/architectures/resnext101_wsl.py +0 -447
  881. paddlex/ppcls/modeling/architectures/resnext_vd.py +0 -266
  882. paddlex/ppcls/modeling/architectures/rexnet.py +0 -240
  883. paddlex/ppcls/modeling/architectures/se_resnet_vd.py +0 -378
  884. paddlex/ppcls/modeling/architectures/se_resnext.py +0 -290
  885. paddlex/ppcls/modeling/architectures/se_resnext_vd.py +0 -285
  886. paddlex/ppcls/modeling/architectures/shufflenet_v2.py +0 -320
  887. paddlex/ppcls/modeling/architectures/squeezenet.py +0 -154
  888. paddlex/ppcls/modeling/architectures/vgg.py +0 -152
  889. paddlex/ppcls/modeling/architectures/vision_transformer.py +0 -402
  890. paddlex/ppcls/modeling/architectures/xception.py +0 -345
  891. paddlex/ppcls/modeling/architectures/xception_deeplab.py +0 -386
  892. paddlex/ppcls/modeling/loss.py +0 -158
  893. paddlex/ppcls/modeling/utils.py +0 -53
  894. paddlex/ppcls/optimizer/__init__.py +0 -19
  895. paddlex/ppcls/optimizer/learning_rate.py +0 -159
  896. paddlex/ppcls/optimizer/optimizer.py +0 -165
  897. paddlex/ppcls/utils/__init__.py +0 -27
  898. paddlex/ppcls/utils/check.py +0 -151
  899. paddlex/ppcls/utils/config.py +0 -201
  900. paddlex/ppcls/utils/logger.py +0 -120
  901. paddlex/ppcls/utils/metrics.py +0 -112
  902. paddlex/ppcls/utils/misc.py +0 -62
  903. paddlex/ppcls/utils/model_zoo.py +0 -213
  904. paddlex/ppcls/utils/save_load.py +0 -163
  905. paddlex/ppdet/__init__.py +0 -16
  906. paddlex/ppdet/core/__init__.py +0 -15
  907. paddlex/ppdet/core/config/__init__.py +0 -13
  908. paddlex/ppdet/core/config/schema.py +0 -248
  909. paddlex/ppdet/core/config/yaml_helpers.py +0 -118
  910. paddlex/ppdet/core/workspace.py +0 -279
  911. paddlex/ppdet/data/__init__.py +0 -21
  912. paddlex/ppdet/data/reader.py +0 -304
  913. paddlex/ppdet/data/shm_utils.py +0 -67
  914. paddlex/ppdet/data/source/__init__.py +0 -27
  915. paddlex/ppdet/data/source/category.py +0 -823
  916. paddlex/ppdet/data/source/coco.py +0 -243
  917. paddlex/ppdet/data/source/dataset.py +0 -192
  918. paddlex/ppdet/data/source/keypoint_coco.py +0 -656
  919. paddlex/ppdet/data/source/mot.py +0 -360
  920. paddlex/ppdet/data/source/voc.py +0 -204
  921. paddlex/ppdet/data/source/widerface.py +0 -180
  922. paddlex/ppdet/data/transform/__init__.py +0 -28
  923. paddlex/ppdet/data/transform/autoaugment_utils.py +0 -1593
  924. paddlex/ppdet/data/transform/batch_operators.py +0 -758
  925. paddlex/ppdet/data/transform/gridmask_utils.py +0 -83
  926. paddlex/ppdet/data/transform/keypoint_operators.py +0 -665
  927. paddlex/ppdet/data/transform/mot_operators.py +0 -636
  928. paddlex/ppdet/data/transform/op_helper.py +0 -468
  929. paddlex/ppdet/data/transform/operators.py +0 -2103
  930. paddlex/ppdet/engine/__init__.py +0 -29
  931. paddlex/ppdet/engine/callbacks.py +0 -262
  932. paddlex/ppdet/engine/env.py +0 -47
  933. paddlex/ppdet/engine/export_utils.py +0 -118
  934. paddlex/ppdet/engine/tracker.py +0 -425
  935. paddlex/ppdet/engine/trainer.py +0 -535
  936. paddlex/ppdet/metrics/__init__.py +0 -23
  937. paddlex/ppdet/metrics/coco_utils.py +0 -184
  938. paddlex/ppdet/metrics/json_results.py +0 -151
  939. paddlex/ppdet/metrics/keypoint_metrics.py +0 -202
  940. paddlex/ppdet/metrics/map_utils.py +0 -396
  941. paddlex/ppdet/metrics/metrics.py +0 -300
  942. paddlex/ppdet/metrics/mot_eval_utils.py +0 -192
  943. paddlex/ppdet/metrics/mot_metrics.py +0 -184
  944. paddlex/ppdet/metrics/widerface_utils.py +0 -393
  945. paddlex/ppdet/model_zoo/__init__.py +0 -18
  946. paddlex/ppdet/model_zoo/model_zoo.py +0 -86
  947. paddlex/ppdet/model_zoo/tests/__init__.py +0 -13
  948. paddlex/ppdet/model_zoo/tests/test_get_model.py +0 -48
  949. paddlex/ppdet/model_zoo/tests/test_list_model.py +0 -68
  950. paddlex/ppdet/modeling/__init__.py +0 -41
  951. paddlex/ppdet/modeling/architectures/__init__.py +0 -40
  952. paddlex/ppdet/modeling/architectures/cascade_rcnn.py +0 -144
  953. paddlex/ppdet/modeling/architectures/centernet.py +0 -103
  954. paddlex/ppdet/modeling/architectures/deepsort.py +0 -111
  955. paddlex/ppdet/modeling/architectures/fairmot.py +0 -107
  956. paddlex/ppdet/modeling/architectures/faster_rcnn.py +0 -106
  957. paddlex/ppdet/modeling/architectures/fcos.py +0 -105
  958. paddlex/ppdet/modeling/architectures/jde.py +0 -125
  959. paddlex/ppdet/modeling/architectures/keypoint_hrhrnet.py +0 -286
  960. paddlex/ppdet/modeling/architectures/keypoint_hrnet.py +0 -203
  961. paddlex/ppdet/modeling/architectures/mask_rcnn.py +0 -135
  962. paddlex/ppdet/modeling/architectures/meta_arch.py +0 -45
  963. paddlex/ppdet/modeling/architectures/s2anet.py +0 -103
  964. paddlex/ppdet/modeling/architectures/solov2.py +0 -110
  965. paddlex/ppdet/modeling/architectures/ssd.py +0 -84
  966. paddlex/ppdet/modeling/architectures/ttfnet.py +0 -98
  967. paddlex/ppdet/modeling/architectures/yolo.py +0 -104
  968. paddlex/ppdet/modeling/backbones/__init__.py +0 -37
  969. paddlex/ppdet/modeling/backbones/blazenet.py +0 -322
  970. paddlex/ppdet/modeling/backbones/darknet.py +0 -341
  971. paddlex/ppdet/modeling/backbones/dla.py +0 -244
  972. paddlex/ppdet/modeling/backbones/ghostnet.py +0 -476
  973. paddlex/ppdet/modeling/backbones/hrnet.py +0 -724
  974. paddlex/ppdet/modeling/backbones/mobilenet_v1.py +0 -410
  975. paddlex/ppdet/modeling/backbones/mobilenet_v3.py +0 -497
  976. paddlex/ppdet/modeling/backbones/name_adapter.py +0 -69
  977. paddlex/ppdet/modeling/backbones/res2net.py +0 -358
  978. paddlex/ppdet/modeling/backbones/resnet.py +0 -606
  979. paddlex/ppdet/modeling/backbones/senet.py +0 -140
  980. paddlex/ppdet/modeling/backbones/vgg.py +0 -216
  981. paddlex/ppdet/modeling/bbox_utils.py +0 -464
  982. paddlex/ppdet/modeling/heads/__init__.py +0 -41
  983. paddlex/ppdet/modeling/heads/bbox_head.py +0 -379
  984. paddlex/ppdet/modeling/heads/cascade_head.py +0 -285
  985. paddlex/ppdet/modeling/heads/centernet_head.py +0 -194
  986. paddlex/ppdet/modeling/heads/face_head.py +0 -113
  987. paddlex/ppdet/modeling/heads/fcos_head.py +0 -270
  988. paddlex/ppdet/modeling/heads/keypoint_hrhrnet_head.py +0 -108
  989. paddlex/ppdet/modeling/heads/mask_head.py +0 -253
  990. paddlex/ppdet/modeling/heads/roi_extractor.py +0 -111
  991. paddlex/ppdet/modeling/heads/s2anet_head.py +0 -845
  992. paddlex/ppdet/modeling/heads/solov2_head.py +0 -537
  993. paddlex/ppdet/modeling/heads/ssd_head.py +0 -175
  994. paddlex/ppdet/modeling/heads/ttf_head.py +0 -314
  995. paddlex/ppdet/modeling/heads/yolo_head.py +0 -124
  996. paddlex/ppdet/modeling/keypoint_utils.py +0 -302
  997. paddlex/ppdet/modeling/layers.py +0 -1142
  998. paddlex/ppdet/modeling/losses/__init__.py +0 -35
  999. paddlex/ppdet/modeling/losses/ctfocal_loss.py +0 -67
  1000. paddlex/ppdet/modeling/losses/fairmot_loss.py +0 -41
  1001. paddlex/ppdet/modeling/losses/fcos_loss.py +0 -225
  1002. paddlex/ppdet/modeling/losses/iou_aware_loss.py +0 -48
  1003. paddlex/ppdet/modeling/losses/iou_loss.py +0 -210
  1004. paddlex/ppdet/modeling/losses/jde_loss.py +0 -182
  1005. paddlex/ppdet/modeling/losses/keypoint_loss.py +0 -228
  1006. paddlex/ppdet/modeling/losses/solov2_loss.py +0 -101
  1007. paddlex/ppdet/modeling/losses/ssd_loss.py +0 -163
  1008. paddlex/ppdet/modeling/losses/yolo_loss.py +0 -212
  1009. paddlex/ppdet/modeling/mot/__init__.py +0 -25
  1010. paddlex/ppdet/modeling/mot/matching/__init__.py +0 -19
  1011. paddlex/ppdet/modeling/mot/matching/deepsort_matching.py +0 -382
  1012. paddlex/ppdet/modeling/mot/matching/jde_matching.py +0 -145
  1013. paddlex/ppdet/modeling/mot/motion/__init__.py +0 -17
  1014. paddlex/ppdet/modeling/mot/motion/kalman_filter.py +0 -270
  1015. paddlex/ppdet/modeling/mot/tracker/__init__.py +0 -23
  1016. paddlex/ppdet/modeling/mot/tracker/base_jde_tracker.py +0 -267
  1017. paddlex/ppdet/modeling/mot/tracker/base_sde_tracker.py +0 -145
  1018. paddlex/ppdet/modeling/mot/tracker/deepsort_tracker.py +0 -165
  1019. paddlex/ppdet/modeling/mot/tracker/jde_tracker.py +0 -262
  1020. paddlex/ppdet/modeling/mot/utils.py +0 -181
  1021. paddlex/ppdet/modeling/mot/visualization.py +0 -130
  1022. paddlex/ppdet/modeling/necks/__init__.py +0 -25
  1023. paddlex/ppdet/modeling/necks/centernet_fpn.py +0 -185
  1024. paddlex/ppdet/modeling/necks/fpn.py +0 -233
  1025. paddlex/ppdet/modeling/necks/hrfpn.py +0 -131
  1026. paddlex/ppdet/modeling/necks/ttf_fpn.py +0 -243
  1027. paddlex/ppdet/modeling/necks/yolo_fpn.py +0 -1034
  1028. paddlex/ppdet/modeling/ops.py +0 -1599
  1029. paddlex/ppdet/modeling/post_process.py +0 -449
  1030. paddlex/ppdet/modeling/proposal_generator/__init__.py +0 -2
  1031. paddlex/ppdet/modeling/proposal_generator/anchor_generator.py +0 -135
  1032. paddlex/ppdet/modeling/proposal_generator/proposal_generator.py +0 -81
  1033. paddlex/ppdet/modeling/proposal_generator/rpn_head.py +0 -269
  1034. paddlex/ppdet/modeling/proposal_generator/target.py +0 -671
  1035. paddlex/ppdet/modeling/proposal_generator/target_layer.py +0 -476
  1036. paddlex/ppdet/modeling/reid/__init__.py +0 -23
  1037. paddlex/ppdet/modeling/reid/fairmot_embedding_head.py +0 -117
  1038. paddlex/ppdet/modeling/reid/jde_embedding_head.py +0 -189
  1039. paddlex/ppdet/modeling/reid/pyramidal_embedding.py +0 -151
  1040. paddlex/ppdet/modeling/reid/resnet.py +0 -320
  1041. paddlex/ppdet/modeling/shape_spec.py +0 -33
  1042. paddlex/ppdet/modeling/tests/__init__.py +0 -13
  1043. paddlex/ppdet/modeling/tests/test_architectures.py +0 -59
  1044. paddlex/ppdet/modeling/tests/test_base.py +0 -75
  1045. paddlex/ppdet/modeling/tests/test_ops.py +0 -839
  1046. paddlex/ppdet/modeling/tests/test_yolov3_loss.py +0 -420
  1047. paddlex/ppdet/optimizer.py +0 -285
  1048. paddlex/ppdet/slim/__init__.py +0 -62
  1049. paddlex/ppdet/slim/distill.py +0 -111
  1050. paddlex/ppdet/slim/prune.py +0 -85
  1051. paddlex/ppdet/slim/quant.py +0 -52
  1052. paddlex/ppdet/utils/__init__.py +0 -13
  1053. paddlex/ppdet/utils/check.py +0 -93
  1054. paddlex/ppdet/utils/checkpoint.py +0 -216
  1055. paddlex/ppdet/utils/cli.py +0 -151
  1056. paddlex/ppdet/utils/colormap.py +0 -56
  1057. paddlex/ppdet/utils/download.py +0 -477
  1058. paddlex/ppdet/utils/logger.py +0 -71
  1059. paddlex/ppdet/utils/stats.py +0 -95
  1060. paddlex/ppdet/utils/visualizer.py +0 -292
  1061. paddlex/ppdet/utils/voc_utils.py +0 -87
  1062. paddlex/seg.py +0 -38
  1063. paddlex/tools/__init__.py +0 -16
  1064. paddlex/tools/convert.py +0 -52
  1065. paddlex/tools/dataset_conversion/__init__.py +0 -24
  1066. paddlex/tools/dataset_conversion/x2coco.py +0 -379
  1067. paddlex/tools/dataset_conversion/x2imagenet.py +0 -82
  1068. paddlex/tools/dataset_conversion/x2seg.py +0 -343
  1069. paddlex/tools/dataset_conversion/x2voc.py +0 -230
  1070. paddlex/tools/dataset_split/__init__.py +0 -23
  1071. paddlex/tools/dataset_split/coco_split.py +0 -69
  1072. paddlex/tools/dataset_split/imagenet_split.py +0 -75
  1073. paddlex/tools/dataset_split/seg_split.py +0 -96
  1074. paddlex/tools/dataset_split/utils.py +0 -75
  1075. paddlex/tools/dataset_split/voc_split.py +0 -91
  1076. paddlex/tools/split.py +0 -41
  1077. paddlex/utils/checkpoint.py +0 -439
  1078. paddlex/utils/env.py +0 -71
  1079. paddlex/utils/shm.py +0 -67
  1080. paddlex/utils/stats.py +0 -68
  1081. paddlex/utils/utils.py +0 -140
  1082. paddlex-2.0.0rc4.dist-info/LICENSE +0 -201
  1083. paddlex-2.0.0rc4.dist-info/METADATA +0 -29
  1084. paddlex-2.0.0rc4.dist-info/RECORD +0 -445
  1085. paddlex-2.0.0rc4.dist-info/WHEEL +0 -5
  1086. paddlex-2.0.0rc4.dist-info/entry_points.txt +0 -3
  1087. paddlex-2.0.0rc4.dist-info/top_level.txt +0 -2
@@ -1,1599 +0,0 @@
1
- # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import paddle
16
- import paddle.nn.functional as F
17
- import paddle.nn as nn
18
- from paddle import ParamAttr
19
- from paddle.regularizer import L2Decay
20
-
21
- from paddle.fluid.framework import Variable, in_dygraph_mode
22
- from paddle.fluid import core
23
- from paddle.fluid.layer_helper import LayerHelper
24
- from paddle.fluid.dygraph import layers
25
- from paddle.fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, convert_dtype
26
- import math
27
- import six
28
- import numpy as np
29
- from functools import reduce
30
-
31
- __all__ = [
32
- 'roi_pool',
33
- 'roi_align',
34
- 'prior_box',
35
- 'generate_proposals',
36
- 'iou_similarity',
37
- 'box_coder',
38
- 'yolo_box',
39
- 'multiclass_nms',
40
- 'distribute_fpn_proposals',
41
- 'collect_fpn_proposals',
42
- 'matrix_nms',
43
- 'batch_norm',
44
- 'mish',
45
- ]
46
-
47
-
48
- def mish(x):
49
- return x * paddle.tanh(F.softplus(x))
50
-
51
-
52
- def batch_norm(ch,
53
- norm_type='bn',
54
- norm_decay=0.,
55
- freeze_norm=False,
56
- initializer=None,
57
- data_format='NCHW'):
58
- if norm_type == 'sync_bn':
59
- batch_norm = nn.SyncBatchNorm
60
- else:
61
- batch_norm = nn.BatchNorm2D
62
-
63
- norm_lr = 0. if freeze_norm else 1.
64
- weight_attr = ParamAttr(
65
- initializer=initializer,
66
- learning_rate=norm_lr,
67
- regularizer=L2Decay(norm_decay),
68
- trainable=False if freeze_norm else True)
69
- bias_attr = ParamAttr(
70
- learning_rate=norm_lr,
71
- regularizer=L2Decay(norm_decay),
72
- trainable=False if freeze_norm else True)
73
-
74
- norm_layer = batch_norm(
75
- ch,
76
- weight_attr=weight_attr,
77
- bias_attr=bias_attr,
78
- data_format=data_format)
79
-
80
- norm_params = norm_layer.parameters()
81
- if freeze_norm:
82
- for param in norm_params:
83
- param.stop_gradient = True
84
-
85
- return norm_layer
86
-
87
-
88
- @paddle.jit.not_to_static
89
- def roi_pool(input,
90
- rois,
91
- output_size,
92
- spatial_scale=1.0,
93
- rois_num=None,
94
- name=None):
95
- """
96
-
97
- This operator implements the roi_pooling layer.
98
- Region of interest pooling (also known as RoI pooling) is to perform max pooling on inputs of nonuniform sizes to obtain fixed-size feature maps (e.g. 7*7).
99
-
100
- The operator has three steps:
101
-
102
- 1. Dividing each region proposal into equal-sized sections with output_size(h, w);
103
- 2. Finding the largest value in each section;
104
- 3. Copying these max values to the output buffer.
105
-
106
- For more information, please refer to https://stackoverflow.com/questions/43430056/what-is-roi-layer-in-fast-rcnn
107
-
108
- Args:
109
- input (Tensor): Input feature, 4D-Tensor with the shape of [N,C,H,W],
110
- where N is the batch size, C is the input channel, H is Height, W is weight.
111
- The data type is float32 or float64.
112
- rois (Tensor): ROIs (Regions of Interest) to pool over.
113
- 2D-Tensor or 2D-LoDTensor with the shape of [num_rois,4], the lod level is 1.
114
- Given as [[x1, y1, x2, y2], ...], (x1, y1) is the top left coordinates,
115
- and (x2, y2) is the bottom right coordinates.
116
- output_size (int or tuple[int, int]): The pooled output size(h, w), data type is int32. If int, h and w are both equal to output_size.
117
- spatial_scale (float, optional): Multiplicative spatial scale factor to translate ROI coords from their input scale to the scale used when pooling. Default: 1.0
118
- rois_num (Tensor): The number of RoIs in each image. Default: None
119
- name(str, optional): For detailed information, please refer
120
- to :ref:`api_guide_Name`. Usually name is no need to set and
121
- None by default.
122
-
123
-
124
- Returns:
125
- Tensor: The pooled feature, 4D-Tensor with the shape of [num_rois, C, output_size[0], output_size[1]].
126
-
127
-
128
- Examples:
129
-
130
- .. code-block:: python
131
-
132
- import paddle
133
- from paddlex.ppdet.modeling import ops
134
- paddle.enable_static()
135
-
136
- x = paddle.static.data(
137
- name='data', shape=[None, 256, 32, 32], dtype='float32')
138
- rois = paddle.static.data(
139
- name='rois', shape=[None, 4], dtype='float32')
140
- rois_num = paddle.static.data(name='rois_num', shape=[None], dtype='int32')
141
-
142
- pool_out = ops.roi_pool(
143
- input=x,
144
- rois=rois,
145
- output_size=(1, 1),
146
- spatial_scale=1.0,
147
- rois_num=rois_num)
148
- """
149
- check_type(output_size, 'output_size', (int, tuple), 'roi_pool')
150
- if isinstance(output_size, int):
151
- output_size = (output_size, output_size)
152
-
153
- pooled_height, pooled_width = output_size
154
- if in_dygraph_mode():
155
- assert rois_num is not None, "rois_num should not be None in dygraph mode."
156
- pool_out, argmaxes = core.ops.roi_pool(
157
- input, rois, rois_num, "pooled_height", pooled_height,
158
- "pooled_width", pooled_width, "spatial_scale", spatial_scale)
159
- return pool_out, argmaxes
160
-
161
- else:
162
- check_variable_and_dtype(input, 'input', ['float32'], 'roi_pool')
163
- check_variable_and_dtype(rois, 'rois', ['float32'], 'roi_pool')
164
- helper = LayerHelper('roi_pool', **locals())
165
- dtype = helper.input_dtype()
166
- pool_out = helper.create_variable_for_type_inference(dtype)
167
- argmaxes = helper.create_variable_for_type_inference(dtype='int32')
168
-
169
- inputs = {
170
- "X": input,
171
- "ROIs": rois,
172
- }
173
- if rois_num is not None:
174
- inputs['RoisNum'] = rois_num
175
- helper.append_op(
176
- type="roi_pool",
177
- inputs=inputs,
178
- outputs={"Out": pool_out,
179
- "Argmax": argmaxes},
180
- attrs={
181
- "pooled_height": pooled_height,
182
- "pooled_width": pooled_width,
183
- "spatial_scale": spatial_scale
184
- })
185
- return pool_out, argmaxes
186
-
187
-
188
- @paddle.jit.not_to_static
189
- def roi_align(input,
190
- rois,
191
- output_size,
192
- spatial_scale=1.0,
193
- sampling_ratio=-1,
194
- rois_num=None,
195
- aligned=True,
196
- name=None):
197
- """
198
-
199
- Region of interest align (also known as RoI align) is to perform
200
- bilinear interpolation on inputs of nonuniform sizes to obtain
201
- fixed-size feature maps (e.g. 7*7)
202
-
203
- Dividing each region proposal into equal-sized sections with
204
- the pooled_width and pooled_height. Location remains the origin
205
- result.
206
-
207
- In each ROI bin, the value of the four regularly sampled locations
208
- are computed directly through bilinear interpolation. The output is
209
- the mean of four locations.
210
- Thus avoid the misaligned problem.
211
-
212
- Args:
213
- input (Tensor): Input feature, 4D-Tensor with the shape of [N,C,H,W],
214
- where N is the batch size, C is the input channel, H is Height, W is weight.
215
- The data type is float32 or float64.
216
- rois (Tensor): ROIs (Regions of Interest) to pool over.It should be
217
- a 2-D Tensor or 2-D LoDTensor of shape (num_rois, 4), the lod level is 1.
218
- The data type is float32 or float64. Given as [[x1, y1, x2, y2], ...],
219
- (x1, y1) is the top left coordinates, and (x2, y2) is the bottom right coordinates.
220
- output_size (int or tuple[int, int]): The pooled output size(h, w), data type is int32. If int, h and w are both equal to output_size.
221
- spatial_scale (float32, optional): Multiplicative spatial scale factor to translate ROI coords
222
- from their input scale to the scale used when pooling. Default: 1.0
223
- sampling_ratio(int32, optional): number of sampling points in the interpolation grid.
224
- If <=0, then grid points are adaptive to roi_width and pooled_w, likewise for height. Default: -1
225
- rois_num (Tensor): The number of RoIs in each image. Default: None
226
- name(str, optional): For detailed information, please refer
227
- to :ref:`api_guide_Name`. Usually name is no need to set and
228
- None by default.
229
-
230
- Returns:
231
- Tensor:
232
-
233
- Output: The output of ROIAlignOp is a 4-D tensor with shape (num_rois, channels, pooled_h, pooled_w). The data type is float32 or float64.
234
-
235
-
236
- Examples:
237
- .. code-block:: python
238
-
239
- import paddle
240
- from paddlex.ppdet.modeling import ops
241
- paddle.enable_static()
242
-
243
- x = paddle.static.data(
244
- name='data', shape=[None, 256, 32, 32], dtype='float32')
245
- rois = paddle.static.data(
246
- name='rois', shape=[None, 4], dtype='float32')
247
- rois_num = paddle.static.data(name='rois_num', shape=[None], dtype='int32')
248
- align_out = ops.roi_align(input=x,
249
- rois=rois,
250
- ouput_size=(7, 7),
251
- spatial_scale=0.5,
252
- sampling_ratio=-1,
253
- rois_num=rois_num)
254
- """
255
- check_type(output_size, 'output_size', (int, tuple), 'roi_align')
256
- if isinstance(output_size, int):
257
- output_size = (output_size, output_size)
258
-
259
- pooled_height, pooled_width = output_size
260
-
261
- if in_dygraph_mode():
262
- assert rois_num is not None, "rois_num should not be None in dygraph mode."
263
- align_out = core.ops.roi_align(
264
- input, rois, rois_num, "pooled_height", pooled_height,
265
- "pooled_width", pooled_width, "spatial_scale", spatial_scale,
266
- "sampling_ratio", sampling_ratio, "aligned", aligned)
267
- return align_out
268
-
269
- else:
270
- check_variable_and_dtype(input, 'input', ['float32', 'float64'],
271
- 'roi_align')
272
- check_variable_and_dtype(rois, 'rois', ['float32', 'float64'],
273
- 'roi_align')
274
- helper = LayerHelper('roi_align', **locals())
275
- dtype = helper.input_dtype()
276
- align_out = helper.create_variable_for_type_inference(dtype)
277
- inputs = {
278
- "X": input,
279
- "ROIs": rois,
280
- }
281
- if rois_num is not None:
282
- inputs['RoisNum'] = rois_num
283
- helper.append_op(
284
- type="roi_align",
285
- inputs=inputs,
286
- outputs={"Out": align_out},
287
- attrs={
288
- "pooled_height": pooled_height,
289
- "pooled_width": pooled_width,
290
- "spatial_scale": spatial_scale,
291
- "sampling_ratio": sampling_ratio,
292
- "aligned": aligned,
293
- })
294
- return align_out
295
-
296
-
297
- @paddle.jit.not_to_static
298
- def iou_similarity(x, y, box_normalized=True, name=None):
299
- """
300
- Computes intersection-over-union (IOU) between two box lists.
301
- Box list 'X' should be a LoDTensor and 'Y' is a common Tensor,
302
- boxes in 'Y' are shared by all instance of the batched inputs of X.
303
- Given two boxes A and B, the calculation of IOU is as follows:
304
-
305
- $$
306
- IOU(A, B) =
307
- \\frac{area(A\\cap B)}{area(A)+area(B)-area(A\\cap B)}
308
- $$
309
-
310
- Args:
311
- x (Tensor): Box list X is a 2-D Tensor with shape [N, 4] holds N
312
- boxes, each box is represented as [xmin, ymin, xmax, ymax],
313
- the shape of X is [N, 4]. [xmin, ymin] is the left top
314
- coordinate of the box if the input is image feature map, they
315
- are close to the origin of the coordinate system.
316
- [xmax, ymax] is the right bottom coordinate of the box.
317
- The data type is float32 or float64.
318
- y (Tensor): Box list Y holds M boxes, each box is represented as
319
- [xmin, ymin, xmax, ymax], the shape of X is [N, 4].
320
- [xmin, ymin] is the left top coordinate of the box if the
321
- input is image feature map, and [xmax, ymax] is the right
322
- bottom coordinate of the box. The data type is float32 or float64.
323
- box_normalized(bool): Whether treat the priorbox as a normalized box.
324
- Set true by default.
325
- name(str, optional): For detailed information, please refer
326
- to :ref:`api_guide_Name`. Usually name is no need to set and
327
- None by default.
328
-
329
- Returns:
330
- Tensor: The output of iou_similarity op, a tensor with shape [N, M]
331
- representing pairwise iou scores. The data type is same with x.
332
-
333
- Examples:
334
- .. code-block:: python
335
-
336
- import paddle
337
- from paddlex.ppdet.modeling import ops
338
- paddle.enable_static()
339
-
340
- x = paddle.static.data(name='x', shape=[None, 4], dtype='float32')
341
- y = paddle.static.data(name='y', shape=[None, 4], dtype='float32')
342
- iou = ops.iou_similarity(x=x, y=y)
343
- """
344
-
345
- if in_dygraph_mode():
346
- out = core.ops.iou_similarity(x, y, 'box_normalized', box_normalized)
347
- return out
348
- else:
349
- helper = LayerHelper("iou_similarity", **locals())
350
- out = helper.create_variable_for_type_inference(dtype=x.dtype)
351
-
352
- helper.append_op(
353
- type="iou_similarity",
354
- inputs={"X": x,
355
- "Y": y},
356
- attrs={"box_normalized": box_normalized},
357
- outputs={"Out": out})
358
- return out
359
-
360
-
361
- @paddle.jit.not_to_static
362
- def collect_fpn_proposals(multi_rois,
363
- multi_scores,
364
- min_level,
365
- max_level,
366
- post_nms_top_n,
367
- rois_num_per_level=None,
368
- name=None):
369
- """
370
-
371
- **This OP only supports LoDTensor as input**. Concat multi-level RoIs
372
- (Region of Interest) and select N RoIs with respect to multi_scores.
373
- This operation performs the following steps:
374
-
375
- 1. Choose num_level RoIs and scores as input: num_level = max_level - min_level
376
- 2. Concat multi-level RoIs and scores
377
- 3. Sort scores and select post_nms_top_n scores
378
- 4. Gather RoIs by selected indices from scores
379
- 5. Re-sort RoIs by corresponding batch_id
380
-
381
- Args:
382
- multi_rois(list): List of RoIs to collect. Element in list is 2-D
383
- LoDTensor with shape [N, 4] and data type is float32 or float64,
384
- N is the number of RoIs.
385
- multi_scores(list): List of scores of RoIs to collect. Element in list
386
- is 2-D LoDTensor with shape [N, 1] and data type is float32 or
387
- float64, N is the number of RoIs.
388
- min_level(int): The lowest level of FPN layer to collect
389
- max_level(int): The highest level of FPN layer to collect
390
- post_nms_top_n(int): The number of selected RoIs
391
- rois_num_per_level(list, optional): The List of RoIs' numbers.
392
- Each element is 1-D Tensor which contains the RoIs' number of each
393
- image on each level and the shape is [B] and data type is
394
- int32, B is the number of images. If it is not None then return
395
- a 1-D Tensor contains the output RoIs' number of each image and
396
- the shape is [B]. Default: None
397
- name(str, optional): For detailed information, please refer
398
- to :ref:`api_guide_Name`. Usually name is no need to set and
399
- None by default.
400
-
401
- Returns:
402
- Variable:
403
-
404
- fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is
405
- float32 or float64. Selected RoIs.
406
-
407
- rois_num(Tensor): 1-D Tensor contains the RoIs's number of each
408
- image. The shape is [B] and data type is int32. B is the number of
409
- images.
410
-
411
- Examples:
412
- .. code-block:: python
413
-
414
- import paddle
415
- from paddlex.ppdet.modeling import ops
416
- paddle.enable_static()
417
- multi_rois = []
418
- multi_scores = []
419
- for i in range(4):
420
- multi_rois.append(paddle.static.data(
421
- name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
422
- for i in range(4):
423
- multi_scores.append(paddle.static.data(
424
- name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))
425
-
426
- fpn_rois = ops.collect_fpn_proposals(
427
- multi_rois=multi_rois,
428
- multi_scores=multi_scores,
429
- min_level=2,
430
- max_level=5,
431
- post_nms_top_n=2000)
432
- """
433
- check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
434
- check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
435
- num_lvl = max_level - min_level + 1
436
- input_rois = multi_rois[:num_lvl]
437
- input_scores = multi_scores[:num_lvl]
438
-
439
- if in_dygraph_mode():
440
- assert rois_num_per_level is not None, "rois_num_per_level should not be None in dygraph mode."
441
- attrs = ('post_nms_topN', post_nms_top_n)
442
- output_rois, rois_num = core.ops.collect_fpn_proposals(
443
- input_rois, input_scores, rois_num_per_level, *attrs)
444
- return output_rois, rois_num
445
-
446
- else:
447
- helper = LayerHelper('collect_fpn_proposals', **locals())
448
- dtype = helper.input_dtype('multi_rois')
449
- check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
450
- 'collect_fpn_proposals')
451
- output_rois = helper.create_variable_for_type_inference(dtype)
452
- output_rois.stop_gradient = True
453
-
454
- inputs = {
455
- 'MultiLevelRois': input_rois,
456
- 'MultiLevelScores': input_scores,
457
- }
458
- outputs = {'FpnRois': output_rois}
459
- if rois_num_per_level is not None:
460
- inputs['MultiLevelRoIsNum'] = rois_num_per_level
461
- rois_num = helper.create_variable_for_type_inference(dtype='int32')
462
- rois_num.stop_gradient = True
463
- outputs['RoisNum'] = rois_num
464
- helper.append_op(
465
- type='collect_fpn_proposals',
466
- inputs=inputs,
467
- outputs=outputs,
468
- attrs={'post_nms_topN': post_nms_top_n})
469
- return output_rois, rois_num
470
-
471
-
472
- @paddle.jit.not_to_static
473
- def distribute_fpn_proposals(fpn_rois,
474
- min_level,
475
- max_level,
476
- refer_level,
477
- refer_scale,
478
- pixel_offset=False,
479
- rois_num=None,
480
- name=None):
481
- """
482
-
483
- **This op only takes LoDTensor as input.** In Feature Pyramid Networks
484
- (FPN) models, it is needed to distribute all proposals into different FPN
485
- level, with respect to scale of the proposals, the referring scale and the
486
- referring level. Besides, to restore the order of proposals, we return an
487
- array which indicates the original index of rois in current proposals.
488
- To compute FPN level for each roi, the formula is given as follows:
489
-
490
- .. math::
491
-
492
- roi\_scale &= \sqrt{BBoxArea(fpn\_roi)}
493
-
494
- level = floor(&\log(\\frac{roi\_scale}{refer\_scale}) + refer\_level)
495
-
496
- where BBoxArea is a function to compute the area of each roi.
497
-
498
- Args:
499
-
500
- fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is
501
- float32 or float64. The input fpn_rois.
502
- min_level(int32): The lowest level of FPN layer where the proposals come
503
- from.
504
- max_level(int32): The highest level of FPN layer where the proposals
505
- come from.
506
- refer_level(int32): The referring level of FPN layer with specified scale.
507
- refer_scale(int32): The referring scale of FPN layer with specified level.
508
- rois_num(Tensor): 1-D Tensor contains the number of RoIs in each image.
509
- The shape is [B] and data type is int32. B is the number of images.
510
- If it is not None then return a list of 1-D Tensor. Each element
511
- is the output RoIs' number of each image on the corresponding level
512
- and the shape is [B]. None by default.
513
- name(str, optional): For detailed information, please refer
514
- to :ref:`api_guide_Name`. Usually name is no need to set and
515
- None by default.
516
-
517
- Returns:
518
- Tuple:
519
-
520
- multi_rois(List) : A list of 2-D LoDTensor with shape [M, 4]
521
- and data type of float32 and float64. The length is
522
- max_level-min_level+1. The proposals in each FPN level.
523
-
524
- restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is
525
- the number of total rois. The data type is int32. It is
526
- used to restore the order of fpn_rois.
527
-
528
- rois_num_per_level(List): A list of 1-D Tensor and each Tensor is
529
- the RoIs' number in each image on the corresponding level. The shape
530
- is [B] and data type of int32. B is the number of images
531
-
532
-
533
- Examples:
534
- .. code-block:: python
535
-
536
- import paddle
537
- from paddlex.ppdet.modeling import ops
538
- paddle.enable_static()
539
- fpn_rois = paddle.static.data(
540
- name='data', shape=[None, 4], dtype='float32', lod_level=1)
541
- multi_rois, restore_ind = ops.distribute_fpn_proposals(
542
- fpn_rois=fpn_rois,
543
- min_level=2,
544
- max_level=5,
545
- refer_level=4,
546
- refer_scale=224)
547
- """
548
- num_lvl = max_level - min_level + 1
549
-
550
- if in_dygraph_mode():
551
- assert rois_num is not None, "rois_num should not be None in dygraph mode."
552
- attrs = ('min_level', min_level, 'max_level', max_level, 'refer_level',
553
- refer_level, 'refer_scale', refer_scale, 'pixel_offset',
554
- pixel_offset)
555
- multi_rois, restore_ind, rois_num_per_level = core.ops.distribute_fpn_proposals(
556
- fpn_rois, rois_num, num_lvl, num_lvl, *attrs)
557
- return multi_rois, restore_ind, rois_num_per_level
558
-
559
- else:
560
- check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
561
- 'distribute_fpn_proposals')
562
- helper = LayerHelper('distribute_fpn_proposals', **locals())
563
- dtype = helper.input_dtype('fpn_rois')
564
- multi_rois = [
565
- helper.create_variable_for_type_inference(dtype)
566
- for i in range(num_lvl)
567
- ]
568
-
569
- restore_ind = helper.create_variable_for_type_inference(dtype='int32')
570
-
571
- inputs = {'FpnRois': fpn_rois}
572
- outputs = {
573
- 'MultiFpnRois': multi_rois,
574
- 'RestoreIndex': restore_ind,
575
- }
576
-
577
- if rois_num is not None:
578
- inputs['RoisNum'] = rois_num
579
- rois_num_per_level = [
580
- helper.create_variable_for_type_inference(dtype='int32')
581
- for i in range(num_lvl)
582
- ]
583
- outputs['MultiLevelRoIsNum'] = rois_num_per_level
584
-
585
- helper.append_op(
586
- type='distribute_fpn_proposals',
587
- inputs=inputs,
588
- outputs=outputs,
589
- attrs={
590
- 'min_level': min_level,
591
- 'max_level': max_level,
592
- 'refer_level': refer_level,
593
- 'refer_scale': refer_scale,
594
- 'pixel_offset': pixel_offset
595
- })
596
- return multi_rois, restore_ind, rois_num_per_level
597
-
598
-
599
- @paddle.jit.not_to_static
600
- def yolo_box(
601
- x,
602
- origin_shape,
603
- anchors,
604
- class_num,
605
- conf_thresh,
606
- downsample_ratio,
607
- clip_bbox=True,
608
- scale_x_y=1.,
609
- name=None, ):
610
- """
611
-
612
- This operator generates YOLO detection boxes from output of YOLOv3 network.
613
-
614
- The output of previous network is in shape [N, C, H, W], while H and W
615
- should be the same, H and W specify the grid size, each grid point predict
616
- given number boxes, this given number, which following will be represented as S,
617
- is specified by the number of anchors. In the second dimension(the channel
618
- dimension), C should be equal to S * (5 + class_num), class_num is the object
619
- category number of source dataset(such as 80 in coco dataset), so the
620
- second(channel) dimension, apart from 4 box location coordinates x, y, w, h,
621
- also includes confidence score of the box and class one-hot key of each anchor
622
- box.
623
- Assume the 4 location coordinates are :math:`t_x, t_y, t_w, t_h`, the box
624
- predictions should be as follows:
625
- $$
626
- b_x = \\sigma(t_x) + c_x
627
- $$
628
- $$
629
- b_y = \\sigma(t_y) + c_y
630
- $$
631
- $$
632
- b_w = p_w e^{t_w}
633
- $$
634
- $$
635
- b_h = p_h e^{t_h}
636
- $$
637
- in the equation above, :math:`c_x, c_y` is the left top corner of current grid
638
- and :math:`p_w, p_h` is specified by anchors.
639
- The logistic regression value of the 5th channel of each anchor prediction boxes
640
- represents the confidence score of each prediction box, and the logistic
641
- regression value of the last :attr:`class_num` channels of each anchor prediction
642
- boxes represents the classifcation scores. Boxes with confidence scores less than
643
- :attr:`conf_thresh` should be ignored, and box final scores is the product of
644
- confidence scores and classification scores.
645
- $$
646
- score_{pred} = score_{conf} * score_{class}
647
- $$
648
-
649
- Args:
650
- x (Tensor): The input tensor of YoloBox operator is a 4-D tensor with shape of [N, C, H, W].
651
- The second dimension(C) stores box locations, confidence score and
652
- classification one-hot keys of each anchor box. Generally, X should be the output of YOLOv3 network.
653
- The data type is float32 or float64.
654
- origin_shape (Tensor): The image size tensor of YoloBox operator, This is a 2-D tensor with shape of [N, 2].
655
- This tensor holds height and width of each input image used for resizing output box in input image
656
- scale. The data type is int32.
657
- anchors (list|tuple): The anchor width and height, it will be parsed pair by pair.
658
- class_num (int): The number of classes to predict.
659
- conf_thresh (float): The confidence scores threshold of detection boxes. Boxes with confidence scores
660
- under threshold should be ignored.
661
- downsample_ratio (int): The downsample ratio from network input to YoloBox operator input,
662
- so 32, 16, 8 should be set for the first, second, and thrid YoloBox operators.
663
- clip_bbox (bool): Whether clip output bonding box in Input(ImgSize) boundary. Default true.
664
- scale_x_y (float): Scale the center point of decoded bounding box. Default 1.0.
665
- name (string): The default value is None. Normally there is no need
666
- for user to set this property. For more information,
667
- please refer to :ref:`api_guide_Name`
668
-
669
- Returns:
670
- boxes Tensor: A 3-D tensor with shape [N, M, 4], the coordinates of boxes, N is the batch num,
671
- M is output box number, and the 3rd dimension stores [xmin, ymin, xmax, ymax] coordinates of boxes.
672
- scores Tensor: A 3-D tensor with shape [N, M, :attr:`class_num`], the coordinates of boxes, N is the batch num,
673
- M is output box number.
674
-
675
- Raises:
676
- TypeError: Attr anchors of yolo box must be list or tuple
677
- TypeError: Attr class_num of yolo box must be an integer
678
- TypeError: Attr conf_thresh of yolo box must be a float number
679
-
680
- Examples:
681
-
682
- .. code-block:: python
683
-
684
- import paddle
685
- from paddlex.ppdet.modeling import ops
686
-
687
- paddle.enable_static()
688
- x = paddle.static.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
689
- img_size = paddle.static.data(name='img_size',shape=[None, 2],dtype='int64')
690
- anchors = [10, 13, 16, 30, 33, 23]
691
- boxes,scores = ops.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors,
692
- conf_thresh=0.01, downsample_ratio=32)
693
- """
694
- helper = LayerHelper('yolo_box', **locals())
695
-
696
- if not isinstance(anchors, list) and not isinstance(anchors, tuple):
697
- raise TypeError("Attr anchors of yolo_box must be list or tuple")
698
- if not isinstance(class_num, int):
699
- raise TypeError("Attr class_num of yolo_box must be an integer")
700
- if not isinstance(conf_thresh, float):
701
- raise TypeError(
702
- "Attr ignore_thresh of yolo_box must be a float number")
703
-
704
- if in_dygraph_mode():
705
- attrs = ('anchors', anchors, 'class_num', class_num, 'conf_thresh',
706
- conf_thresh, 'downsample_ratio', downsample_ratio,
707
- 'clip_bbox', clip_bbox, 'scale_x_y', scale_x_y)
708
- boxes, scores = core.ops.yolo_box(x, origin_shape, *attrs)
709
- return boxes, scores
710
- else:
711
- boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
712
- scores = helper.create_variable_for_type_inference(dtype=x.dtype)
713
-
714
- attrs = {
715
- "anchors": anchors,
716
- "class_num": class_num,
717
- "conf_thresh": conf_thresh,
718
- "downsample_ratio": downsample_ratio,
719
- "clip_bbox": clip_bbox,
720
- "scale_x_y": scale_x_y,
721
- }
722
-
723
- helper.append_op(
724
- type='yolo_box',
725
- inputs={
726
- "X": x,
727
- "ImgSize": origin_shape,
728
- },
729
- outputs={
730
- 'Boxes': boxes,
731
- 'Scores': scores,
732
- },
733
- attrs=attrs)
734
- return boxes, scores
735
-
736
-
737
@paddle.jit.not_to_static
def prior_box(input,
              image,
              min_sizes,
              max_sizes=None,
              aspect_ratios=[1.],
              variance=[0.1, 0.1, 0.2, 0.2],
              flip=False,
              clip=False,
              steps=[0.0, 0.0],
              offset=0.5,
              min_max_aspect_ratios_order=False,
              name=None):
    """
    This op generates prior boxes for SSD (Single Shot MultiBox Detector).

    Each position of ``input`` produces N prior boxes, where N is determined
    by the count of ``min_sizes``, ``max_sizes`` and ``aspect_ratios``. Box
    sizes lie in the (min_size, max_size) interval and are generated in
    sequence according to ``aspect_ratios``.

    Parameters:
        input(Tensor): 4-D tensor(NCHW), the data type should be float32 or float64.
        image(Tensor): 4-D tensor(NCHW), the input image data of PriorBoxOp,
            the data type should be float32 or float64.
        min_sizes(list|tuple|float): the min sizes of generated prior boxes.
        max_sizes(list|tuple|float|None): the max sizes of generated prior
            boxes. A bare number is treated as a one-element list.
            Default: None.
        aspect_ratios(list|tuple|float): the aspect ratios of generated
            prior boxes. Default: [1.].
        variance(list|tuple): the variances to be encoded in prior boxes.
            Default: [0.1, 0.1, 0.2, 0.2].
        flip(bool): Whether to flip aspect ratios. Default: False.
        clip(bool): Whether to clip out-of-boundary boxes. Default: False.
        steps(list|tuple): Prior boxes step across width and height. If
            steps[0] or steps[1] equals 0.0, the step across height or width
            of the input is calculated automatically. Default: [0., 0.].
        offset(float): Prior boxes center offset. Default: 0.5.
        min_max_aspect_ratios_order(bool): If True, the output prior box is
            in the Caffe-consistent order [min, max, aspect_ratios]. This
            only affects the weight order of the following convolution
            layer, not the final detection results. Default: False.
        name(str, optional): Normally there is no need for the user to set
            this property. For more information, please refer to
            :ref:`api_guide_Name`.

    Returns:
        Tuple: (boxes, variances)

        boxes(Tensor): the output prior boxes of PriorBox, a 4-D tensor with
            layout [H, W, num_priors, 4].
        variances(Tensor): the expanded variances of PriorBox, a 4-D tensor
            with layout [H, W, num_priors, 4].

    Raises:
        ValueError: if ``steps`` is not a length-2 list or tuple.
    """
    helper = LayerHelper("prior_box", **locals())
    dtype = helper.input_dtype()
    check_variable_and_dtype(
        input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')

    def _is_list_or_tuple_(data):
        return isinstance(data, (list, tuple))

    if not _is_list_or_tuple_(min_sizes):
        min_sizes = [min_sizes]
    if not _is_list_or_tuple_(aspect_ratios):
        aspect_ratios = [aspect_ratios]
    if not (_is_list_or_tuple_(steps) and len(steps) == 2):
        # BUG FIX: the original passed two comma-separated strings to
        # ValueError, so the raised message was a tuple of fragments
        # instead of a single readable sentence.
        raise ValueError('steps should be a list or tuple '
                         'with length 2, (step_width, step_height).')

    min_sizes = list(map(float, min_sizes))
    aspect_ratios = list(map(float, aspect_ratios))
    steps = list(map(float, steps))

    cur_max_sizes = None
    if max_sizes is not None:
        # FIX: normalize a scalar max_sizes to a list *before* len()/indexing;
        # the original applied len() first and raised TypeError for a bare
        # float. This is backward-compatible for list/tuple inputs.
        if not _is_list_or_tuple_(max_sizes):
            max_sizes = [max_sizes]
        if len(max_sizes) > 0 and max_sizes[0] > 0:
            cur_max_sizes = max_sizes

    if in_dygraph_mode():
        attrs = ('min_sizes', min_sizes, 'aspect_ratios', aspect_ratios,
                 'variances', variance, 'flip', flip, 'clip', clip, 'step_w',
                 steps[0], 'step_h', steps[1], 'offset', offset,
                 'min_max_aspect_ratios_order', min_max_aspect_ratios_order)
        if cur_max_sizes is not None:
            attrs += ('max_sizes', cur_max_sizes)
        box, var = core.ops.prior_box(input, image, *attrs)
        return box, var
    else:
        attrs = {
            'min_sizes': min_sizes,
            'aspect_ratios': aspect_ratios,
            'variances': variance,
            'flip': flip,
            'clip': clip,
            'step_w': steps[0],
            'step_h': steps[1],
            'offset': offset,
            'min_max_aspect_ratios_order': min_max_aspect_ratios_order
        }
        if cur_max_sizes is not None:
            attrs['max_sizes'] = cur_max_sizes

        box = helper.create_variable_for_type_inference(dtype)
        var = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type="prior_box",
            inputs={"Input": input,
                    "Image": image},
            outputs={"Boxes": box,
                     "Variances": var},
            attrs=attrs, )
        # Prior boxes are deterministic anchors; never back-propagate
        # through them.
        box.stop_gradient = True
        var.stop_gradient = True
        return box, var
-
877
-
878
@paddle.jit.not_to_static
def multiclass_nms(bboxes,
                   scores,
                   score_threshold,
                   nms_top_k,
                   keep_top_k,
                   nms_threshold=0.3,
                   normalized=True,
                   nms_eta=1.,
                   background_label=-1,
                   return_index=False,
                   return_rois_num=True,
                   rois_num=None,
                   name=None):
    """
    Multi-class non maximum suppression (NMS) over boxes and scores.

    Detections scoring above ``score_threshold`` are kept and reduced to the
    top ``nms_top_k`` confidences (when ``nms_top_k`` > -1); boxes with high
    IoU overlap against already-selected ones are then pruned via adaptive
    NMS driven by ``nms_threshold`` and ``nms_eta``. At most ``keep_top_k``
    boxes per image survive when ``keep_top_k`` > -1.

    Args:
        bboxes (Tensor): either a 3-D Tensor [N, M, 4 or 8 16 24 32] holding
            M predicted boxes per image (layout [xmin, ymin, xmax, ymax]
            when the box size is 4), or a LoDTensor [M, C, 4] with M boxes
            and C classes.
        scores (Tensor): either a 3-D Tensor [N, C, M] of per-class
            confidences (M equals the 2nd dimension of ``bboxes``), or a
            2-D LoDTensor [M, C] paired with the [M, C, 4] box layout.
        score_threshold (float): confidence filter; boxes below it are
            dropped before NMS.
        nms_top_k (int): max detections kept after the score filtering.
        keep_top_k (int): max boxes kept per image after NMS; -1 keeps all.
        nms_threshold (float): IoU threshold used in NMS. Default: 0.3.
        normalized (bool): whether detections are normalized. Default: True.
        nms_eta (float): adaptive-NMS shrink factor. Default: 1.0.
        background_label (int): label index to ignore; -1 considers all
            categories. Default: -1.
        return_index (bool): also return the selected indices.
            Default: False.
        return_rois_num (bool): also return per-image detection counts.
            Default: True.
        rois_num (Tensor): optional 1-D int32 Tensor [B] with the RoI count
            of each image. Default: None.
        name (str): name of the op. Default: None.

    Returns:
        Tuple ``(Out, NmsRoisNum, Index)``: detections of shape [No, 6]
        ([label, confidence, xmin, ymin, xmax, ymax]) or [No, 10];
        per-image detection counts (None when ``return_rois_num`` is
        False); and selected absolute indices [No, 1] (None when
        ``return_index`` is False).
    """
    helper = LayerHelper('multiclass_nms3', **locals())

    if in_dygraph_mode():
        op_attrs = ('background_label', background_label, 'score_threshold',
                    score_threshold, 'nms_top_k', nms_top_k, 'nms_threshold',
                    nms_threshold, 'keep_top_k', keep_top_k, 'nms_eta',
                    nms_eta, 'normalized', normalized)
        output, index, nms_rois_num = core.ops.multiclass_nms3(
            bboxes, scores, rois_num, *op_attrs)
        return output, nms_rois_num, (index if return_index else None)

    # Static graph: declare outputs, wire optional inputs/outputs, append op.
    output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
    index = helper.create_variable_for_type_inference(dtype='int')

    op_inputs = {'BBoxes': bboxes, 'Scores': scores}
    if rois_num is not None:
        op_inputs['RoisNum'] = rois_num

    op_outputs = {'Out': output, 'Index': index}
    nms_rois_num = None
    if return_rois_num:
        nms_rois_num = helper.create_variable_for_type_inference(
            dtype='int32')
        op_outputs['NmsRoisNum'] = nms_rois_num

    helper.append_op(
        type="multiclass_nms3",
        inputs=op_inputs,
        attrs={
            'background_label': background_label,
            'score_threshold': score_threshold,
            'nms_top_k': nms_top_k,
            'nms_threshold': nms_threshold,
            'keep_top_k': keep_top_k,
            'nms_eta': nms_eta,
            'normalized': normalized
        },
        outputs=op_outputs)
    # NMS selection is non-differentiable.
    output.stop_gradient = True
    index.stop_gradient = True

    return output, nms_rois_num, (index if return_index else None)
1031
-
1032
-
1033
@paddle.jit.not_to_static
def matrix_nms(bboxes,
               scores,
               score_threshold,
               post_threshold,
               nms_top_k,
               keep_top_k,
               use_gaussian=False,
               gaussian_sigma=2.,
               background_label=0,
               normalized=True,
               return_index=False,
               return_rois_num=True,
               name=None):
    """
    **Matrix NMS**

    Candidates scoring above ``score_threshold`` are kept and reduced to the
    top ``nms_top_k`` per class (when ``nms_top_k`` > -1); the scores of the
    survivors are then decayed with the Matrix NMS scheme (linear or
    Gaussian). Boxes falling below ``post_threshold`` after decay are
    dropped and at most ``keep_top_k`` boxes per image survive when
    ``keep_top_k`` > -1.

    Args:
        bboxes (Tensor): 3-D Tensor [N, M, 4] of predicted boxes laid out as
            [xmin, ymin, xmax, ymax]; float32 or float64.
        scores (Tensor): 3-D Tensor [N, C, M] of per-class confidences; M
            equals the 2nd dimension of ``bboxes``; float32 or float64.
        score_threshold (float): pre-decay confidence filter.
        post_threshold (float): post-decay confidence filter.
        nms_top_k (int): max detections kept after score filtering.
        keep_top_k (int): max boxes kept per image after NMS; -1 keeps all.
        use_gaussian (bool): use Gaussian as the decay function.
            Default: False.
        gaussian_sigma (float): sigma for the Gaussian decay. Default: 2.0.
        background_label (int): label index to ignore; -1 considers all
            categories. Default: 0.
        normalized (bool): whether detections are normalized. Default: True.
        return_index (bool): also return selected indices. Default: False.
        return_rois_num (bool): also return per-image box counts.
            Default: True.
        name (str): name of the op. Default: None.

    Returns:
        Tuple ``(Out, RoisNum, Index)``: detections [No, 6] as
        [label, confidence, xmin, ymin, xmax, ymax]; per-image counts [N]
        (None when ``return_rois_num`` is False); selected absolute indices
        [No, 1] (None when ``return_index`` is False).
    """
    check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
                             'matrix_nms')
    check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
                             'matrix_nms')
    # Table-driven attribute validation (one check_type call per entry).
    # NOTE: the 'nums_top_k' label is kept as-is from the original checks.
    for arg_value, arg_label, arg_type in (
            (score_threshold, 'score_threshold', float),
            (post_threshold, 'post_threshold', float),
            (nms_top_k, 'nums_top_k', int),
            (keep_top_k, 'keep_top_k', int),
            (normalized, 'normalized', bool),
            (use_gaussian, 'use_gaussian', bool),
            (gaussian_sigma, 'gaussian_sigma', float),
            (background_label, 'background_label', int)):
        check_type(arg_value, arg_label, arg_type, 'matrix_nms')

    if in_dygraph_mode():
        op_attrs = ('background_label', background_label, 'score_threshold',
                    score_threshold, 'post_threshold', post_threshold,
                    'nms_top_k', nms_top_k, 'gaussian_sigma', gaussian_sigma,
                    'use_gaussian', use_gaussian, 'keep_top_k', keep_top_k,
                    'normalized', normalized)
        out, index, rois_num = core.ops.matrix_nms(bboxes, scores, *op_attrs)
        return (out, rois_num if return_rois_num else None,
                index if return_index else None)

    # Static graph: declare outputs, wire optional RoisNum, append op.
    helper = LayerHelper('matrix_nms', **locals())
    output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
    index = helper.create_variable_for_type_inference(dtype='int')
    op_outputs = {'Out': output, 'Index': index}
    rois_num = None
    if return_rois_num:
        rois_num = helper.create_variable_for_type_inference(dtype='int32')
        op_outputs['RoisNum'] = rois_num

    helper.append_op(
        type="matrix_nms",
        inputs={'BBoxes': bboxes,
                'Scores': scores},
        attrs={
            'background_label': background_label,
            'score_threshold': score_threshold,
            'post_threshold': post_threshold,
            'nms_top_k': nms_top_k,
            'gaussian_sigma': gaussian_sigma,
            'use_gaussian': use_gaussian,
            'keep_top_k': keep_top_k,
            'normalized': normalized
        },
        outputs=op_outputs)
    # NMS selection is non-differentiable.
    output.stop_gradient = True

    return (output, rois_num if return_rois_num else None,
            index if return_index else None)
1168
-
1169
-
1170
def bipartite_match(dist_matrix,
                    match_type=None,
                    dist_threshold=None,
                    name=None):
    """
    Greedy bipartite matching over a pairwise distance matrix.

    For the input 2-D matrix the algorithm finds, for each column entity,
    the matched row (the one with the largest distance), with matched
    indices not duplicated within each row of ``ColToRowMatchIndices``; a
    column matched to no row gets -1. Only matched indices from column to
    row are calculated; the number of matched indices per instance equals
    the column count of the distance matrix. **The OP only supports CPU**.

    NOTE: ``dist_matrix`` can be a LoDTensor (with LoD) or a Tensor. With
    LoD, the height of ``ColToRowMatchIndices`` is the batch size;
    otherwise it is 1.

    NOTE: This is a very low level API used by :code:`ssd_loss`; please
    consider using :code:`ssd_loss` instead.

    Args:
        dist_matrix(Tensor): a 2-D LoDTensor [K, M], float32 or float64,
            holding pair-wise distances between row and column entities —
            dist_matrix[i][j] is the distance between A[i] and B[j], and a
            bigger distance means a better match. May carry LoD to describe
            a batch whose instances contain different entity counts.
        match_type(str, optional): 'bipartite' or 'per_prediction'.
            None ('bipartite') by default.
        dist_threshold(float32, optional): when ``match_type`` is
            'per_prediction', the threshold for extra matches based on the
            maximum distance, 0.5 by default.
        name(str, optional): see :ref:`api_guide_Name`. None by default.

    Returns:
        Tuple:

        matched_indices(Tensor): 2-D int32 Tensor [N, M], N is the batch
            size. -1 at [i][j] means B[j] matches no entity in instance i;
            otherwise B[j] is matched to row match_indices[i][j] of
            instance i.
        matched_distance(Tensor): 2-D float32 Tensor [N, M]; -1.0 where
            match_indices is -1, otherwise
            dist_matrix[match_indices[i][j] + LoD[i]][j].
    """
    check_variable_and_dtype(dist_matrix, 'dist_matrix',
                             ['float32', 'float64'], 'bipartite_match')

    if in_dygraph_mode():
        return core.ops.bipartite_match(dist_matrix, "match_type", match_type,
                                        "dist_threshold", dist_threshold)

    helper = LayerHelper('bipartite_match', **locals())
    col_to_row_idx = helper.create_variable_for_type_inference(dtype='int32')
    col_to_row_dist = helper.create_variable_for_type_inference(
        dtype=dist_matrix.dtype)
    helper.append_op(
        type='bipartite_match',
        inputs={'DistMat': dist_matrix},
        attrs={
            'match_type': match_type,
            'dist_threshold': dist_threshold,
        },
        outputs={
            'ColToRowMatchIndices': col_to_row_idx,
            'ColToRowMatchDist': col_to_row_dist
        })
    return col_to_row_idx, col_to_row_dist
1273
-
1274
-
1275
@paddle.jit.not_to_static
def box_coder(prior_box,
              prior_box_var,
              target_box,
              code_type="encode_center_size",
              box_normalized=True,
              axis=0,
              name=None):
    """
    **Box Coder Layer**

    Encode/Decode the target bounding box with the priorbox information.

    Encoding schema:
    .. math::
        ox = (tx - px) / pw / pxv
        oy = (ty - py) / ph / pyv
        ow = \\log(\\abs(tw / pw)) / pwv
        oh = \\log(\\abs(th / ph)) / phv

    Decoding schema:
    .. math::
        ox = (pw * pxv * tx * + px) - tw / 2
        oy = (ph * pyv * ty * + py) - th / 2
        ow = \\exp(pwv * tw) * pw + tw / 2
        oh = \\exp(phv * th) * ph + th / 2

    where `tx`..`th` are the target box's center/size, `px`..`ph` the prior
    (anchor) box's, `pxv`..`phv` the prior box variances, and `ox`..`oh` the
    encoded/decoded coordinates and sizes. During decoding, a prior box of
    shape [N, 4] or [M, 4] broadcasts to a [N, M, 4] target box along the
    assigned ``axis``.

    Args:
        prior_box(Tensor): 2-D Tensor [M, 4] of boxes in
            [xmin, ymin, xmax, ymax] layout; float32 or float64.
        prior_box_var(List|Tensor|None): either a Tensor [M, 4], a 4-element
            list shared by all boxes, or None (not involved in calculation).
        target_box(Tensor): 2-D LoDTensor [N, 4] when encoding, or 3-D
            Tensor [N, M, 4] when decoding; [xmin, ymin, xmax, ymax] layout,
            float32 or float64.
        code_type(str): `encode_center_size` (default) or
            `decode_center_size`.
        box_normalized(bool): whether to treat the prior box as normalized.
            Default: True.
        axis(int): broadcast axis of PriorBox for decoding; only valid with
            `decode_center_size`. Default: 0.
        name(str, optional): see :ref:`api_guide_Name`. None by default.

    Returns:
        Tensor: output_box with shape [N, M, 4] — the N target boxes
        encoded with M prior boxes/variances (encode), or N batches of M
        decoded boxes (decode).

    Raises:
        TypeError: if ``prior_box_var`` is neither a Variable nor a list.
    """
    check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
                             'box_coder')
    check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
                             'box_coder')

    if in_dygraph_mode():
        common_attrs = ("code_type", code_type, "box_normalized",
                        box_normalized, "axis", axis)
        if isinstance(prior_box_var, Variable):
            return core.ops.box_coder(prior_box, prior_box_var, target_box,
                                      *common_attrs)
        if isinstance(prior_box_var, list):
            # List variances travel as the 'variance' attribute instead of
            # an input tensor.
            return core.ops.box_coder(prior_box, None, target_box,
                                      *common_attrs, "variance",
                                      prior_box_var)
        raise TypeError("Input variance of box_coder must be Variable or list")

    helper = LayerHelper("box_coder", **locals())
    output_box = helper.create_variable_for_type_inference(
        dtype=prior_box.dtype)

    op_inputs = {"PriorBox": prior_box, "TargetBox": target_box}
    op_attrs = {
        "code_type": code_type,
        "box_normalized": box_normalized,
        "axis": axis
    }
    if isinstance(prior_box_var, Variable):
        op_inputs['PriorBoxVar'] = prior_box_var
    elif isinstance(prior_box_var, list):
        op_attrs['variance'] = prior_box_var
    else:
        raise TypeError("Input variance of box_coder must be Variable or list")
    helper.append_op(
        type="box_coder",
        inputs=op_inputs,
        attrs=op_attrs,
        outputs={"OutputBox": output_box})
    return output_box
1427
-
1428
-
1429
@paddle.jit.not_to_static
def generate_proposals(scores,
                       bbox_deltas,
                       im_shape,
                       anchors,
                       variances,
                       pre_nms_top_n=6000,
                       post_nms_top_n=1000,
                       nms_thresh=0.5,
                       min_size=0.1,
                       eta=1.0,
                       pixel_offset=False,
                       return_rois_num=False,
                       name=None):
    """
    **Generate proposals (Faster-RCNN)**

    Proposes RoIs from RPN outputs. For each anchor box, its objectness
    score and bbox deltas are used to produce candidate proposals:

    1. Transpose and resize scores and bbox_deltas to (H*W*A, 1) and (H*W*A, 4).
    2. Decode box locations as proposal candidates.
    3. Clip boxes to the image.
    4. Remove predicted boxes with small area (< min_size).
    5. Apply NMS to get the final proposals.

    Args:
        scores (Tensor): 4-D Tensor, shape [N, A, H, W]; per-box objectness
            probability. N is batch size, A is the number of anchors, H and W
            are feature-map height and width. Must be float32.
        bbox_deltas (Tensor): 4-D Tensor, shape [N, 4*A, H, W]; predicted
            offsets from the anchor locations. Must be float32.
        im_shape (Tensor): 2-D Tensor, shape [N, 2] holding (H, W) of the
            origin image or input size. float32 or float64.
        anchors (Tensor): 4-D Tensor, layout [H, W, A, 4]. Each anchor is in
            (xmin, ymin, xmax, ymax) format, unnormalized. Must be float32.
        variances (Tensor): 4-D Tensor, layout [H, W, num_priors, 4]. Each
            variance is in (xcenter, ycenter, w, h) format. Must be float32.
        pre_nms_top_n (float): Number of boxes kept per image before NMS.
            Default 6000.
        post_nms_top_n (float): Number of boxes kept per image after NMS.
            Default 1000.
        nms_thresh (float): NMS threshold. Default 0.5.
        min_size (float): Remove boxes with height or width < min_size.
            Default 0.1.
        eta (float): Adaptive-NMS decay; if adaptive threshold > 0.5,
            `adaptive_threshold *= eta` each iteration. Default 1.0.
        pixel_offset (bool): Whether to add a one-pixel offset when decoding
            boxes. Default False.
        return_rois_num (bool): If True, also return a 1-D Tensor of shape
            [N] with each image's RoI count (e.g. [4, 5] means image 0 has 4
            RoIs and image 1 has 5). Only used in RCNN models. Default False.
        name (str, optional): See :ref:`api_guide_Name`. Default None.

    Returns:
        tuple: ``(rpn_rois, rpn_roi_probs, rpn_rois_num)``.

        - **rpn_rois**: generated RoIs, 2-D Tensor of shape [R, 4], R being
          the number of RoIs; same dtype as ``scores``.
        - **rpn_roi_probs**: their scores, 2-D Tensor of shape [R, 1]; same
          dtype as ``scores``.
        - **rpn_rois_num**: per-image RoI counts when ``return_rois_num`` is
          True, otherwise None (static-graph mode only; dygraph requires
          ``return_rois_num=True``).

    Examples:
        .. code-block:: python

            import paddle
            from paddlex.ppdet.modeling import ops
            paddle.enable_static()
            scores = paddle.static.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
            bbox_deltas = paddle.static.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
            im_shape = paddle.static.data(name='im_shape', shape=[None, 2], dtype='float32')
            anchors = paddle.static.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
            variances = paddle.static.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
            rois, roi_probs = ops.generate_proposals(scores, bbox_deltas,
                im_shape, anchors, variances)
    """
    if in_dygraph_mode():
        # Dygraph path dispatches straight to the fused C++ op.
        assert return_rois_num, "return_rois_num should be True in dygraph mode."
        attrs = ('pre_nms_topN', pre_nms_top_n, 'post_nms_topN',
                 post_nms_top_n, 'nms_thresh', nms_thresh, 'min_size',
                 min_size, 'eta', eta, 'pixel_offset', pixel_offset)
        rpn_rois, rpn_roi_probs, rpn_rois_num = core.ops.generate_proposals_v2(
            scores, bbox_deltas, im_shape, anchors, variances, *attrs)
        return rpn_rois, rpn_roi_probs, rpn_rois_num

    else:
        helper = LayerHelper('generate_proposals_v2', **locals())

        check_variable_and_dtype(scores, 'scores', ['float32'],
                                 'generate_proposals_v2')
        check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
                                 'generate_proposals_v2')
        check_variable_and_dtype(im_shape, 'im_shape', ['float32', 'float64'],
                                 'generate_proposals_v2')
        check_variable_and_dtype(anchors, 'anchors', ['float32'],
                                 'generate_proposals_v2')
        check_variable_and_dtype(variances, 'variances', ['float32'],
                                 'generate_proposals_v2')

        rpn_rois = helper.create_variable_for_type_inference(
            dtype=bbox_deltas.dtype)
        rpn_roi_probs = helper.create_variable_for_type_inference(
            dtype=scores.dtype)
        outputs = {
            'RpnRois': rpn_rois,
            'RpnRoiProbs': rpn_roi_probs,
        }
        # BUGFIX: rpn_rois_num must exist even when return_rois_num is
        # False — the original raised UnboundLocalError at the final
        # return in that case.
        rpn_rois_num = None
        if return_rois_num:
            rpn_rois_num = helper.create_variable_for_type_inference(
                dtype='int32')
            rpn_rois_num.stop_gradient = True
            outputs['RpnRoisNum'] = rpn_rois_num

        helper.append_op(
            type="generate_proposals_v2",
            inputs={
                'Scores': scores,
                'BboxDeltas': bbox_deltas,
                'ImShape': im_shape,
                'Anchors': anchors,
                'Variances': variances
            },
            attrs={
                'pre_nms_topN': pre_nms_top_n,
                'post_nms_topN': post_nms_top_n,
                'nms_thresh': nms_thresh,
                'min_size': min_size,
                'eta': eta,
                'pixel_offset': pixel_offset
            },
            outputs=outputs)
        rpn_rois.stop_gradient = True
        rpn_roi_probs.stop_gradient = True

        return rpn_rois, rpn_roi_probs, rpn_rois_num
1573
def sigmoid_cross_entropy_with_logits(input,
                                      label,
                                      ignore_index=-100,
                                      normalize=False):
    """Per-element sigmoid cross-entropy with an ignore mask.

    Computes the unreduced binary cross-entropy between ``input`` logits
    and ``label``, zeroes out every position whose label equals
    ``ignore_index``, and — when ``normalize`` is True — divides the
    masked loss by the number of valid (non-ignored) positions.

    Args:
        input (Tensor): logits, same shape as ``label``.
        label (Tensor): targets; entries equal to ``ignore_index`` are
            excluded from the loss.
        ignore_index (int): label value marking positions to ignore.
            Default -100.
        normalize (bool): divide by the count of valid positions.
            Default False.

    Returns:
        Tensor of per-element losses, same shape as ``input``.
    """
    valid_mask = paddle.cast(label != ignore_index, 'float32')
    loss = F.binary_cross_entropy_with_logits(
        input, label, reduction='none')
    loss = paddle.multiply(loss, valid_mask)
    if normalize:
        loss = loss / paddle.sum(valid_mask)
    return loss
1585
-
1586
def smooth_l1(input,
              label,
              inside_weight=None,
              outside_weight=None,
              sigma=None):
    """Weighted Smooth-L1 loss, summed per sample.

    Applies ``inside_weight`` to both prediction and target, computes an
    unreduced Smooth-L1 loss with ``delta = 1 / sigma**2`` (Detectron
    convention), applies ``outside_weight``, rescales by ``1 / delta``,
    and sums over all non-batch dimensions.

    BUGFIX: the original crashed whenever any optional argument was left
    at its ``None`` default (``paddle.multiply(x, None)`` /
    ``1 / (None * None)``). ``None`` now means "no weighting" for the
    weight tensors and ``sigma = 1`` (i.e. ``delta = 1``) for ``sigma``;
    callers that always passed all arguments see identical behavior.

    Args:
        input (Tensor): predictions, shape [N, ...].
        label (Tensor): targets, same shape as ``input``.
        inside_weight (Tensor, optional): element-wise weight applied to
            both ``input`` and ``label`` before the loss. Default None.
        outside_weight (Tensor, optional): element-wise weight applied to
            the loss. Default None.
        sigma (float, optional): transition-point parameter; the loss
            switches from L2 to L1 at ``|x| = 1 / sigma**2``. Default None
            (treated as 1.0).

    Returns:
        1-D Tensor of shape [N] with the per-sample summed loss.
    """
    if inside_weight is not None:
        input = paddle.multiply(input, inside_weight)
        label = paddle.multiply(label, inside_weight)
    delta = 1.0 if sigma is None else 1.0 / (sigma * sigma)
    out = F.smooth_l1_loss(input, label, reduction='none', delta=delta)
    if outside_weight is not None:
        out = paddle.multiply(out, outside_weight)
    # Undo the delta scaling baked into F.smooth_l1_loss so the result
    # matches the classic Detectron smooth-L1 definition.
    out = out / delta
    out = paddle.reshape(out, shape=[out.shape[0], -1])
    return paddle.sum(out, axis=1)