paddlex-3.0.0b2-py3-none-any.whl → paddlex-3.0.0rc0-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that registry.
Files changed (940)
  1. paddlex/.version +1 -1
  2. paddlex/__init__.py +1 -0
  3. paddlex/__main__.py +3 -4
  4. paddlex/configs/modules/3d_bev_detection/BEVFusion.yaml +38 -0
  5. paddlex/configs/modules/face_feature/MobileFaceNet.yaml +41 -0
  6. paddlex/configs/modules/face_feature/ResNet50_face.yaml +41 -0
  7. paddlex/configs/modules/formula_recognition/LaTeX_OCR_rec.yaml +40 -0
  8. paddlex/configs/modules/formula_recognition/PP-FormulaNet-L.yaml +40 -0
  9. paddlex/configs/modules/formula_recognition/PP-FormulaNet-S.yaml +40 -0
  10. paddlex/configs/modules/formula_recognition/UniMERNet.yaml +40 -0
  11. paddlex/configs/modules/image_classification/CLIP_vit_base_patch16_224.yaml +41 -0
  12. paddlex/configs/modules/image_classification/CLIP_vit_large_patch14_224.yaml +41 -0
  13. paddlex/configs/modules/image_classification/ConvNeXt_large_384.yaml +41 -0
  14. paddlex/configs/modules/keypoint_detection/PP-TinyPose_128x96.yaml +40 -0
  15. paddlex/configs/modules/keypoint_detection/PP-TinyPose_256x192.yaml +40 -0
  16. paddlex/configs/modules/layout_detection/PP-DocLayout-L.yaml +40 -0
  17. paddlex/configs/modules/layout_detection/PP-DocLayout-M.yaml +40 -0
  18. paddlex/configs/modules/layout_detection/PP-DocLayout-S.yaml +40 -0
  19. paddlex/configs/modules/multilingual_speech_recognition/whisper_base.yaml +12 -0
  20. paddlex/configs/modules/multilingual_speech_recognition/whisper_large.yaml +12 -0
  21. paddlex/configs/modules/multilingual_speech_recognition/whisper_medium.yaml +12 -0
  22. paddlex/configs/modules/multilingual_speech_recognition/whisper_small.yaml +12 -0
  23. paddlex/configs/modules/multilingual_speech_recognition/whisper_tiny.yaml +12 -0
  24. paddlex/configs/modules/object_detection/Co-DINO-R50.yaml +40 -0
  25. paddlex/configs/modules/object_detection/Co-DINO-Swin-L.yaml +40 -0
  26. paddlex/configs/modules/object_detection/Co-Deformable-DETR-R50.yaml +40 -0
  27. paddlex/configs/modules/object_detection/Co-Deformable-DETR-Swin-T.yaml +40 -0
  28. paddlex/configs/modules/object_detection/YOLOX-X.yaml +40 -0
  29. paddlex/configs/modules/open_vocabulary_detection/GroundingDINO-T.yaml +13 -0
  30. paddlex/configs/modules/open_vocabulary_segmentation/SAM-H_box.yaml +17 -0
  31. paddlex/configs/modules/open_vocabulary_segmentation/SAM-H_point.yaml +15 -0
  32. paddlex/configs/modules/rotated_object_detection/PP-YOLOE-R-L.yaml +40 -0
  33. paddlex/configs/modules/semantic_segmentation/MaskFormer_small.yaml +42 -0
  34. paddlex/configs/modules/semantic_segmentation/MaskFormer_tiny.yaml +42 -0
  35. paddlex/configs/modules/semantic_segmentation/SeaFormer_base.yaml +40 -0
  36. paddlex/configs/modules/semantic_segmentation/SeaFormer_large.yaml +40 -0
  37. paddlex/configs/modules/semantic_segmentation/SeaFormer_small.yaml +40 -0
  38. paddlex/configs/modules/semantic_segmentation/SeaFormer_tiny.yaml +40 -0
  39. paddlex/configs/modules/table_cells_detection/RT-DETR-L_wired_table_cell_det.yaml +40 -0
  40. paddlex/configs/modules/table_cells_detection/RT-DETR-L_wireless_table_cell_det.yaml +40 -0
  41. paddlex/configs/modules/table_classification/PP-LCNet_x1_0_table_cls.yaml +41 -0
  42. paddlex/configs/modules/table_structure_recognition/SLANeXt_wired.yaml +39 -0
  43. paddlex/configs/modules/table_structure_recognition/SLANeXt_wireless.yaml +39 -0
  44. paddlex/configs/modules/text_detection/PP-OCRv3_mobile_det.yaml +40 -0
  45. paddlex/configs/modules/text_detection/PP-OCRv3_server_det.yaml +40 -0
  46. paddlex/configs/modules/text_recognition/PP-OCRv3_mobile_rec.yaml +39 -0
  47. paddlex/configs/modules/text_recognition/PP-OCRv4_server_rec_doc.yaml +39 -0
  48. paddlex/configs/modules/text_recognition/arabic_PP-OCRv3_mobile_rec.yaml +39 -0
  49. paddlex/configs/modules/text_recognition/chinese_cht_PP-OCRv3_mobile_rec.yaml +39 -0
  50. paddlex/configs/modules/text_recognition/cyrillic_PP-OCRv3_mobile_rec.yaml +39 -0
  51. paddlex/configs/modules/text_recognition/devanagari_PP-OCRv3_mobile_rec.yaml +39 -0
  52. paddlex/configs/modules/text_recognition/en_PP-OCRv3_mobile_rec.yaml +39 -0
  53. paddlex/configs/modules/text_recognition/en_PP-OCRv4_mobile_rec.yaml +39 -0
  54. paddlex/configs/modules/text_recognition/japan_PP-OCRv3_mobile_rec.yaml +39 -0
  55. paddlex/configs/modules/text_recognition/ka_PP-OCRv3_mobile_rec.yaml +39 -0
  56. paddlex/configs/modules/text_recognition/korean_PP-OCRv3_mobile_rec.yaml +39 -0
  57. paddlex/configs/modules/text_recognition/latin_PP-OCRv3_mobile_rec.yaml +39 -0
  58. paddlex/configs/modules/text_recognition/ta_PP-OCRv3_mobile_rec.yaml +39 -0
  59. paddlex/configs/modules/text_recognition/te_PP-OCRv3_mobile_rec.yaml +39 -0
  60. paddlex/configs/modules/textline_orientation/PP-LCNet_x0_25_textline_ori.yaml +41 -0
  61. paddlex/configs/modules/video_classification/PP-TSM-R50_8frames_uniform.yaml +42 -0
  62. paddlex/configs/modules/video_classification/PP-TSMv2-LCNetV2_16frames_uniform.yaml +42 -0
  63. paddlex/configs/modules/video_classification/PP-TSMv2-LCNetV2_8frames_uniform.yaml +42 -0
  64. paddlex/configs/modules/video_detection/YOWO.yaml +40 -0
  65. paddlex/configs/pipelines/3d_bev_detection.yaml +9 -0
  66. paddlex/configs/pipelines/OCR.yaml +44 -0
  67. paddlex/configs/pipelines/PP-ChatOCRv3-doc.yaml +149 -0
  68. paddlex/configs/pipelines/PP-ChatOCRv4-doc.yaml +184 -0
  69. paddlex/configs/pipelines/PP-ShiTuV2.yaml +18 -0
  70. paddlex/configs/pipelines/PP-StructureV3.yaml +226 -0
  71. paddlex/configs/pipelines/anomaly_detection.yaml +8 -0
  72. paddlex/configs/pipelines/doc_preprocessor.yaml +15 -0
  73. paddlex/configs/pipelines/face_recognition.yaml +18 -0
  74. paddlex/configs/pipelines/formula_recognition.yaml +39 -0
  75. paddlex/configs/pipelines/human_keypoint_detection.yaml +17 -0
  76. paddlex/configs/pipelines/image_classification.yaml +10 -0
  77. paddlex/configs/pipelines/image_multilabel_classification.yaml +9 -0
  78. paddlex/configs/pipelines/instance_segmentation.yaml +10 -0
  79. paddlex/configs/pipelines/layout_parsing.yaml +101 -0
  80. paddlex/configs/pipelines/multilingual_speech_recognition.yaml +9 -0
  81. paddlex/configs/pipelines/object_detection.yaml +10 -0
  82. paddlex/configs/pipelines/open_vocabulary_detection.yaml +12 -0
  83. paddlex/configs/pipelines/open_vocabulary_segmentation.yaml +13 -0
  84. paddlex/configs/pipelines/pedestrian_attribute_recognition.yaml +15 -0
  85. paddlex/configs/pipelines/rotated_object_detection.yaml +10 -0
  86. paddlex/configs/pipelines/seal_recognition.yaml +51 -0
  87. paddlex/configs/pipelines/semantic_segmentation.yaml +10 -0
  88. paddlex/configs/pipelines/small_object_detection.yaml +10 -0
  89. paddlex/configs/pipelines/table_recognition.yaml +56 -0
  90. paddlex/configs/pipelines/table_recognition_v2.yaml +76 -0
  91. paddlex/configs/pipelines/ts_anomaly_detection.yaml +8 -0
  92. paddlex/configs/pipelines/ts_classification.yaml +8 -0
  93. paddlex/configs/pipelines/ts_forecast.yaml +8 -0
  94. paddlex/configs/pipelines/vehicle_attribute_recognition.yaml +15 -0
  95. paddlex/configs/pipelines/video_classification.yaml +9 -0
  96. paddlex/configs/pipelines/video_detection.yaml +10 -0
  97. paddlex/engine.py +1 -1
  98. paddlex/hpip_links.html +19 -0
  99. paddlex/inference/__init__.py +3 -1
  100. paddlex/inference/common/batch_sampler/__init__.py +20 -0
  101. paddlex/inference/common/batch_sampler/audio_batch_sampler.py +84 -0
  102. paddlex/inference/common/batch_sampler/base_batch_sampler.py +90 -0
  103. paddlex/inference/common/batch_sampler/det_3d_batch_sampler.py +147 -0
  104. paddlex/inference/common/batch_sampler/image_batch_sampler.py +136 -0
  105. paddlex/inference/common/batch_sampler/ts_batch_sampler.py +110 -0
  106. paddlex/inference/common/batch_sampler/video_batch_sampler.py +94 -0
  107. paddlex/inference/common/reader/__init__.py +19 -0
  108. paddlex/inference/common/reader/audio_reader.py +46 -0
  109. paddlex/inference/common/reader/det_3d_reader.py +239 -0
  110. paddlex/inference/common/reader/image_reader.py +69 -0
  111. paddlex/inference/common/reader/ts_reader.py +45 -0
  112. paddlex/inference/common/reader/video_reader.py +42 -0
  113. paddlex/inference/common/result/__init__.py +29 -0
  114. paddlex/inference/common/result/base_cv_result.py +31 -0
  115. paddlex/inference/common/result/base_result.py +70 -0
  116. paddlex/inference/common/result/base_ts_result.py +42 -0
  117. paddlex/inference/common/result/base_video_result.py +36 -0
  118. paddlex/inference/common/result/mixin.py +703 -0
  119. paddlex/inference/models/3d_bev_detection/__init__.py +15 -0
  120. paddlex/inference/models/3d_bev_detection/predictor.py +314 -0
  121. paddlex/inference/models/3d_bev_detection/processors.py +978 -0
  122. paddlex/inference/models/3d_bev_detection/result.py +65 -0
  123. paddlex/inference/models/3d_bev_detection/visualizer_3d.py +131 -0
  124. paddlex/inference/models/__init__.py +37 -13
  125. paddlex/inference/models/anomaly_detection/__init__.py +15 -0
  126. paddlex/inference/models/anomaly_detection/predictor.py +145 -0
  127. paddlex/inference/models/anomaly_detection/processors.py +46 -0
  128. paddlex/inference/models/anomaly_detection/result.py +70 -0
  129. paddlex/inference/models/base/__init__.py +1 -2
  130. paddlex/inference/models/base/predictor/__init__.py +16 -0
  131. paddlex/inference/models/base/predictor/base_predictor.py +175 -0
  132. paddlex/inference/models/base/predictor/basic_predictor.py +139 -0
  133. paddlex/inference/models/common/__init__.py +35 -0
  134. paddlex/inference/models/common/static_infer.py +329 -0
  135. paddlex/inference/models/common/tokenizer/__init__.py +17 -0
  136. paddlex/inference/models/common/tokenizer/bert_tokenizer.py +655 -0
  137. paddlex/inference/models/common/tokenizer/gpt_tokenizer.py +451 -0
  138. paddlex/inference/models/common/tokenizer/tokenizer_utils.py +2141 -0
  139. paddlex/inference/models/common/tokenizer/tokenizer_utils_base.py +3504 -0
  140. paddlex/inference/models/common/tokenizer/utils.py +66 -0
  141. paddlex/inference/models/common/tokenizer/vocab.py +647 -0
  142. paddlex/inference/models/common/ts/__init__.py +15 -0
  143. paddlex/inference/models/common/ts/funcs.py +533 -0
  144. paddlex/inference/models/common/ts/processors.py +313 -0
  145. paddlex/inference/models/common/vision/__init__.py +23 -0
  146. paddlex/inference/models/common/vision/funcs.py +93 -0
  147. paddlex/inference/models/common/vision/processors.py +270 -0
  148. paddlex/inference/models/face_feature/__init__.py +15 -0
  149. paddlex/inference/models/face_feature/predictor.py +65 -0
  150. paddlex/inference/models/formula_recognition/__init__.py +15 -0
  151. paddlex/inference/models/formula_recognition/predictor.py +203 -0
  152. paddlex/inference/models/formula_recognition/processors.py +986 -0
  153. paddlex/inference/models/formula_recognition/result.py +403 -0
  154. paddlex/inference/models/image_classification/__init__.py +15 -0
  155. paddlex/inference/models/image_classification/predictor.py +182 -0
  156. paddlex/inference/models/image_classification/processors.py +87 -0
  157. paddlex/inference/models/image_classification/result.py +92 -0
  158. paddlex/inference/models/image_feature/__init__.py +15 -0
  159. paddlex/inference/models/image_feature/predictor.py +156 -0
  160. paddlex/inference/models/image_feature/processors.py +29 -0
  161. paddlex/inference/models/image_feature/result.py +33 -0
  162. paddlex/inference/models/image_multilabel_classification/__init__.py +15 -0
  163. paddlex/inference/models/image_multilabel_classification/predictor.py +94 -0
  164. paddlex/inference/models/image_multilabel_classification/processors.py +85 -0
  165. paddlex/inference/models/image_multilabel_classification/result.py +95 -0
  166. paddlex/inference/models/image_unwarping/__init__.py +15 -0
  167. paddlex/inference/models/image_unwarping/predictor.py +105 -0
  168. paddlex/inference/models/image_unwarping/processors.py +88 -0
  169. paddlex/inference/models/image_unwarping/result.py +45 -0
  170. paddlex/inference/models/instance_segmentation/__init__.py +15 -0
  171. paddlex/inference/models/instance_segmentation/predictor.py +210 -0
  172. paddlex/inference/models/instance_segmentation/processors.py +105 -0
  173. paddlex/inference/models/instance_segmentation/result.py +161 -0
  174. paddlex/inference/models/keypoint_detection/__init__.py +15 -0
  175. paddlex/inference/models/keypoint_detection/predictor.py +188 -0
  176. paddlex/inference/models/keypoint_detection/processors.py +359 -0
  177. paddlex/inference/models/keypoint_detection/result.py +192 -0
  178. paddlex/inference/models/multilingual_speech_recognition/__init__.py +15 -0
  179. paddlex/inference/models/multilingual_speech_recognition/predictor.py +141 -0
  180. paddlex/inference/models/multilingual_speech_recognition/processors.py +1941 -0
  181. paddlex/inference/models/multilingual_speech_recognition/result.py +21 -0
  182. paddlex/inference/models/object_detection/__init__.py +15 -0
  183. paddlex/inference/models/object_detection/predictor.py +348 -0
  184. paddlex/inference/models/object_detection/processors.py +855 -0
  185. paddlex/inference/models/object_detection/result.py +113 -0
  186. paddlex/inference/models/object_detection/utils.py +68 -0
  187. paddlex/inference/models/open_vocabulary_detection/__init__.py +15 -0
  188. paddlex/inference/models/open_vocabulary_detection/predictor.py +155 -0
  189. paddlex/inference/models/open_vocabulary_detection/processors/__init__.py +15 -0
  190. paddlex/inference/models/open_vocabulary_detection/processors/groundingdino_processors.py +485 -0
  191. paddlex/inference/models/open_vocabulary_segmentation/__init__.py +15 -0
  192. paddlex/inference/models/open_vocabulary_segmentation/predictor.py +120 -0
  193. paddlex/inference/models/open_vocabulary_segmentation/processors/__init__.py +15 -0
  194. paddlex/inference/models/open_vocabulary_segmentation/processors/sam_processer.py +249 -0
  195. paddlex/inference/models/open_vocabulary_segmentation/results/__init__.py +15 -0
  196. paddlex/inference/models/open_vocabulary_segmentation/results/sam_result.py +147 -0
  197. paddlex/inference/models/semantic_segmentation/__init__.py +15 -0
  198. paddlex/inference/models/semantic_segmentation/predictor.py +167 -0
  199. paddlex/inference/models/semantic_segmentation/processors.py +114 -0
  200. paddlex/inference/models/semantic_segmentation/result.py +72 -0
  201. paddlex/inference/models/table_structure_recognition/__init__.py +15 -0
  202. paddlex/inference/models/table_structure_recognition/predictor.py +171 -0
  203. paddlex/inference/models/table_structure_recognition/processors.py +235 -0
  204. paddlex/inference/models/table_structure_recognition/result.py +70 -0
  205. paddlex/inference/models/text_detection/__init__.py +15 -0
  206. paddlex/inference/models/text_detection/predictor.py +191 -0
  207. paddlex/inference/models/text_detection/processors.py +466 -0
  208. paddlex/inference/models/text_detection/result.py +51 -0
  209. paddlex/inference/models/text_recognition/__init__.py +15 -0
  210. paddlex/inference/models/text_recognition/predictor.py +106 -0
  211. paddlex/inference/models/text_recognition/processors.py +231 -0
  212. paddlex/inference/models/text_recognition/result.py +75 -0
  213. paddlex/inference/models/ts_anomaly_detection/__init__.py +15 -0
  214. paddlex/inference/models/ts_anomaly_detection/predictor.py +146 -0
  215. paddlex/inference/models/ts_anomaly_detection/processors.py +94 -0
  216. paddlex/inference/models/ts_anomaly_detection/result.py +72 -0
  217. paddlex/inference/models/ts_classification/__init__.py +15 -0
  218. paddlex/inference/models/ts_classification/predictor.py +135 -0
  219. paddlex/inference/models/ts_classification/processors.py +117 -0
  220. paddlex/inference/models/ts_classification/result.py +78 -0
  221. paddlex/inference/models/ts_forecasting/__init__.py +15 -0
  222. paddlex/inference/models/ts_forecasting/predictor.py +159 -0
  223. paddlex/inference/models/ts_forecasting/processors.py +149 -0
  224. paddlex/inference/models/ts_forecasting/result.py +83 -0
  225. paddlex/inference/models/video_classification/__init__.py +15 -0
  226. paddlex/inference/models/video_classification/predictor.py +147 -0
  227. paddlex/inference/models/video_classification/processors.py +409 -0
  228. paddlex/inference/models/video_classification/result.py +92 -0
  229. paddlex/inference/models/video_detection/__init__.py +15 -0
  230. paddlex/inference/models/video_detection/predictor.py +136 -0
  231. paddlex/inference/models/video_detection/processors.py +450 -0
  232. paddlex/inference/models/video_detection/result.py +104 -0
  233. paddlex/inference/pipelines/3d_bev_detection/__init__.py +15 -0
  234. paddlex/inference/pipelines/3d_bev_detection/pipeline.py +67 -0
  235. paddlex/inference/pipelines/__init__.py +174 -73
  236. paddlex/inference/pipelines/anomaly_detection/__init__.py +15 -0
  237. paddlex/inference/pipelines/anomaly_detection/pipeline.py +62 -0
  238. paddlex/inference/pipelines/attribute_recognition/__init__.py +15 -0
  239. paddlex/inference/pipelines/attribute_recognition/pipeline.py +105 -0
  240. paddlex/inference/pipelines/attribute_recognition/result.py +100 -0
  241. paddlex/inference/pipelines/base.py +103 -57
  242. paddlex/inference/pipelines/components/__init__.py +23 -0
  243. paddlex/inference/pipelines/components/chat_server/__init__.py +16 -0
  244. paddlex/inference/pipelines/components/chat_server/base.py +39 -0
  245. paddlex/inference/pipelines/components/chat_server/openai_bot_chat.py +236 -0
  246. paddlex/inference/pipelines/components/common/__init__.py +18 -0
  247. paddlex/inference/pipelines/components/common/base_operator.py +36 -0
  248. paddlex/inference/pipelines/components/common/base_result.py +65 -0
  249. paddlex/inference/pipelines/components/common/convert_points_and_boxes.py +46 -0
  250. paddlex/inference/pipelines/components/common/crop_image_regions.py +550 -0
  251. paddlex/inference/pipelines/components/common/seal_det_warp.py +941 -0
  252. paddlex/inference/pipelines/components/common/sort_boxes.py +83 -0
  253. paddlex/inference/pipelines/components/faisser.py +352 -0
  254. paddlex/inference/pipelines/components/prompt_engineering/__init__.py +16 -0
  255. paddlex/inference/pipelines/components/prompt_engineering/base.py +35 -0
  256. paddlex/inference/pipelines/components/prompt_engineering/generate_ensemble_prompt.py +127 -0
  257. paddlex/inference/pipelines/components/prompt_engineering/generate_kie_prompt.py +148 -0
  258. paddlex/inference/pipelines/components/retriever/__init__.py +16 -0
  259. paddlex/inference/pipelines/components/retriever/base.py +226 -0
  260. paddlex/inference/pipelines/components/retriever/openai_bot_retriever.py +70 -0
  261. paddlex/inference/pipelines/components/retriever/qianfan_bot_retriever.py +163 -0
  262. paddlex/inference/pipelines/components/utils/__init__.py +13 -0
  263. paddlex/inference/pipelines/components/utils/mixin.py +206 -0
  264. paddlex/inference/pipelines/doc_preprocessor/__init__.py +15 -0
  265. paddlex/inference/pipelines/doc_preprocessor/pipeline.py +190 -0
  266. paddlex/inference/pipelines/doc_preprocessor/result.py +103 -0
  267. paddlex/inference/pipelines/face_recognition/__init__.py +15 -0
  268. paddlex/inference/pipelines/face_recognition/pipeline.py +61 -0
  269. paddlex/inference/pipelines/face_recognition/result.py +43 -0
  270. paddlex/inference/pipelines/formula_recognition/__init__.py +15 -0
  271. paddlex/inference/pipelines/formula_recognition/pipeline.py +303 -0
  272. paddlex/inference/pipelines/formula_recognition/result.py +291 -0
  273. paddlex/inference/pipelines/image_classification/__init__.py +15 -0
  274. paddlex/inference/pipelines/image_classification/pipeline.py +71 -0
  275. paddlex/inference/pipelines/image_multilabel_classification/__init__.py +15 -0
  276. paddlex/inference/pipelines/image_multilabel_classification/pipeline.py +78 -0
  277. paddlex/inference/pipelines/instance_segmentation/__init__.py +15 -0
  278. paddlex/inference/pipelines/instance_segmentation/pipeline.py +70 -0
  279. paddlex/inference/pipelines/keypoint_detection/__init__.py +15 -0
  280. paddlex/inference/pipelines/keypoint_detection/pipeline.py +137 -0
  281. paddlex/inference/pipelines/layout_parsing/__init__.py +2 -1
  282. paddlex/inference/pipelines/layout_parsing/pipeline.py +570 -0
  283. paddlex/inference/pipelines/layout_parsing/pipeline_v2.py +739 -0
  284. paddlex/inference/pipelines/layout_parsing/result.py +203 -0
  285. paddlex/inference/pipelines/layout_parsing/result_v2.py +470 -0
  286. paddlex/inference/pipelines/layout_parsing/utils.py +2385 -0
  287. paddlex/inference/pipelines/multilingual_speech_recognition/__init__.py +15 -0
  288. paddlex/inference/pipelines/multilingual_speech_recognition/pipeline.py +67 -0
  289. paddlex/inference/pipelines/object_detection/__init__.py +15 -0
  290. paddlex/inference/pipelines/object_detection/pipeline.py +95 -0
  291. paddlex/inference/pipelines/ocr/__init__.py +15 -0
  292. paddlex/inference/pipelines/ocr/pipeline.py +389 -0
  293. paddlex/inference/pipelines/ocr/result.py +248 -0
  294. paddlex/inference/pipelines/open_vocabulary_detection/__init__.py +15 -0
  295. paddlex/inference/pipelines/open_vocabulary_detection/pipeline.py +75 -0
  296. paddlex/inference/pipelines/open_vocabulary_segmentation/__init__.py +15 -0
  297. paddlex/inference/pipelines/open_vocabulary_segmentation/pipeline.py +89 -0
  298. paddlex/inference/pipelines/pp_chatocr/__init__.py +16 -0
  299. paddlex/inference/pipelines/pp_chatocr/pipeline_base.py +102 -0
  300. paddlex/inference/pipelines/pp_chatocr/pipeline_v3.py +773 -0
  301. paddlex/inference/pipelines/pp_chatocr/pipeline_v4.py +977 -0
  302. paddlex/inference/pipelines/pp_shitu_v2/__init__.py +15 -0
  303. paddlex/inference/pipelines/pp_shitu_v2/pipeline.py +152 -0
  304. paddlex/inference/pipelines/pp_shitu_v2/result.py +126 -0
  305. paddlex/inference/pipelines/rotated_object_detection/__init__.py +15 -0
  306. paddlex/inference/pipelines/rotated_object_detection/pipeline.py +74 -0
  307. paddlex/inference/pipelines/seal_recognition/__init__.py +15 -0
  308. paddlex/inference/pipelines/seal_recognition/pipeline.py +271 -0
  309. paddlex/inference/pipelines/seal_recognition/result.py +87 -0
  310. paddlex/inference/pipelines/semantic_segmentation/__init__.py +15 -0
  311. paddlex/inference/pipelines/semantic_segmentation/pipeline.py +74 -0
  312. paddlex/inference/pipelines/small_object_detection/__init__.py +15 -0
  313. paddlex/inference/pipelines/small_object_detection/pipeline.py +74 -0
  314. paddlex/inference/pipelines/table_recognition/__init__.py +2 -1
  315. paddlex/inference/pipelines/table_recognition/pipeline.py +462 -0
  316. paddlex/inference/pipelines/table_recognition/pipeline_v2.py +792 -0
  317. paddlex/inference/pipelines/table_recognition/result.py +216 -0
  318. paddlex/inference/pipelines/table_recognition/table_recognition_post_processing.py +362 -0
  319. paddlex/inference/pipelines/table_recognition/table_recognition_post_processing_v2.py +470 -0
  320. paddlex/inference/pipelines/table_recognition/utils.py +23 -436
  321. paddlex/inference/pipelines/ts_anomaly_detection/__init__.py +15 -0
  322. paddlex/inference/pipelines/ts_anomaly_detection/pipeline.py +62 -0
  323. paddlex/inference/pipelines/ts_classification/__init__.py +15 -0
  324. paddlex/inference/pipelines/ts_classification/pipeline.py +62 -0
  325. paddlex/inference/pipelines/ts_forecasting/__init__.py +15 -0
  326. paddlex/inference/pipelines/ts_forecasting/pipeline.py +62 -0
  327. paddlex/inference/pipelines/video_classification/__init__.py +15 -0
  328. paddlex/inference/pipelines/video_classification/pipeline.py +68 -0
  329. paddlex/inference/pipelines/video_detection/__init__.py +15 -0
  330. paddlex/inference/pipelines/video_detection/pipeline.py +73 -0
  331. paddlex/inference/serving/__init__.py +13 -0
  332. paddlex/inference/serving/basic_serving/__init__.py +18 -0
  333. paddlex/inference/serving/basic_serving/_app.py +209 -0
  334. paddlex/inference/serving/basic_serving/_pipeline_apps/__init__.py +41 -0
  335. paddlex/inference/serving/basic_serving/_pipeline_apps/_common/__init__.py +13 -0
  336. paddlex/inference/serving/basic_serving/_pipeline_apps/_common/common.py +96 -0
  337. paddlex/inference/serving/basic_serving/_pipeline_apps/_common/image_recognition.py +36 -0
  338. paddlex/inference/serving/basic_serving/_pipeline_apps/_common/ocr.py +90 -0
  339. paddlex/inference/serving/basic_serving/_pipeline_apps/anomaly_detection.py +64 -0
  340. paddlex/inference/serving/basic_serving/_pipeline_apps/doc_preprocessor.py +97 -0
  341. paddlex/inference/serving/basic_serving/_pipeline_apps/face_recognition.py +223 -0
  342. paddlex/inference/serving/basic_serving/_pipeline_apps/formula_recognition.py +97 -0
  343. paddlex/inference/serving/basic_serving/_pipeline_apps/human_keypoint_detection.py +78 -0
  344. paddlex/inference/serving/basic_serving/_pipeline_apps/image_classification.py +66 -0
  345. paddlex/inference/serving/basic_serving/_pipeline_apps/image_multilabel_classification.py +70 -0
  346. paddlex/inference/serving/basic_serving/_pipeline_apps/instance_segmentation.py +81 -0
  347. paddlex/inference/serving/basic_serving/_pipeline_apps/layout_parsing.py +115 -0
  348. paddlex/inference/serving/basic_serving/_pipeline_apps/m_3d_bev_detection.py +76 -0
  349. paddlex/inference/serving/basic_serving/_pipeline_apps/multilingual_speech_recognition.py +89 -0
  350. paddlex/inference/serving/basic_serving/_pipeline_apps/object_detection.py +74 -0
  351. paddlex/inference/serving/basic_serving/_pipeline_apps/ocr.py +99 -0
  352. paddlex/inference/serving/basic_serving/_pipeline_apps/open_vocabulary_detection.py +78 -0
  353. paddlex/inference/serving/basic_serving/_pipeline_apps/open_vocabulary_segmentation.py +85 -0
  354. paddlex/inference/serving/basic_serving/_pipeline_apps/pedestrian_attribute_recognition.py +81 -0
  355. paddlex/inference/serving/basic_serving/_pipeline_apps/pp_chatocrv3_doc.py +191 -0
  356. paddlex/inference/serving/basic_serving/_pipeline_apps/pp_chatocrv4_doc.py +221 -0
  357. paddlex/inference/serving/basic_serving/_pipeline_apps/pp_shituv2.py +218 -0
  358. paddlex/inference/serving/basic_serving/_pipeline_apps/pp_structurev3.py +136 -0
  359. paddlex/inference/serving/basic_serving/_pipeline_apps/rotated_object_detection.py +78 -0
  360. paddlex/inference/serving/basic_serving/_pipeline_apps/seal_recognition.py +103 -0
  361. paddlex/inference/serving/basic_serving/_pipeline_apps/semantic_segmentation.py +64 -0
  362. paddlex/inference/serving/basic_serving/_pipeline_apps/small_object_detection.py +69 -0
  363. paddlex/inference/serving/basic_serving/_pipeline_apps/table_recognition.py +105 -0
  364. paddlex/inference/serving/basic_serving/_pipeline_apps/table_recognition_v2.py +107 -0
  365. paddlex/inference/serving/basic_serving/_pipeline_apps/ts_anomaly_detection.py +62 -0
  366. paddlex/inference/serving/basic_serving/_pipeline_apps/ts_classification.py +61 -0
  367. paddlex/inference/serving/basic_serving/_pipeline_apps/ts_forecast.py +62 -0
  368. paddlex/inference/serving/basic_serving/_pipeline_apps/vehicle_attribute_recognition.py +81 -0
  369. paddlex/inference/serving/basic_serving/_pipeline_apps/video_classification.py +73 -0
  370. paddlex/inference/serving/basic_serving/_pipeline_apps/video_detection.py +89 -0
  371. paddlex/inference/serving/basic_serving/_server.py +35 -0
  372. paddlex/inference/serving/infra/__init__.py +13 -0
  373. paddlex/inference/serving/infra/config.py +36 -0
  374. paddlex/inference/serving/infra/models.py +72 -0
  375. paddlex/inference/serving/infra/storage.py +175 -0
  376. paddlex/inference/serving/infra/utils.py +259 -0
  377. paddlex/inference/serving/schemas/__init__.py +13 -0
  378. paddlex/inference/serving/schemas/anomaly_detection.py +39 -0
  379. paddlex/inference/serving/schemas/doc_preprocessor.py +54 -0
  380. paddlex/inference/serving/schemas/face_recognition.py +124 -0
  381. paddlex/inference/serving/schemas/formula_recognition.py +56 -0
  382. paddlex/inference/serving/schemas/human_keypoint_detection.py +55 -0
  383. paddlex/inference/serving/schemas/image_classification.py +45 -0
  384. paddlex/inference/serving/schemas/image_multilabel_classification.py +47 -0
  385. paddlex/inference/serving/schemas/instance_segmentation.py +53 -0
  386. paddlex/inference/serving/schemas/layout_parsing.py +72 -0
  387. paddlex/inference/serving/schemas/m_3d_bev_detection.py +48 -0
  388. paddlex/inference/serving/schemas/multilingual_speech_recognition.py +57 -0
  389. paddlex/inference/serving/schemas/object_detection.py +52 -0
  390. paddlex/inference/serving/schemas/ocr.py +60 -0
  391. paddlex/inference/serving/schemas/open_vocabulary_detection.py +52 -0
  392. paddlex/inference/serving/schemas/open_vocabulary_segmentation.py +52 -0
  393. paddlex/inference/serving/schemas/pedestrian_attribute_recognition.py +61 -0
  394. paddlex/inference/serving/schemas/pp_chatocrv3_doc.py +134 -0
  395. paddlex/inference/serving/schemas/pp_chatocrv4_doc.py +151 -0
  396. paddlex/inference/serving/schemas/pp_shituv2.py +124 -0
  397. paddlex/inference/serving/schemas/pp_structurev3.py +84 -0
  398. paddlex/inference/serving/schemas/rotated_object_detection.py +52 -0
  399. paddlex/inference/serving/schemas/seal_recognition.py +62 -0
  400. paddlex/inference/serving/schemas/semantic_segmentation.py +45 -0
  401. paddlex/inference/serving/schemas/shared/__init__.py +13 -0
  402. paddlex/inference/serving/schemas/shared/classification.py +23 -0
  403. paddlex/inference/serving/schemas/shared/image_segmentation.py +28 -0
  404. paddlex/inference/serving/schemas/shared/object_detection.py +24 -0
  405. paddlex/inference/serving/schemas/shared/ocr.py +25 -0
  406. paddlex/inference/serving/schemas/small_object_detection.py +52 -0
  407. paddlex/inference/serving/schemas/table_recognition.py +64 -0
  408. paddlex/inference/serving/schemas/table_recognition_v2.py +66 -0
  409. paddlex/inference/serving/schemas/ts_anomaly_detection.py +37 -0
  410. paddlex/inference/serving/schemas/ts_classification.py +38 -0
  411. paddlex/inference/serving/schemas/ts_forecast.py +37 -0
  412. paddlex/inference/serving/schemas/vehicle_attribute_recognition.py +61 -0
  413. paddlex/inference/serving/schemas/video_classification.py +44 -0
  414. paddlex/inference/serving/schemas/video_detection.py +56 -0
  415. paddlex/inference/utils/benchmark.py +23 -11
  416. paddlex/inference/utils/get_pipeline_path.py +2 -1
  417. paddlex/inference/utils/io/__init__.py +3 -0
  418. paddlex/inference/utils/io/readers.py +164 -17
  419. paddlex/inference/utils/io/writers.py +85 -2
  420. paddlex/inference/utils/new_ir_blacklist.py +6 -0
  421. paddlex/inference/utils/official_models.py +277 -211
  422. paddlex/inference/utils/pp_option.py +24 -4
  423. paddlex/model.py +12 -5
  424. paddlex/modules/3d_bev_detection/__init__.py +18 -0
  425. paddlex/modules/3d_bev_detection/dataset_checker/__init__.py +95 -0
  426. paddlex/modules/3d_bev_detection/dataset_checker/dataset_src/__init__.py +17 -0
  427. paddlex/modules/3d_bev_detection/dataset_checker/dataset_src/analyse_dataset.py +106 -0
  428. paddlex/modules/3d_bev_detection/dataset_checker/dataset_src/check_dataset.py +102 -0
  429. paddlex/modules/3d_bev_detection/evaluator.py +46 -0
  430. paddlex/modules/3d_bev_detection/exportor.py +22 -0
  431. paddlex/modules/3d_bev_detection/model_list.py +18 -0
  432. paddlex/modules/3d_bev_detection/trainer.py +70 -0
  433. paddlex/modules/__init__.py +34 -1
  434. paddlex/modules/base/build_model.py +1 -1
  435. paddlex/modules/base/dataset_checker/dataset_checker.py +6 -1
  436. paddlex/modules/base/evaluator.py +20 -4
  437. paddlex/modules/base/exportor.py +30 -5
  438. paddlex/modules/base/trainer.py +29 -6
  439. paddlex/modules/face_recognition/trainer.py +1 -23
  440. paddlex/modules/formula_recognition/__init__.py +5 -0
  441. paddlex/modules/formula_recognition/dataset_checker/__init__.py +113 -0
  442. paddlex/modules/formula_recognition/dataset_checker/dataset_src/__init__.py +19 -0
  443. paddlex/modules/formula_recognition/dataset_checker/dataset_src/analyse_dataset.py +157 -0
  444. paddlex/modules/formula_recognition/dataset_checker/dataset_src/check_dataset.py +80 -0
  445. paddlex/modules/formula_recognition/dataset_checker/dataset_src/convert_dataset.py +94 -0
  446. paddlex/modules/formula_recognition/dataset_checker/dataset_src/split_dataset.py +81 -0
  447. paddlex/modules/formula_recognition/evaluator.py +77 -0
  448. paddlex/modules/formula_recognition/exportor.py +22 -0
  449. paddlex/modules/formula_recognition/model_list.py +3 -0
  450. paddlex/modules/formula_recognition/trainer.py +121 -0
  451. paddlex/modules/image_classification/model_list.py +2 -0
  452. paddlex/modules/instance_segmentation/dataset_checker/__init__.py +15 -0
  453. paddlex/modules/keypoint_detection/__init__.py +18 -0
  454. paddlex/modules/keypoint_detection/dataset_checker/__init__.py +56 -0
  455. paddlex/modules/keypoint_detection/dataset_checker/dataset_src/__init__.py +15 -0
  456. paddlex/modules/keypoint_detection/dataset_checker/dataset_src/check_dataset.py +86 -0
  457. paddlex/modules/keypoint_detection/dataset_checker/dataset_src/utils/__init__.py +13 -0
  458. paddlex/modules/keypoint_detection/dataset_checker/dataset_src/utils/visualizer.py +119 -0
  459. paddlex/modules/keypoint_detection/evaluator.py +41 -0
  460. paddlex/modules/keypoint_detection/exportor.py +22 -0
  461. paddlex/modules/keypoint_detection/model_list.py +16 -0
  462. paddlex/modules/keypoint_detection/trainer.py +39 -0
  463. paddlex/modules/multilingual_speech_recognition/__init__.py +18 -0
  464. paddlex/modules/multilingual_speech_recognition/dataset_checker.py +27 -0
  465. paddlex/modules/multilingual_speech_recognition/evaluator.py +27 -0
  466. paddlex/modules/multilingual_speech_recognition/exportor.py +27 -0
  467. paddlex/modules/multilingual_speech_recognition/model_list.py +22 -0
  468. paddlex/modules/multilingual_speech_recognition/trainer.py +40 -0
  469. paddlex/modules/object_detection/evaluator.py +12 -1
  470. paddlex/modules/object_detection/model_list.py +10 -0
  471. paddlex/modules/object_detection/trainer.py +15 -1
  472. paddlex/modules/open_vocabulary_detection/__init__.py +18 -0
  473. paddlex/modules/open_vocabulary_detection/dataset_checker.py +29 -0
  474. paddlex/modules/open_vocabulary_detection/evaluator.py +29 -0
  475. paddlex/modules/open_vocabulary_detection/exportor.py +29 -0
  476. paddlex/modules/open_vocabulary_detection/model_list.py +18 -0
  477. paddlex/modules/open_vocabulary_detection/trainer.py +42 -0
  478. paddlex/modules/open_vocabulary_segmentation/__init__.py +18 -0
  479. paddlex/modules/open_vocabulary_segmentation/dataset_checker.py +29 -0
  480. paddlex/modules/open_vocabulary_segmentation/evaluator.py +29 -0
  481. paddlex/modules/open_vocabulary_segmentation/exportor.py +29 -0
  482. paddlex/modules/open_vocabulary_segmentation/model_list.py +19 -0
  483. paddlex/modules/open_vocabulary_segmentation/trainer.py +42 -0
  484. paddlex/modules/semantic_segmentation/dataset_checker/__init__.py +15 -0
  485. paddlex/modules/semantic_segmentation/exportor.py +9 -0
  486. paddlex/modules/semantic_segmentation/model_list.py +2 -0
  487. paddlex/modules/semantic_segmentation/trainer.py +2 -0
  488. paddlex/modules/table_recognition/dataset_checker/__init__.py +16 -1
  489. paddlex/modules/table_recognition/dataset_checker/dataset_src/check_dataset.py +13 -14
  490. paddlex/modules/table_recognition/model_list.py +2 -0
  491. paddlex/modules/text_detection/dataset_checker/__init__.py +16 -1
  492. paddlex/modules/text_detection/dataset_checker/dataset_src/check_dataset.py +13 -3
  493. paddlex/modules/text_detection/model_list.py +2 -0
  494. paddlex/modules/text_recognition/dataset_checker/__init__.py +16 -4
  495. paddlex/modules/text_recognition/dataset_checker/dataset_src/check_dataset.py +13 -3
  496. paddlex/modules/text_recognition/evaluator.py +4 -3
  497. paddlex/modules/text_recognition/exportor.py +0 -3
  498. paddlex/modules/text_recognition/model_list.py +14 -0
  499. paddlex/modules/text_recognition/trainer.py +4 -3
  500. paddlex/modules/ts_anomaly_detection/dataset_checker/__init__.py +15 -0
  501. paddlex/modules/ts_anomaly_detection/trainer.py +17 -1
  502. paddlex/modules/ts_classification/dataset_checker/__init__.py +15 -0
  503. paddlex/modules/ts_classification/trainer.py +17 -1
  504. paddlex/modules/ts_forecast/dataset_checker/__init__.py +15 -0
  505. paddlex/modules/ts_forecast/trainer.py +17 -1
  506. paddlex/modules/video_classification/__init__.py +18 -0
  507. paddlex/modules/video_classification/dataset_checker/__init__.py +93 -0
  508. paddlex/modules/video_classification/dataset_checker/dataset_src/__init__.py +18 -0
  509. paddlex/modules/video_classification/dataset_checker/dataset_src/analyse_dataset.py +93 -0
  510. paddlex/modules/video_classification/dataset_checker/dataset_src/check_dataset.py +121 -0
  511. paddlex/modules/video_classification/dataset_checker/dataset_src/split_dataset.py +82 -0
  512. paddlex/modules/video_classification/evaluator.py +44 -0
  513. paddlex/modules/video_classification/exportor.py +22 -0
  514. paddlex/modules/video_classification/model_list.py +19 -0
  515. paddlex/modules/video_classification/trainer.py +88 -0
  516. paddlex/modules/video_detection/__init__.py +18 -0
  517. paddlex/modules/video_detection/dataset_checker/__init__.py +86 -0
  518. paddlex/modules/video_detection/dataset_checker/dataset_src/__init__.py +17 -0
  519. paddlex/modules/video_detection/dataset_checker/dataset_src/analyse_dataset.py +101 -0
  520. paddlex/modules/video_detection/dataset_checker/dataset_src/check_dataset.py +134 -0
  521. paddlex/modules/video_detection/evaluator.py +42 -0
  522. paddlex/modules/video_detection/exportor.py +22 -0
  523. paddlex/modules/video_detection/model_list.py +15 -0
  524. paddlex/modules/video_detection/trainer.py +82 -0
  525. paddlex/ops/__init__.py +149 -0
  526. paddlex/ops/iou3d_nms/iou3d_cpu.cpp +264 -0
  527. paddlex/ops/iou3d_nms/iou3d_cpu.h +27 -0
  528. paddlex/ops/iou3d_nms/iou3d_nms.cpp +204 -0
  529. paddlex/ops/iou3d_nms/iou3d_nms.h +33 -0
  530. paddlex/ops/iou3d_nms/iou3d_nms_api.cpp +108 -0
  531. paddlex/ops/iou3d_nms/iou3d_nms_kernel.cu +482 -0
  532. paddlex/ops/setup.py +37 -0
  533. paddlex/ops/voxel/voxelize_op.cc +191 -0
  534. paddlex/ops/voxel/voxelize_op.cu +346 -0
  535. paddlex/paddle2onnx_requirements.txt +1 -0
  536. paddlex/paddlex_cli.py +339 -72
  537. paddlex/repo_apis/Paddle3D_api/__init__.py +17 -0
  538. paddlex/repo_apis/Paddle3D_api/bev_fusion/__init__.py +18 -0
  539. paddlex/repo_apis/Paddle3D_api/bev_fusion/config.py +118 -0
  540. paddlex/repo_apis/Paddle3D_api/bev_fusion/model.py +238 -0
  541. paddlex/repo_apis/Paddle3D_api/bev_fusion/register.py +55 -0
  542. paddlex/repo_apis/Paddle3D_api/bev_fusion/runner.py +104 -0
  543. paddlex/repo_apis/Paddle3D_api/pp3d_config.py +144 -0
  544. paddlex/repo_apis/PaddleClas_api/cls/model.py +6 -0
  545. paddlex/repo_apis/PaddleClas_api/cls/register.py +20 -2
  546. paddlex/repo_apis/PaddleDetection_api/instance_seg/config.py +8 -4
  547. paddlex/repo_apis/PaddleDetection_api/instance_seg/model.py +6 -0
  548. paddlex/repo_apis/PaddleDetection_api/object_det/config.py +27 -5
  549. paddlex/repo_apis/PaddleDetection_api/object_det/model.py +6 -0
  550. paddlex/repo_apis/PaddleDetection_api/object_det/official_categories.py +81 -0
  551. paddlex/repo_apis/PaddleDetection_api/object_det/register.py +182 -3
  552. paddlex/repo_apis/PaddleOCR_api/__init__.py +1 -0
  553. paddlex/repo_apis/PaddleOCR_api/formula_rec/__init__.py +16 -0
  554. paddlex/repo_apis/PaddleOCR_api/formula_rec/config.py +570 -0
  555. paddlex/repo_apis/PaddleOCR_api/formula_rec/model.py +402 -0
  556. paddlex/repo_apis/PaddleOCR_api/formula_rec/register.py +73 -0
  557. paddlex/repo_apis/PaddleOCR_api/formula_rec/runner.py +240 -0
  558. paddlex/repo_apis/PaddleOCR_api/table_rec/register.py +18 -0
  559. paddlex/repo_apis/PaddleOCR_api/text_det/register.py +18 -0
  560. paddlex/repo_apis/PaddleOCR_api/text_rec/config.py +21 -0
  561. paddlex/repo_apis/PaddleOCR_api/text_rec/model.py +6 -0
  562. paddlex/repo_apis/PaddleOCR_api/text_rec/register.py +126 -7
  563. paddlex/repo_apis/PaddleSeg_api/seg/config.py +9 -0
  564. paddlex/repo_apis/PaddleSeg_api/seg/model.py +10 -0
  565. paddlex/repo_apis/PaddleSeg_api/seg/register.py +20 -0
  566. paddlex/repo_apis/PaddleTS_api/ts_base/config.py +24 -0
  567. paddlex/repo_apis/PaddleTS_api/ts_base/model.py +11 -7
  568. paddlex/repo_apis/PaddleVideo_api/__init__.py +17 -0
  569. paddlex/repo_apis/PaddleVideo_api/config_utils.py +51 -0
  570. paddlex/repo_apis/PaddleVideo_api/video_cls/__init__.py +19 -0
  571. paddlex/repo_apis/PaddleVideo_api/video_cls/config.py +547 -0
  572. paddlex/repo_apis/PaddleVideo_api/video_cls/model.py +346 -0
  573. paddlex/repo_apis/PaddleVideo_api/video_cls/register.py +71 -0
  574. paddlex/repo_apis/PaddleVideo_api/video_cls/runner.py +205 -0
  575. paddlex/repo_apis/PaddleVideo_api/video_det/__init__.py +19 -0
  576. paddlex/repo_apis/PaddleVideo_api/video_det/config.py +548 -0
  577. paddlex/repo_apis/PaddleVideo_api/video_det/model.py +298 -0
  578. paddlex/repo_apis/PaddleVideo_api/video_det/register.py +45 -0
  579. paddlex/repo_apis/PaddleVideo_api/video_det/runner.py +200 -0
  580. paddlex/repo_apis/base/runner.py +2 -1
  581. paddlex/repo_manager/meta.py +29 -2
  582. paddlex/repo_manager/repo.py +24 -5
  583. paddlex/repo_manager/requirements.txt +10 -7
  584. paddlex/repo_manager/utils.py +62 -1
  585. paddlex/serving_requirements.txt +9 -0
  586. paddlex/utils/config.py +4 -3
  587. paddlex/utils/custom_device_whitelist.py +457 -0
  588. paddlex/utils/device.py +74 -26
  589. paddlex/utils/env.py +28 -0
  590. paddlex/utils/flags.py +4 -0
  591. paddlex/utils/fonts/__init__.py +48 -5
  592. paddlex/utils/lazy_loader.py +2 -0
  593. paddlex/utils/logging.py +1 -2
  594. paddlex/utils/pipeline_arguments.py +711 -0
  595. paddlex-3.0.0rc0.dist-info/METADATA +1035 -0
  596. paddlex-3.0.0rc0.dist-info/RECORD +1015 -0
  597. paddlex-3.0.0rc0.dist-info/WHEEL +5 -0
  598. paddlex/configs/face_recognition/MobileFaceNet.yaml +0 -44
  599. paddlex/configs/face_recognition/ResNet50_face.yaml +0 -44
  600. paddlex/configs/formula_recognition/LaTeX_OCR_rec.yaml +0 -40
  601. paddlex/configs/image_classification/CLIP_vit_base_patch16_224.yaml +0 -41
  602. paddlex/configs/image_classification/CLIP_vit_large_patch14_224.yaml +0 -41
  603. paddlex/configs/image_classification/ConvNeXt_large_384.yaml +0 -41
  604. paddlex/configs/object_detection/YOLOX-X.yaml +0 -40
  605. paddlex/configs/semantic_segmentation/SeaFormer_base.yaml +0 -40
  606. paddlex/configs/semantic_segmentation/SeaFormer_large.yaml +0 -40
  607. paddlex/configs/semantic_segmentation/SeaFormer_small.yaml +0 -40
  608. paddlex/configs/semantic_segmentation/SeaFormer_tiny.yaml +0 -40
  609. paddlex/inference/components/__init__.py +0 -18
  610. paddlex/inference/components/base.py +0 -292
  611. paddlex/inference/components/llm/__init__.py +0 -25
  612. paddlex/inference/components/llm/base.py +0 -65
  613. paddlex/inference/components/llm/erniebot.py +0 -212
  614. paddlex/inference/components/paddle_predictor/__init__.py +0 -20
  615. paddlex/inference/components/paddle_predictor/predictor.py +0 -332
  616. paddlex/inference/components/retrieval/__init__.py +0 -15
  617. paddlex/inference/components/retrieval/faiss.py +0 -359
  618. paddlex/inference/components/task_related/__init__.py +0 -33
  619. paddlex/inference/components/task_related/clas.py +0 -124
  620. paddlex/inference/components/task_related/det.py +0 -284
  621. paddlex/inference/components/task_related/instance_seg.py +0 -89
  622. paddlex/inference/components/task_related/seal_det_warp.py +0 -940
  623. paddlex/inference/components/task_related/seg.py +0 -40
  624. paddlex/inference/components/task_related/table_rec.py +0 -191
  625. paddlex/inference/components/task_related/text_det.py +0 -895
  626. paddlex/inference/components/task_related/text_rec.py +0 -353
  627. paddlex/inference/components/task_related/warp.py +0 -43
  628. paddlex/inference/components/transforms/__init__.py +0 -16
  629. paddlex/inference/components/transforms/image/__init__.py +0 -15
  630. paddlex/inference/components/transforms/image/common.py +0 -598
  631. paddlex/inference/components/transforms/image/funcs.py +0 -58
  632. paddlex/inference/components/transforms/read_data.py +0 -67
  633. paddlex/inference/components/transforms/ts/__init__.py +0 -15
  634. paddlex/inference/components/transforms/ts/common.py +0 -393
  635. paddlex/inference/components/transforms/ts/funcs.py +0 -424
  636. paddlex/inference/models/anomaly_detection.py +0 -87
  637. paddlex/inference/models/base/base_predictor.py +0 -76
  638. paddlex/inference/models/base/basic_predictor.py +0 -122
  639. paddlex/inference/models/face_recognition.py +0 -21
  640. paddlex/inference/models/formula_recognition.py +0 -55
  641. paddlex/inference/models/general_recognition.py +0 -99
  642. paddlex/inference/models/image_classification.py +0 -101
  643. paddlex/inference/models/image_unwarping.py +0 -43
  644. paddlex/inference/models/instance_segmentation.py +0 -66
  645. paddlex/inference/models/multilabel_classification.py +0 -33
  646. paddlex/inference/models/object_detection.py +0 -129
  647. paddlex/inference/models/semantic_segmentation.py +0 -86
  648. paddlex/inference/models/table_recognition.py +0 -106
  649. paddlex/inference/models/text_detection.py +0 -105
  650. paddlex/inference/models/text_recognition.py +0 -78
  651. paddlex/inference/models/ts_ad.py +0 -68
  652. paddlex/inference/models/ts_cls.py +0 -57
  653. paddlex/inference/models/ts_fc.py +0 -73
  654. paddlex/inference/pipelines/attribute_recognition.py +0 -92
  655. paddlex/inference/pipelines/face_recognition.py +0 -49
  656. paddlex/inference/pipelines/formula_recognition.py +0 -102
  657. paddlex/inference/pipelines/layout_parsing/layout_parsing.py +0 -362
  658. paddlex/inference/pipelines/ocr.py +0 -80
  659. paddlex/inference/pipelines/pp_shitu_v2.py +0 -152
  660. paddlex/inference/pipelines/ppchatocrv3/__init__.py +0 -15
  661. paddlex/inference/pipelines/ppchatocrv3/ch_prompt.yaml +0 -14
  662. paddlex/inference/pipelines/ppchatocrv3/ppchatocrv3.py +0 -717
  663. paddlex/inference/pipelines/ppchatocrv3/utils.py +0 -168
  664. paddlex/inference/pipelines/seal_recognition.py +0 -152
  665. paddlex/inference/pipelines/serving/__init__.py +0 -17
  666. paddlex/inference/pipelines/serving/_pipeline_apps/__init__.py +0 -205
  667. paddlex/inference/pipelines/serving/_pipeline_apps/anomaly_detection.py +0 -80
  668. paddlex/inference/pipelines/serving/_pipeline_apps/face_recognition.py +0 -317
  669. paddlex/inference/pipelines/serving/_pipeline_apps/formula_recognition.py +0 -119
  670. paddlex/inference/pipelines/serving/_pipeline_apps/image_classification.py +0 -101
  671. paddlex/inference/pipelines/serving/_pipeline_apps/instance_segmentation.py +0 -112
  672. paddlex/inference/pipelines/serving/_pipeline_apps/layout_parsing.py +0 -205
  673. paddlex/inference/pipelines/serving/_pipeline_apps/multi_label_image_classification.py +0 -90
  674. paddlex/inference/pipelines/serving/_pipeline_apps/object_detection.py +0 -90
  675. paddlex/inference/pipelines/serving/_pipeline_apps/ocr.py +0 -98
  676. paddlex/inference/pipelines/serving/_pipeline_apps/pedestrian_attribute_recognition.py +0 -102
  677. paddlex/inference/pipelines/serving/_pipeline_apps/pp_shitu_v2.py +0 -319
  678. paddlex/inference/pipelines/serving/_pipeline_apps/ppchatocrv3.py +0 -445
  679. paddlex/inference/pipelines/serving/_pipeline_apps/seal_recognition.py +0 -110
  680. paddlex/inference/pipelines/serving/_pipeline_apps/semantic_segmentation.py +0 -82
  681. paddlex/inference/pipelines/serving/_pipeline_apps/small_object_detection.py +0 -92
  682. paddlex/inference/pipelines/serving/_pipeline_apps/table_recognition.py +0 -110
  683. paddlex/inference/pipelines/serving/_pipeline_apps/ts_ad.py +0 -68
  684. paddlex/inference/pipelines/serving/_pipeline_apps/ts_cls.py +0 -68
  685. paddlex/inference/pipelines/serving/_pipeline_apps/ts_fc.py +0 -68
  686. paddlex/inference/pipelines/serving/_pipeline_apps/vehicle_attribute_recognition.py +0 -102
  687. paddlex/inference/pipelines/serving/app.py +0 -164
  688. paddlex/inference/pipelines/serving/models.py +0 -30
  689. paddlex/inference/pipelines/serving/server.py +0 -25
  690. paddlex/inference/pipelines/serving/storage.py +0 -161
  691. paddlex/inference/pipelines/serving/utils.py +0 -190
  692. paddlex/inference/pipelines/single_model_pipeline.py +0 -76
  693. paddlex/inference/pipelines/table_recognition/table_recognition.py +0 -193
  694. paddlex/inference/results/__init__.py +0 -31
  695. paddlex/inference/results/attribute_rec.py +0 -89
  696. paddlex/inference/results/base.py +0 -43
  697. paddlex/inference/results/chat_ocr.py +0 -158
  698. paddlex/inference/results/clas.py +0 -133
  699. paddlex/inference/results/det.py +0 -86
  700. paddlex/inference/results/face_rec.py +0 -34
  701. paddlex/inference/results/formula_rec.py +0 -363
  702. paddlex/inference/results/instance_seg.py +0 -152
  703. paddlex/inference/results/ocr.py +0 -157
  704. paddlex/inference/results/seal_rec.py +0 -50
  705. paddlex/inference/results/seg.py +0 -72
  706. paddlex/inference/results/shitu.py +0 -35
  707. paddlex/inference/results/table_rec.py +0 -109
  708. paddlex/inference/results/text_det.py +0 -33
  709. paddlex/inference/results/text_rec.py +0 -66
  710. paddlex/inference/results/ts.py +0 -37
  711. paddlex/inference/results/utils/mixin.py +0 -204
  712. paddlex/inference/results/warp.py +0 -31
  713. paddlex/inference/utils/process_hook.py +0 -54
  714. paddlex/pipelines/OCR.yaml +0 -8
  715. paddlex/pipelines/PP-ChatOCRv3-doc.yaml +0 -27
  716. paddlex/pipelines/PP-ShiTuV2.yaml +0 -13
  717. paddlex/pipelines/anomaly_detection.yaml +0 -7
  718. paddlex/pipelines/face_recognition.yaml +0 -13
  719. paddlex/pipelines/formula_recognition.yaml +0 -8
  720. paddlex/pipelines/image_classification.yaml +0 -7
  721. paddlex/pipelines/instance_segmentation.yaml +0 -7
  722. paddlex/pipelines/layout_parsing.yaml +0 -14
  723. paddlex/pipelines/multi_label_image_classification.yaml +0 -7
  724. paddlex/pipelines/object_detection.yaml +0 -7
  725. paddlex/pipelines/pedestrian_attribute_recognition.yaml +0 -7
  726. paddlex/pipelines/seal_recognition.yaml +0 -10
  727. paddlex/pipelines/semantic_segmentation.yaml +0 -7
  728. paddlex/pipelines/small_object_detection.yaml +0 -7
  729. paddlex/pipelines/table_recognition.yaml +0 -12
  730. paddlex/pipelines/ts_ad.yaml +0 -7
  731. paddlex/pipelines/ts_cls.yaml +0 -7
  732. paddlex/pipelines/ts_fc.yaml +0 -7
  733. paddlex/pipelines/vehicle_attribute_recognition.yaml +0 -7
  734. paddlex/utils/fonts/PingFang-SC-Regular.ttf +0 -0
  735. paddlex-3.0.0b2.dist-info/METADATA +0 -760
  736. paddlex-3.0.0b2.dist-info/RECORD +0 -646
  737. paddlex-3.0.0b2.dist-info/WHEEL +0 -5
  738. /paddlex/configs/{doc_text_orientation → modules/doc_text_orientation}/PP-LCNet_x1_0_doc_ori.yaml +0 -0
  739. /paddlex/configs/{face_detection → modules/face_detection}/BlazeFace-FPN-SSH.yaml +0 -0
  740. /paddlex/configs/{face_detection → modules/face_detection}/BlazeFace.yaml +0 -0
  741. /paddlex/configs/{face_detection → modules/face_detection}/PP-YOLOE_plus-S_face.yaml +0 -0
  742. /paddlex/configs/{face_detection → modules/face_detection}/PicoDet_LCNet_x2_5_face.yaml +0 -0
  743. /paddlex/configs/{human_detection → modules/human_detection}/PP-YOLOE-L_human.yaml +0 -0
  744. /paddlex/configs/{human_detection → modules/human_detection}/PP-YOLOE-S_human.yaml +0 -0
  745. /paddlex/configs/{anomaly_detection → modules/image_anomaly_detection}/STFPM.yaml +0 -0
  746. /paddlex/configs/{image_classification → modules/image_classification}/ConvNeXt_base_224.yaml +0 -0
  747. /paddlex/configs/{image_classification → modules/image_classification}/ConvNeXt_base_384.yaml +0 -0
  748. /paddlex/configs/{image_classification → modules/image_classification}/ConvNeXt_large_224.yaml +0 -0
  749. /paddlex/configs/{image_classification → modules/image_classification}/ConvNeXt_small.yaml +0 -0
  750. /paddlex/configs/{image_classification → modules/image_classification}/ConvNeXt_tiny.yaml +0 -0
  751. /paddlex/configs/{image_classification → modules/image_classification}/FasterNet-L.yaml +0 -0
  752. /paddlex/configs/{image_classification → modules/image_classification}/FasterNet-M.yaml +0 -0
  753. /paddlex/configs/{image_classification → modules/image_classification}/FasterNet-S.yaml +0 -0
  754. /paddlex/configs/{image_classification → modules/image_classification}/FasterNet-T0.yaml +0 -0
  755. /paddlex/configs/{image_classification → modules/image_classification}/FasterNet-T1.yaml +0 -0
  756. /paddlex/configs/{image_classification → modules/image_classification}/FasterNet-T2.yaml +0 -0
  757. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV1_x0_25.yaml +0 -0
  758. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV1_x0_5.yaml +0 -0
  759. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV1_x0_75.yaml +0 -0
  760. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV1_x1_0.yaml +0 -0
  761. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV2_x0_25.yaml +0 -0
  762. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV2_x0_5.yaml +0 -0
  763. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV2_x1_0.yaml +0 -0
  764. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV2_x1_5.yaml +0 -0
  765. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV2_x2_0.yaml +0 -0
  766. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV3_large_x0_35.yaml +0 -0
  767. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV3_large_x0_5.yaml +0 -0
  768. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV3_large_x0_75.yaml +0 -0
  769. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV3_large_x1_0.yaml +0 -0
  770. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV3_large_x1_25.yaml +0 -0
  771. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV3_small_x0_35.yaml +0 -0
  772. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV3_small_x0_5.yaml +0 -0
  773. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV3_small_x0_75.yaml +0 -0
  774. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV3_small_x1_0.yaml +0 -0
  775. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV3_small_x1_25.yaml +0 -0
  776. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV4_conv_large.yaml +0 -0
  777. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV4_conv_medium.yaml +0 -0
  778. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV4_conv_small.yaml +0 -0
  779. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV4_hybrid_large.yaml +0 -0
  780. /paddlex/configs/{image_classification → modules/image_classification}/MobileNetV4_hybrid_medium.yaml +0 -0
  781. /paddlex/configs/{image_classification → modules/image_classification}/PP-HGNetV2-B0.yaml +0 -0
  782. /paddlex/configs/{image_classification → modules/image_classification}/PP-HGNetV2-B1.yaml +0 -0
  783. /paddlex/configs/{image_classification → modules/image_classification}/PP-HGNetV2-B2.yaml +0 -0
  784. /paddlex/configs/{image_classification → modules/image_classification}/PP-HGNetV2-B3.yaml +0 -0
  785. /paddlex/configs/{image_classification → modules/image_classification}/PP-HGNetV2-B4.yaml +0 -0
  786. /paddlex/configs/{image_classification → modules/image_classification}/PP-HGNetV2-B5.yaml +0 -0
  787. /paddlex/configs/{image_classification → modules/image_classification}/PP-HGNetV2-B6.yaml +0 -0
  788. /paddlex/configs/{image_classification → modules/image_classification}/PP-HGNet_base.yaml +0 -0
  789. /paddlex/configs/{image_classification → modules/image_classification}/PP-HGNet_small.yaml +0 -0
  790. /paddlex/configs/{image_classification → modules/image_classification}/PP-HGNet_tiny.yaml +0 -0
  791. /paddlex/configs/{image_classification → modules/image_classification}/PP-LCNetV2_base.yaml +0 -0
  792. /paddlex/configs/{image_classification → modules/image_classification}/PP-LCNetV2_large.yaml +0 -0
  793. /paddlex/configs/{image_classification → modules/image_classification}/PP-LCNetV2_small.yaml +0 -0
  794. /paddlex/configs/{image_classification → modules/image_classification}/PP-LCNet_x0_25.yaml +0 -0
  795. /paddlex/configs/{image_classification → modules/image_classification}/PP-LCNet_x0_35.yaml +0 -0
  796. /paddlex/configs/{image_classification → modules/image_classification}/PP-LCNet_x0_5.yaml +0 -0
  797. /paddlex/configs/{image_classification → modules/image_classification}/PP-LCNet_x0_75.yaml +0 -0
  798. /paddlex/configs/{image_classification → modules/image_classification}/PP-LCNet_x1_0.yaml +0 -0
  799. /paddlex/configs/{image_classification → modules/image_classification}/PP-LCNet_x1_5.yaml +0 -0
  800. /paddlex/configs/{image_classification → modules/image_classification}/PP-LCNet_x2_0.yaml +0 -0
  801. /paddlex/configs/{image_classification → modules/image_classification}/PP-LCNet_x2_5.yaml +0 -0
  802. /paddlex/configs/{image_classification → modules/image_classification}/ResNet101.yaml +0 -0
  803. /paddlex/configs/{image_classification → modules/image_classification}/ResNet101_vd.yaml +0 -0
  804. /paddlex/configs/{image_classification → modules/image_classification}/ResNet152.yaml +0 -0
  805. /paddlex/configs/{image_classification → modules/image_classification}/ResNet152_vd.yaml +0 -0
  806. /paddlex/configs/{image_classification → modules/image_classification}/ResNet18.yaml +0 -0
  807. /paddlex/configs/{image_classification → modules/image_classification}/ResNet18_vd.yaml +0 -0
  808. /paddlex/configs/{image_classification → modules/image_classification}/ResNet200_vd.yaml +0 -0
  809. /paddlex/configs/{image_classification → modules/image_classification}/ResNet34.yaml +0 -0
  810. /paddlex/configs/{image_classification → modules/image_classification}/ResNet34_vd.yaml +0 -0
  811. /paddlex/configs/{image_classification → modules/image_classification}/ResNet50.yaml +0 -0
  812. /paddlex/configs/{image_classification → modules/image_classification}/ResNet50_vd.yaml +0 -0
  813. /paddlex/configs/{image_classification → modules/image_classification}/StarNet-S1.yaml +0 -0
  814. /paddlex/configs/{image_classification → modules/image_classification}/StarNet-S2.yaml +0 -0
  815. /paddlex/configs/{image_classification → modules/image_classification}/StarNet-S3.yaml +0 -0
  816. /paddlex/configs/{image_classification → modules/image_classification}/StarNet-S4.yaml +0 -0
  817. /paddlex/configs/{image_classification → modules/image_classification}/SwinTransformer_base_patch4_window12_384.yaml +0 -0
  818. /paddlex/configs/{image_classification → modules/image_classification}/SwinTransformer_base_patch4_window7_224.yaml +0 -0
  819. /paddlex/configs/{image_classification → modules/image_classification}/SwinTransformer_large_patch4_window12_384.yaml +0 -0
  820. /paddlex/configs/{image_classification → modules/image_classification}/SwinTransformer_large_patch4_window7_224.yaml +0 -0
  821. /paddlex/configs/{image_classification → modules/image_classification}/SwinTransformer_small_patch4_window7_224.yaml +0 -0
  822. /paddlex/configs/{image_classification → modules/image_classification}/SwinTransformer_tiny_patch4_window7_224.yaml +0 -0
  823. /paddlex/configs/{general_recognition → modules/image_feature}/PP-ShiTuV2_rec.yaml +0 -0
  824. /paddlex/configs/{general_recognition → modules/image_feature}/PP-ShiTuV2_rec_CLIP_vit_base.yaml +0 -0
  825. /paddlex/configs/{general_recognition → modules/image_feature}/PP-ShiTuV2_rec_CLIP_vit_large.yaml +0 -0
  826. /paddlex/configs/{multilabel_classification → modules/image_multilabel_classification}/CLIP_vit_base_patch16_448_ML.yaml +0 -0
  827. /paddlex/configs/{multilabel_classification → modules/image_multilabel_classification}/PP-HGNetV2-B0_ML.yaml +0 -0
  828. /paddlex/configs/{multilabel_classification → modules/image_multilabel_classification}/PP-HGNetV2-B4_ML.yaml +0 -0
  829. /paddlex/configs/{multilabel_classification → modules/image_multilabel_classification}/PP-HGNetV2-B6_ML.yaml +0 -0
  830. /paddlex/configs/{multilabel_classification → modules/image_multilabel_classification}/PP-LCNet_x1_0_ML.yaml +0 -0
  831. /paddlex/configs/{multilabel_classification → modules/image_multilabel_classification}/ResNet50_ML.yaml +0 -0
  832. /paddlex/configs/{image_unwarping → modules/image_unwarping}/UVDoc.yaml +0 -0
  833. /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/Cascade-MaskRCNN-ResNet50-FPN.yaml +0 -0
  834. /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/Cascade-MaskRCNN-ResNet50-vd-SSLDv2-FPN.yaml +0 -0
  835. /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/Mask-RT-DETR-H.yaml +0 -0
  836. /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/Mask-RT-DETR-L.yaml +0 -0
  837. /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/Mask-RT-DETR-M.yaml +0 -0
  838. /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/Mask-RT-DETR-S.yaml +0 -0
  839. /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/Mask-RT-DETR-X.yaml +0 -0
  840. /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/MaskRCNN-ResNeXt101-vd-FPN.yaml +0 -0
  841. /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/MaskRCNN-ResNet101-FPN.yaml +0 -0
  842. /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/MaskRCNN-ResNet101-vd-FPN.yaml +0 -0
  843. /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/MaskRCNN-ResNet50-FPN.yaml +0 -0
  844. /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/MaskRCNN-ResNet50-vd-FPN.yaml +0 -0
  845. /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/MaskRCNN-ResNet50.yaml +0 -0
  846. /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/PP-YOLOE_seg-S.yaml +0 -0
  847. /paddlex/configs/{instance_segmentation → modules/instance_segmentation}/SOLOv2.yaml +0 -0
  848. /paddlex/configs/{structure_analysis → modules/layout_detection}/PicoDet-L_layout_17cls.yaml +0 -0
  849. /paddlex/configs/{structure_analysis → modules/layout_detection}/PicoDet-L_layout_3cls.yaml +0 -0
  850. /paddlex/configs/{structure_analysis → modules/layout_detection}/PicoDet-S_layout_17cls.yaml +0 -0
  851. /paddlex/configs/{structure_analysis → modules/layout_detection}/PicoDet-S_layout_3cls.yaml +0 -0
  852. /paddlex/configs/{structure_analysis → modules/layout_detection}/PicoDet_layout_1x.yaml +0 -0
  853. /paddlex/configs/{structure_analysis → modules/layout_detection}/PicoDet_layout_1x_table.yaml +0 -0
  854. /paddlex/configs/{structure_analysis → modules/layout_detection}/RT-DETR-H_layout_17cls.yaml +0 -0
  855. /paddlex/configs/{structure_analysis → modules/layout_detection}/RT-DETR-H_layout_3cls.yaml +0 -0
  856. /paddlex/configs/{mainbody_detection → modules/mainbody_detection}/PP-ShiTuV2_det.yaml +0 -0
  857. /paddlex/configs/{object_detection → modules/object_detection}/Cascade-FasterRCNN-ResNet50-FPN.yaml +0 -0
  858. /paddlex/configs/{object_detection → modules/object_detection}/Cascade-FasterRCNN-ResNet50-vd-SSLDv2-FPN.yaml +0 -0
  859. /paddlex/configs/{object_detection → modules/object_detection}/CenterNet-DLA-34.yaml +0 -0
  860. /paddlex/configs/{object_detection → modules/object_detection}/CenterNet-ResNet50.yaml +0 -0
  861. /paddlex/configs/{object_detection → modules/object_detection}/DETR-R50.yaml +0 -0
  862. /paddlex/configs/{object_detection → modules/object_detection}/FCOS-ResNet50.yaml +0 -0
  863. /paddlex/configs/{object_detection → modules/object_detection}/FasterRCNN-ResNeXt101-vd-FPN.yaml +0 -0
  864. /paddlex/configs/{object_detection → modules/object_detection}/FasterRCNN-ResNet101-FPN.yaml +0 -0
  865. /paddlex/configs/{object_detection → modules/object_detection}/FasterRCNN-ResNet101.yaml +0 -0
  866. /paddlex/configs/{object_detection → modules/object_detection}/FasterRCNN-ResNet34-FPN.yaml +0 -0
  867. /paddlex/configs/{object_detection → modules/object_detection}/FasterRCNN-ResNet50-FPN.yaml +0 -0
  868. /paddlex/configs/{object_detection → modules/object_detection}/FasterRCNN-ResNet50-vd-FPN.yaml +0 -0
  869. /paddlex/configs/{object_detection → modules/object_detection}/FasterRCNN-ResNet50-vd-SSLDv2-FPN.yaml +0 -0
  870. /paddlex/configs/{object_detection → modules/object_detection}/FasterRCNN-ResNet50.yaml +0 -0
  871. /paddlex/configs/{object_detection → modules/object_detection}/FasterRCNN-Swin-Tiny-FPN.yaml +0 -0
  872. /paddlex/configs/{object_detection → modules/object_detection}/PP-YOLOE_plus-L.yaml +0 -0
  873. /paddlex/configs/{object_detection → modules/object_detection}/PP-YOLOE_plus-M.yaml +0 -0
  874. /paddlex/configs/{object_detection → modules/object_detection}/PP-YOLOE_plus-S.yaml +0 -0
  875. /paddlex/configs/{object_detection → modules/object_detection}/PP-YOLOE_plus-X.yaml +0 -0
  876. /paddlex/configs/{object_detection → modules/object_detection}/PicoDet-L.yaml +0 -0
  877. /paddlex/configs/{object_detection → modules/object_detection}/PicoDet-M.yaml +0 -0
  878. /paddlex/configs/{object_detection → modules/object_detection}/PicoDet-S.yaml +0 -0
  879. /paddlex/configs/{object_detection → modules/object_detection}/PicoDet-XS.yaml +0 -0
  880. /paddlex/configs/{object_detection → modules/object_detection}/RT-DETR-H.yaml +0 -0
  881. /paddlex/configs/{object_detection → modules/object_detection}/RT-DETR-L.yaml +0 -0
  882. /paddlex/configs/{object_detection → modules/object_detection}/RT-DETR-R18.yaml +0 -0
  883. /paddlex/configs/{object_detection → modules/object_detection}/RT-DETR-R50.yaml +0 -0
  884. /paddlex/configs/{object_detection → modules/object_detection}/RT-DETR-X.yaml +0 -0
  885. /paddlex/configs/{object_detection → modules/object_detection}/YOLOX-L.yaml +0 -0
  886. /paddlex/configs/{object_detection → modules/object_detection}/YOLOX-M.yaml +0 -0
  887. /paddlex/configs/{object_detection → modules/object_detection}/YOLOX-N.yaml +0 -0
  888. /paddlex/configs/{object_detection → modules/object_detection}/YOLOX-S.yaml +0 -0
  889. /paddlex/configs/{object_detection → modules/object_detection}/YOLOX-T.yaml +0 -0
  890. /paddlex/configs/{object_detection → modules/object_detection}/YOLOv3-DarkNet53.yaml +0 -0
  891. /paddlex/configs/{object_detection → modules/object_detection}/YOLOv3-MobileNetV3.yaml +0 -0
  892. /paddlex/configs/{object_detection → modules/object_detection}/YOLOv3-ResNet50_vd_DCN.yaml +0 -0
  893. /paddlex/configs/{pedestrian_attribute → modules/pedestrian_attribute_recognition}/PP-LCNet_x1_0_pedestrian_attribute.yaml +0 -0
  894. /paddlex/configs/{text_detection_seal → modules/seal_text_detection}/PP-OCRv4_mobile_seal_det.yaml +0 -0
  895. /paddlex/configs/{text_detection_seal → modules/seal_text_detection}/PP-OCRv4_server_seal_det.yaml +0 -0
  896. /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/Deeplabv3-R101.yaml +0 -0
  897. /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/Deeplabv3-R50.yaml +0 -0
  898. /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/Deeplabv3_Plus-R101.yaml +0 -0
  899. /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/Deeplabv3_Plus-R50.yaml +0 -0
  900. /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/OCRNet_HRNet-W18.yaml +0 -0
  901. /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/OCRNet_HRNet-W48.yaml +0 -0
  902. /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/PP-LiteSeg-B.yaml +0 -0
  903. /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/PP-LiteSeg-T.yaml +0 -0
  904. /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/SegFormer-B0.yaml +0 -0
  905. /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/SegFormer-B1.yaml +0 -0
  906. /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/SegFormer-B2.yaml +0 -0
  907. /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/SegFormer-B3.yaml +0 -0
  908. /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/SegFormer-B4.yaml +0 -0
  909. /paddlex/configs/{semantic_segmentation → modules/semantic_segmentation}/SegFormer-B5.yaml +0 -0
  910. /paddlex/configs/{small_object_detection → modules/small_object_detection}/PP-YOLOE_plus_SOD-L.yaml +0 -0
  911. /paddlex/configs/{small_object_detection → modules/small_object_detection}/PP-YOLOE_plus_SOD-S.yaml +0 -0
  912. /paddlex/configs/{small_object_detection → modules/small_object_detection}/PP-YOLOE_plus_SOD-largesize-L.yaml +0 -0
  913. /paddlex/configs/{table_recognition → modules/table_structure_recognition}/SLANet.yaml +0 -0
  914. /paddlex/configs/{table_recognition → modules/table_structure_recognition}/SLANet_plus.yaml +0 -0
  915. /paddlex/configs/{text_detection → modules/text_detection}/PP-OCRv4_mobile_det.yaml +0 -0
  916. /paddlex/configs/{text_detection → modules/text_detection}/PP-OCRv4_server_det.yaml +0 -0
  917. /paddlex/configs/{text_recognition → modules/text_recognition}/PP-OCRv4_mobile_rec.yaml +0 -0
  918. /paddlex/configs/{text_recognition → modules/text_recognition}/PP-OCRv4_server_rec.yaml +0 -0
  919. /paddlex/configs/{text_recognition → modules/text_recognition}/ch_RepSVTR_rec.yaml +0 -0
  920. /paddlex/configs/{text_recognition → modules/text_recognition}/ch_SVTRv2_rec.yaml +0 -0
  921. /paddlex/configs/{ts_anomaly_detection → modules/ts_anomaly_detection}/AutoEncoder_ad.yaml +0 -0
  922. /paddlex/configs/{ts_anomaly_detection → modules/ts_anomaly_detection}/DLinear_ad.yaml +0 -0
  923. /paddlex/configs/{ts_anomaly_detection → modules/ts_anomaly_detection}/Nonstationary_ad.yaml +0 -0
  924. /paddlex/configs/{ts_anomaly_detection → modules/ts_anomaly_detection}/PatchTST_ad.yaml +0 -0
  925. /paddlex/configs/{ts_anomaly_detection → modules/ts_anomaly_detection}/TimesNet_ad.yaml +0 -0
  926. /paddlex/configs/{ts_classification → modules/ts_classification}/TimesNet_cls.yaml +0 -0
  927. /paddlex/configs/{ts_forecast → modules/ts_forecast}/DLinear.yaml +0 -0
  928. /paddlex/configs/{ts_forecast → modules/ts_forecast}/NLinear.yaml +0 -0
  929. /paddlex/configs/{ts_forecast → modules/ts_forecast}/Nonstationary.yaml +0 -0
  930. /paddlex/configs/{ts_forecast → modules/ts_forecast}/PatchTST.yaml +0 -0
  931. /paddlex/configs/{ts_forecast → modules/ts_forecast}/RLinear.yaml +0 -0
  932. /paddlex/configs/{ts_forecast → modules/ts_forecast}/TiDE.yaml +0 -0
  933. /paddlex/configs/{ts_forecast → modules/ts_forecast}/TimesNet.yaml +0 -0
  934. /paddlex/configs/{vehicle_attribute → modules/vehicle_attribute_recognition}/PP-LCNet_x1_0_vehicle_attribute.yaml +0 -0
  935. /paddlex/configs/{vehicle_detection → modules/vehicle_detection}/PP-YOLOE-L_vehicle.yaml +0 -0
  936. /paddlex/configs/{vehicle_detection → modules/vehicle_detection}/PP-YOLOE-S_vehicle.yaml +0 -0
  937. /paddlex/inference/{results/utils → common}/__init__.py +0 -0
  938. {paddlex-3.0.0b2.dist-info → paddlex-3.0.0rc0.dist-info}/LICENSE +0 -0
  939. {paddlex-3.0.0b2.dist-info → paddlex-3.0.0rc0.dist-info}/entry_points.txt +0 -0
  940. {paddlex-3.0.0b2.dist-info → paddlex-3.0.0rc0.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,2141 @@
1
+ # copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import bisect
16
+ import io
17
+ import itertools
18
+ import json
19
+ import os
20
+ import re
21
+ import six
22
+ import inspect
23
+ import unicodedata
24
+ import functools
25
+ from collections import OrderedDict
26
+ from dataclasses import asdict, dataclass
27
+ from typing import Any, Dict, List, Optional, Tuple, Union
28
+
29
+ import numpy
30
+ import numpy as np
31
+ import lazy_paddle as paddle
32
+ from jinja2 import Template
33
+ from jinja2.exceptions import TemplateError, TemplateSyntaxError
34
+ from jinja2.sandbox import ImmutableSandboxedEnvironment
35
+
36
+ from .tokenizer_utils_base import CHAT_TEMPLATE_CONFIG_NAME
37
+ from .....utils import logging
38
+
39
+ from functools import lru_cache
40
+
41
+ from .vocab import Vocab
42
+ from .tokenizer_utils_base import (
43
+ AddedToken,
44
+ BatchEncoding,
45
+ EncodedInput,
46
+ EncodedInputPair,
47
+ PaddingStrategy,
48
+ PreTokenizedInput,
49
+ PreTokenizedInputPair,
50
+ PretrainedTokenizerBase,
51
+ TensorType,
52
+ TextInput,
53
+ TextInputPair,
54
+ TruncationStrategy,
55
+ )
56
+ from .utils import convert_to_dict_message, fn_args_to_dict
57
+
58
+ __all__ = [
59
+ "ChatTemplate",
60
+ "Trie",
61
+ "ChatTemplateMixin",
62
+ "PretrainedTokenizer",
63
+ "InitTrackerMeta",
64
+ ]
65
+
66
+
67
+ @dataclass
68
+ class ChatTemplate:
69
+ conversation: Union[List[str], None] = None
70
+ system: Union[str, None] = None
71
+ query: str = None
72
+
73
+ @staticmethod
74
+ @lru_cache()
75
+ def _compile_jinja_template(chat_template) -> Template:
76
+ def raise_exception(message):
77
+ raise TemplateError(message)
78
+
79
+ jinja_env = ImmutableSandboxedEnvironment(
80
+ trim_blocks=True, lstrip_blocks=True, keep_trailing_newline=True
81
+ )
82
+ jinja_env.globals["raise_exception"] = raise_exception
83
+ return jinja_env.from_string(chat_template)
84
+
85
+ def render_conversation(
86
+ self,
87
+ conversation_data: Union[List[str], Dict[str, str]],
88
+ index: int = 0,
89
+ context_data: Dict[str, Any] = {},
90
+ ) -> List[str]:
91
+ """
92
+ Args:
93
+ conversation_data (list[str]): the conversation data which must be two parts
94
+ index (int): the index of current conversation
95
+
96
+ Returns:
97
+ list[str]: the rendered conversation data
98
+ """
99
+ if self.conversation is None:
100
+ raise ValueError(
101
+ "The template for multi-turns is invalid, please check `conversation` filed in your chat-template."
102
+ )
103
+
104
+ if isinstance(conversation_data, (list, tuple)):
105
+ assert (
106
+ len(conversation_data) == 2
107
+ ), "Each round/turn of conversation must be two participants, eg: [user-query, bot-query]"
108
+
109
+ conversation_data = {
110
+ "user": conversation_data[0],
111
+ "bot": conversation_data[1],
112
+ "index": index,
113
+ }
114
+ conversation_data.update(context_data)
115
+
116
+ one_turn_conversation = []
117
+ for conversation in self.conversation:
118
+ template = self._compile_jinja_template(conversation)
119
+ result = template.render(conversation_data)
120
+ one_turn_conversation.append(result)
121
+ return one_turn_conversation
122
+
123
+ def render_query(
124
+ self, query: str, index: int = 0, context_data: Dict[str, Union[int, str]] = {}
125
+ ):
126
+ if self.query is None:
127
+ return query
128
+
129
+ template = self._compile_jinja_template(self.query)
130
+ return template.render(query=query, index=index, **context_data)
131
+
132
+ def _init_context_data(
133
+ self, context_data: Dict[str, Union[int, str]] = {}
134
+ ) -> Dict[str, Union[int, str]]:
135
+ """init the context data for chat-template"""
136
+ context_data["is_training"] = context_data.get("is_training", False)
137
+ return context_data
138
+
139
+ def render_system(self, context_data: Dict[str, Union[int, str]] = {}) -> str:
140
+ if self.system is None:
141
+ return ""
142
+
143
+ template = self._compile_jinja_template(self.system)
144
+ return template.render(**context_data)
145
+
146
+ def __call__(
147
+ self,
148
+ conversations: Union[List[List[str]], str],
149
+ context_data: Dict[str, Union[int, str]] = {},
150
+ ) -> str:
151
+ """render the conversations by chat-template
152
+
153
+ Args:
154
+ conversations (list[list[str]]): the conversations of user and bot
155
+
156
+ Returns:
157
+ str: the result of conversation
158
+ """
159
+ if isinstance(conversations, str):
160
+ conversations = [[conversations]]
161
+
162
+ # [1 ... n-1] conversation
163
+ final_query = self.render_system(context_data=context_data)
164
+ context_data["length"] = len(conversations)
165
+ for index, conversation in enumerate(conversations[:-1]):
166
+ context_data["is_first"] = index == 0
167
+ context_data["is_last"] = False
168
+ final_query += "".join(
169
+ self.render_conversation(
170
+ conversation, index=index, context_data=context_data
171
+ )
172
+ )
173
+
174
+ if not isinstance(conversations[-1], list) and not len(conversations[-1]) != 1:
175
+ raise ValueError(
176
+ "The length of last conversation must be one, eg: [[user-query, bot-answer], [user-query, bot-answer], ..., [user-query]]"
177
+ )
178
+ if len(conversations[-1]) > 1:
179
+ logging.warning(
180
+ f"The last conversation is not a single-round, chat-template will skip the conversation: {conversations[-1][1:]}"
181
+ )
182
+
183
+ final_query += self.render_query(
184
+ conversations[-1][0],
185
+ index=len(conversations) - 1,
186
+ context_data=context_data,
187
+ )
188
+ return final_query
189
+
190
+ @classmethod
191
+ def from_dict(cls, config: Dict):
192
+ return cls(**config)
193
+
194
+ @classmethod
195
+ def from_file(cls, file: str):
196
+ with open(file, "r", encoding="utf-8") as f:
197
+ config = json.load(f)
198
+ return cls.from_dict(config)
199
+
200
+
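As a rough illustration of how the `ChatTemplate` dataclass above is meant to be driven by a `chat_template.json`, here is a minimal sketch; the template strings and conversation content are invented, not defaults shipped with the package:

```python
# Minimal sketch, assuming the made-up jinja strings below.
template = ChatTemplate.from_dict(
    {
        "system": "You are a helpful assistant.\n",
        "conversation": ["User: {{ user }}\n", "Bot: {{ bot }}\n"],
        "query": "User: {{ query }}\nBot: ",
    }
)
# One finished round plus a trailing single-element user query.
prompt = template([["Hi", "Hello!"], ["What is PaddleX?"]])
# prompt == "You are a helpful assistant.\nUser: Hi\nBot: Hello!\n"
#           "User: What is PaddleX?\nBot: "
```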
201
+ def adapt_stale_fwd_patch(self, name, value):
202
+ """
203
+ Since there are some monkey patches for forward of PretrainedModel, such as
204
+ model compression, we make these patches compatible with the latest forward
205
+ method.
206
+ """
207
+ if name == "forward":
208
+ # NOTE(guosheng): In dygraph to static, `layer.forward` would be patched
209
+ # by an instance of `StaticFunction`. A string comparison is used to avoid having to
210
+ # import fluid.
211
+ if type(value).__name__.endswith(
212
+ "StaticFunction"
213
+ ) or self.forward.__class__.__name__.endswith("StaticFunction"):
214
+ return value
215
+ (
216
+ patch_spec_args,
217
+ patch_spec_varargs,
218
+ patch_spec_varkw,
219
+ patch_spec_defaults,
220
+ _,
221
+ _,
222
+ _,
223
+ ) = inspect.getfullargspec(value)
224
+ (spec_args, spec_varargs, spec_varkw, spec_defaults, _, _, _) = (
225
+ inspect.getfullargspec(self.forward)
226
+ )
227
+ new_args = [
228
+ arg
229
+ for arg in ("output_hidden_states", "output_attentions", "return_dict")
230
+ if arg not in patch_spec_args and arg in spec_args
231
+ ]
232
+
233
+ if new_args:
234
+ if self.__module__.startswith("paddlenlp"):
235
+ logging.warning(
236
+ f"The `forward` method of {self.__class__ if isinstance(self, paddle.nn.Layer) else self} is patched and the patch "
237
+ "might be based on an old oversion which missing some "
238
+ f"arguments compared with the latest, such as {new_args}. "
239
+ "We automatically add compatibility on the patch for "
240
+ "these arguemnts, and maybe the patch should be updated."
241
+ )
242
+ else:
243
+ logging.warning(
244
+ f"The `forward` method of {self.__class__ if isinstance(self, paddle.nn.Layer) else self} "
245
+ "is patched and the patch might be conflict with patches made "
246
+ f"by paddlenlp which seems have more arguments such as {new_args}. "
247
+ "We automatically add compatibility on the patch for "
248
+ "these arguemnts, and maybe the patch should be updated."
249
+ )
250
+ if isinstance(self, paddle.nn.Layer) and inspect.isfunction(value):
251
+
252
+ @functools.wraps(value)
253
+ def wrap_fwd(*args, **kwargs):
254
+ for arg in new_args:
255
+ kwargs.pop(arg, None)
256
+ return value(self, *args, **kwargs)
257
+
258
+ else:
259
+
260
+ @functools.wraps(value)
261
+ def wrap_fwd(*args, **kwargs):
262
+ for arg in new_args:
263
+ kwargs.pop(arg, None)
264
+ return value(*args, **kwargs)
265
+
266
+ return wrap_fwd
267
+ return value
268
+
269
+
270
+ # NOTE:
271
+ # Modification:
272
+ # class InitTrackerMeta(type(paddle.nn.Layer)) -> class InitTrackerMeta(type)
273
+ # Context:
274
+ # 1. In paddle 3.0rc, type(paddle.nn.Layer) == type
275
+ # 2. Solve the conflict between ultra-infer and paddle
276
+ class InitTrackerMeta(type):
277
+ """
278
+ This metaclass wraps the `__init__` method of a class to add `init_config`
279
+ attribute for instances of that class, and `init_config` use a dict to track
280
+ the initial configuration. If the class has `_pre_init` or `_post_init`
281
+ method, it would be hooked before or after `__init__` and called as
282
+ `_pre_init(self, init_fn, init_args)` or `_post_init(self, init_fn, init_args)`.
283
+ Since InitTrackerMeta would be used as metaclass for pretrained model classes,
284
+ which are always Layer subclasses and whose `type(Layer)` is not `type`, `type(Layer)` is used
285
+ rather than `type` as base class for it to avoid inheritance metaclass
286
+ conflicts.
287
+ """
288
+
289
+ def __init__(cls, name, bases, attrs):
290
+ init_func = cls.__init__
291
+ # If attrs has `__init__`, wrap it using accessible `_pre_init, _post_init`.
292
+ # Otherwise, no need to wrap again since the super cls has been wrapped.
293
+ # TODO: remove reduplicated tracker if using super cls `__init__`
294
+ pre_init_func = getattr(cls, "_pre_init", None) if "__init__" in attrs else None
295
+ post_init_func = (
296
+ getattr(cls, "_post_init", None) if "__init__" in attrs else None
297
+ )
298
+ cls.__init__ = InitTrackerMeta.init_and_track_conf(
299
+ init_func, pre_init_func, post_init_func
300
+ )
301
+ super(InitTrackerMeta, cls).__init__(name, bases, attrs)
302
+
303
+ @staticmethod
304
+ def init_and_track_conf(init_func, pre_init_func=None, post_init_func=None):
305
+ """
306
+ wraps `init_func` which is `__init__` method of a class to add `init_config`
307
+ attribute for instances of that class.
308
+ Args:
309
+ init_func (callable): It should be the `__init__` method of a class.
310
+ warning: `self` is always the class type of the down-stream model, e.g. BertForTokenClassification
311
+ pre_init_func (callable, optional): If provided, it would be hooked before
312
+ `init_func` and called as `pre_init_func(self, init_func, *init_args, **init_args)`.
313
+ Default None.
314
+ post_init_func (callable, optional): If provided, it would be hooked after
315
+ `init_func` and called as `post_init_func(self, init_func, *init_args, **init_args)`.
316
+ Default None.
317
+
318
+ Returns:
319
+ function: the wrapped function
320
+ """
321
+
322
+ @functools.wraps(init_func)
323
+ def __impl__(self, *args, **kwargs):
324
+ # call the registered `pre_init_func` helper
325
+ if pre_init_func:
326
+ pre_init_func(self, init_func, *args, **kwargs)
327
+ # keep full configuration
328
+ init_func(self, *args, **kwargs)
329
+ # call the registered `post_init_func` helper
330
+ if post_init_func:
331
+ post_init_func(self, init_func, *args, **kwargs)
332
+ self.init_config = kwargs
333
+ if args:
334
+ kwargs["init_args"] = args
335
+ kwargs["init_class"] = self.__class__.__name__
336
+
337
+ return __impl__
338
+
339
+ def __setattr__(self, name, value):
340
+ value = adapt_stale_fwd_patch(self, name, value)
341
+ return super(InitTrackerMeta, self).__setattr__(name, value)
342
+
343
+
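For illustration, a toy class (hypothetical, not part of the package) shows what the metaclass records after construction:

```python
# Hypothetical toy class; the real users are the tokenizer classes defined below.
class ToyTokenizer(metaclass=InitTrackerMeta):
    def __init__(self, vocab_file, do_lower_case=True):
        self.vocab_file = vocab_file

tok = ToyTokenizer("vocab.txt", do_lower_case=False)
# tok.init_config == {"do_lower_case": False,
#                     "init_args": ("vocab.txt",),
#                     "init_class": "ToyTokenizer"}
```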
344
+ class Trie:
345
+ """
346
+ Trie in Python. Creates a Trie out of a list of words. The trie is used to split on `added_tokens` in one pass
347
+ Loose reference https://en.wikipedia.org/wiki/Trie
348
+ """
349
+
350
+ def __init__(self):
351
+ self.data = {}
352
+
353
+ def add(self, word: str):
354
+ """
355
+ Passes over every char (utf-8 char) on word and recursively adds it to the internal `data` trie representation.
356
+ The special key `""` is used to represent termination.
357
+
358
+ This function is idempotent; adding the same word twice will leave the trie unchanged
359
+
360
+ Example:
361
+
362
+ ```python
363
+ >>> trie = Trie()
364
+ >>> trie.add("Hello 友達")
365
+ >>> trie.data
366
+ {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}}
367
+
368
+ >>> trie.add("Hello")
369
+ >>> trie.data
370
+ {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}}
371
+ ```
372
+ """
373
+ if not word:
374
+ # Prevent empty string
375
+ return
376
+ ref = self.data
377
+ for char in word:
378
+ ref[char] = char in ref and ref[char] or {}
379
+ ref = ref[char]
380
+ ref[""] = 1
381
+
382
+ def split(self, text: str) -> List[str]:
383
+ """
384
+ Will look for the words added to the trie within `text`. Output is the original string split along the
385
+ boundaries of the words found.
386
+
387
+ This trie will match the longest possible word first!
388
+
389
+ Example:
390
+
391
+ ```python
392
+ >>> trie = Trie()
393
+ >>> trie.split("[CLS] This is a extra_id_100")
394
+ ["[CLS] This is a extra_id_100"]
395
+
396
+ >>> trie.add("[CLS]")
397
+ >>> trie.add("extra_id_1")
398
+ >>> trie.add("extra_id_100")
399
+ >>> trie.split("[CLS] This is a extra_id_100")
400
+ ["[CLS]", " This is a ", "extra_id_100"]
401
+ ```
402
+ """
403
+ # indexes are counted left of the chars index.
404
+ # "hello", index 0, is left of h, index 1 is between h and e.
405
+ # index 5 is right of the "o".
406
+
407
+ # States are going to capture every possible start (indexes as above)
408
+ # as keys, and have as values, a pointer to the position in the trie
409
+ # where we're at. This is a partial match for now.
410
+ # This enables to keep track of multiple matches while we're iterating
411
+ # the string
412
+ # If the trie contains, "blowing", and "lower" and we encounter the
413
+ # string "blower", we need to split into ["b", "lower"].
414
+ # This is where we need to keep track of multiple possible starts.
415
+ states = OrderedDict()
416
+
417
+ # This will contain every index where we need
418
+ # to cut.
419
+ # We force to cut at offset 0 and len(text) (added later)
420
+ offsets = [0]
421
+
422
+ # This is used by the lookahead which needs to skip over
423
+ # some text where the full match exceeded the place in the initial
424
+ # for loop
425
+ skip = 0
426
+ # Main loop, Giving this algorithm O(n) complexity
427
+ for current, current_char in enumerate(text):
428
+ if skip and current < skip:
429
+ # Prevents the lookahead for matching twice
430
+ # like extra_id_100 and id_100
431
+ continue
432
+
433
+ # This will track every state
434
+ # that stop matching, we need to stop tracking them.
435
+ # If we look at "lowball", we're going to match "l" (add it to states), "o", "w", then
436
+ # fail on "b", we need to remove 0 from the valid states.
437
+ to_remove = set()
438
+ # Whenever we found a match, we need to drop everything
439
+ # this is a greedy algorithm, it will match on the first found token
440
+ reset = False
441
+
442
+ # In this case, we already have partial matches (But unfinished)
443
+ for start, trie_pointer in states.items():
444
+ if "" in trie_pointer:
445
+ # This is a final match, we need to reset and
446
+ # store the results in `offsets`.
447
+
448
+ # Lookahead to match longest first
449
+ # Important in case of extra_id_1 vs extra_id_100
450
+ # Here we are also actively looking for other earlier partial
451
+ # matches
452
+ # "[CLS]", "L", we need to match CLS even if L is special
453
+ for lookstart, looktrie_pointer in states.items():
454
+ if lookstart > start:
455
+ # This partial match is later, we can stop looking
456
+ break
457
+ elif lookstart < start:
458
+ # This partial match is earlier, the trie pointer
459
+ # was already updated, so index is + 1
460
+ lookahead_index = current + 1
461
+ end = current + 1
462
+ else:
463
+ # Here lookstart == start and
464
+ # looktrie_pointer == trie_pointer
465
+ # It wasn't updated yet so indices are current ones
466
+ lookahead_index = current
467
+ end = current
468
+ next_char = (
469
+ text[lookahead_index]
470
+ if lookahead_index < len(text)
471
+ else None
472
+ )
473
+ if "" in looktrie_pointer:
474
+ start = lookstart
475
+ end = lookahead_index
476
+ skip = lookahead_index
477
+
478
+ while next_char in looktrie_pointer:
479
+ looktrie_pointer = looktrie_pointer[next_char]
480
+ lookahead_index += 1
481
+ if "" in looktrie_pointer:
482
+ start = lookstart
483
+ end = lookahead_index
484
+ skip = lookahead_index
485
+
486
+ if lookahead_index == len(text):
487
+ # End of string
488
+ break
489
+ next_char = text[lookahead_index]
490
+ # End lookahead
491
+
492
+ # Storing and resetting
493
+ offsets.append(start)
494
+ offsets.append(end)
495
+ reset = True
496
+ break
497
+ elif current_char in trie_pointer:
498
+ # The current character being looked at has a match within the trie
499
+ # update the pointer (it will be stored back into states later).
500
+ trie_pointer = trie_pointer[current_char]
501
+
502
+ # Storing back the new pointer into the states.
503
+ # Partial matches got longer by one.
504
+ states[start] = trie_pointer
505
+ else:
506
+ # The new character has no match in the trie, we need
507
+ # to stop keeping track of this partial match.
508
+ # We can't do it directly within the loop because of how
509
+ # python iteration works
510
+ to_remove.add(start)
511
+
512
+ # Either clearing the full start (we found a real match)
513
+ # Or clearing only the partial matches that didn't work.
514
+ if reset:
515
+ states = {}
516
+ else:
517
+ for start in to_remove:
518
+ del states[start]
519
+
520
+ # If this character is a starting character within the trie
521
+ # start keeping track of this partial match.
522
+ if current >= skip and current_char in self.data:
523
+ states[current] = self.data[current_char]
524
+
525
+ # We have a cut at the end with states.
526
+ for start, trie_pointer in states.items():
527
+ if "" in trie_pointer:
528
+ # This is a final match, we need to reset and
529
+ # store the results in `offsets`.
530
+ end = len(text)
531
+ offsets.append(start)
532
+ offsets.append(end)
533
+ # Longest cut is always the one with lower start so the first
534
+ # item so we need to break.
535
+ break
536
+
537
+ return self.cut_text(text, offsets)
538
+
539
+ def cut_text(self, text, offsets):
540
+ # We have all the offsets now, we just need to do the actual splitting.
541
+ # We need to eventually add the first part of the string and the eventual
542
+ # last part.
543
+ offsets.append(len(text))
544
+ tokens = []
545
+ start = 0
546
+ for end in offsets:
547
+ if start > end:
548
+ logging.error(
549
+ "There was a bug in Trie algorithm in tokenization. Attempting to recover. Please report it anyway."
550
+ )
551
+ continue
552
+ elif start == end:
553
+ # This might happen if there's a match at index 0
554
+ # we're also preventing zero-width cuts in case of two
555
+ # consecutive matches
556
+ continue
557
+ tokens.append(text[start:end])
558
+ start = end
559
+
560
+ return tokens
561
+
562
+
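A minimal run of the trie, reusing the values from the docstring examples above, shows how added tokens are kept intact when text is split:

```python
trie = Trie()
trie.add("[CLS]")
trie.add("extra_id_100")
trie.split("[CLS] This is a extra_id_100")
# -> ["[CLS]", " This is a ", "extra_id_100"]
```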
563
+ def _insert_one_token_to_ordered_list(token_list: List[str], new_token: str):
564
+ """
565
+ Inserts one token to an ordered list if it does not already exist. Note: token_list must be sorted.
566
+ """
567
+ insertion_idx = bisect.bisect_left(token_list, new_token)
568
+ # Checks if new_token is already in the ordered token_list
569
+ if insertion_idx < len(token_list) and token_list[insertion_idx] == new_token:
570
+ # new_token is in token_list, don't add
571
+ return
572
+ else:
573
+ token_list.insert(insertion_idx, new_token)
574
+
575
+
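A quick check of the helper above (inputs are illustrative): it keeps the list sorted and skips duplicates.

```python
tokens = ["[CLS]", "[SEP]"]
_insert_one_token_to_ordered_list(tokens, "[MASK]")
_insert_one_token_to_ordered_list(tokens, "[SEP]")  # already present, no-op
# tokens == ["[CLS]", "[MASK]", "[SEP]"]
```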
576
+ def _is_control(char):
577
+ """Checks whether `chars` is a control character."""
578
+ # These are technically control characters but we count them as whitespace
579
+ # characters.
580
+ if char == "\t" or char == "\n" or char == "\r":
581
+ return False
582
+ cat = unicodedata.category(char)
583
+ if cat.startswith("C"):
584
+ return True
585
+ return False
586
+
587
+
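A few example characters (chosen for illustration) under the control-character rule above:

```python
_is_control("\x07")  # True  -- BEL, Unicode category Cc
_is_control("\n")    # False -- newline is treated as whitespace here
_is_control("A")     # False -- category Lu
```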
588
+ def _is_nonnormalized_char(char):
589
+ """Check whther `chars` is a non-normalized character."""
590
+ cp = ord(char)
591
+ if (
592
+ (0xFF00 <= cp <= 0xFFEF)
593
+ or (0xFE50 <= cp <= 0xFE6B) # Halfwidth and Fullwidth Forms
594
+ or (0x3358 <= cp <= 0x33FF) # Small Form Variants
595
+ or (0x249C <= cp <= 0x24E9) # CJK Compatibility
596
+ or (0x3200 <= cp <= 0x32FF) # Enclosed Alphanumerics: Ⓛ ⒰
597
+ ): # Enclosed CJK Letters and Months
598
+ return True
599
+
600
+ return False
601
+
602
+
603
+ def _is_nonnormalized_numeric(char):
604
+ """Check whether `chars` is a non-normalized numeric character."""
605
+ cp = ord(char)
606
+ if (
607
+ (0x2460 <= cp <= 0x249B)
608
+ or (0x24EA <= cp <= 0x24FF) #
609
+ or (0x2776 <= cp <= 0x2793) #
610
+ or (0x2160 <= cp <= 0x217F) # Enclosed Alphanumerics
611
+ ): # Number Forms
612
+ return True
613
+
614
+ return False
615
+
616
+
617
+ def normalize_chars(text):
618
+ """
619
+ Normalize the text for multilingual and Chinese models. Unicode range:
620
+ https://www.ling.upenn.edu/courses/Spring_2003/ling538/UnicodeRanges.html
621
+ """
622
+ output = []
623
+ for char in text:
624
+ if _is_nonnormalized_char(char):
625
+ for c in unicodedata.normalize("NFKC", char):
626
+ output.append(c)
627
+ elif _is_nonnormalized_numeric(char):
628
+ output.append(" ")
629
+ for c in str(int(unicodedata.numeric(char))):
630
+ output.append(c)
631
+ output.append(" ")
632
+ elif ord(char) == 0xF979: # https://www.zhihu.com/question/20697984
633
+ output.append("凉")
634
+ else:
635
+ output.append(char)
636
+ return "".join(output)
637
+
638
+
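An illustrative call (the input string is made up) showing the NFKC folding of full-width forms and the expansion of enclosed numerics into plain digits:

```python
normalize_chars("ＰａｄｄｌｅＸ ①")
# -> "PaddleX  1 "  (full-width letters folded; "①" expanded to " 1 ")
```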
639
+ class ChatTemplateMixin:
640
+ chat_template: Optional[ChatTemplate] = None
641
+
642
+ def apply_chat_template(
643
+ self,
644
+ conversation: Union[Dict[str, str], str],
645
+ tokenize: bool = True,
646
+ context_data: Dict[str, Any] = {},
647
+ **tokenizer_kwargs,
648
+ ) -> Union[str, Dict[str, Union["numpy.ndarray", "paddle.Tensor"]]]:
649
+ """apply chat_template rules to conversation which should not be batched data
650
+
651
+ Args:
652
+ conversation (List[List[str, str]] | str): the conversation messages between user and bot
653
+ context_data (Dict[str, Any]): the context data for chat_template.json
654
+ tokenize (bool, optional): whether do tokenization. Defaults to True.
655
+
656
+ Returns:
657
+ str | dict[str, Union["numpy.ndarray", "paddle.Tensor"]]: return the result of applied data
658
+ """
659
+ if not self.chat_template:
660
+ raise ValueError(
661
+ "chat_template is not set, please set chat_template first."
662
+ )
663
+ elif isinstance(self.chat_template, Template):
664
+ add_generation_prompt = tokenizer_kwargs.pop("add_generation_prompt", True)
665
+ query = self._apply_chat_template(
666
+ conversation, add_generation_prompt=add_generation_prompt
667
+ )
668
+ elif isinstance(self.chat_template, ChatTemplate):
669
+ query = self._apply_chat_template_paddle(conversation, context_data)
670
+
671
+ if not tokenize:
672
+ return query
673
+
674
+ # chat_template should not add special tokens
675
+ tokenizer_kwargs["add_special_tokens"] = False
676
+ return self(query, **tokenizer_kwargs)
677
+
678
+ def _apply_chat_template_paddle(
679
+ self,
680
+ conversation: Union[List[Dict[str, str]], str],
681
+ context_data: Dict[str, Any] = {},
682
+ ) -> Union[str, Dict[str, Union["numpy.ndarray", "paddle.Tensor"]]]:
683
+ context_data = self.chat_template._init_context_data(context_data)
684
+
685
+ if isinstance(conversation, str):
686
+ conversation = [[conversation]]
687
+ elif isinstance(conversation, list) and isinstance(conversation[0], str):
688
+ raise ValueError(
689
+ "apply_chat_template do not support appling batch conversations, "
690
+ "so you should apply the conversation one by one."
691
+ )
692
+
693
+ query = self.chat_template(conversation, context_data=context_data)
694
+ return query
695
+
696
+ def _apply_chat_template(
697
+ self,
698
+ conversation: Union[Dict[str, str], str],
699
+ add_generation_prompt=True,
700
+ ) -> Union[str, Dict[str, Union["numpy.ndarray", "paddle.Tensor"]]]:
701
+ if isinstance(conversation, str):
702
+ conversations = [{"role": "user", "content": conversation}]
703
+ elif isinstance(conversation, list):
704
+ assert len(conversation) > 0, "empty conversation is not allowed"
705
+ if isinstance(conversation[0], list):
706
+ conversations = convert_to_dict_message(conversation)
707
+ elif isinstance(conversation[0], dict):
708
+ conversations = conversation
709
+ else:
710
+ raise ValueError(
711
+ "apply_chat_template do not support appling batch conversations, "
712
+ "so you should apply the conversation one by one."
713
+ )
714
+ query = self.chat_template.render(
715
+ messages=conversations,
716
+ **self.special_tokens_map,
717
+ add_generation_prompt=add_generation_prompt,
718
+ )
719
+ return query
720
+
721
+ def encode_chat_inputs(
722
+ self,
723
+ conversations: List[Dict[str, str]],
724
+ context_data: Dict[str, Any] = {},
725
+ **kwargs,
726
+ ):
727
+ """Encodes conversation to pairs of token ids.
728
+ Turn 0: bos + system + sep + user bot + eos
729
+ Turn t: sep + bot + query bot + eos
730
+
731
+ Args:
732
+ conversation (List[Dict[str, str]]): the conversation of data
733
+ context_data (Dict[str, Any]): the context data of conversation
734
+
735
+ Returns:
736
+ List[list[int], list[int]]: the pair of input_ids and target_ids
737
+ """
738
+ if not self.chat_template:
739
+ raise ValueError(
740
+ "chat_template is not set, please set chat_template first."
741
+ )
742
+ elif isinstance(self.chat_template, Template):
743
+ add_generation_prompt = kwargs.pop("add_generation_prompt", True)
744
+ query = self._encode_chat_inputs(
745
+ conversations, context_data, add_generation_prompt=add_generation_prompt
746
+ )
747
+ elif isinstance(self.chat_template, ChatTemplate):
748
+ query = self._encode_chat_inputs_paddle(conversations, context_data)
749
+ return query
750
+
751
+ def _encode_chat_inputs_paddle(
752
+ self, conversations: List[Dict[str, str]], context_data: Dict[str, Any] = {}
753
+ ):
754
+ context_data = self.chat_template._init_context_data(context_data)
755
+ # encode system
756
+ result = {}
757
+ if self.chat_template.system:
758
+ system = self.chat_template.render_system(context_data)
759
+ result["system"] = self.encode(system, add_special_tokens=False)[
760
+ "input_ids"
761
+ ]
762
+
763
+ # encode conversation
764
+ conversation_ids = []
765
+ for index, conversation in enumerate(conversations):
766
+ # give more control to chat_template
767
+ context_data["is_first"] = index == 0
768
+ context_data["is_last"] = index == len(conversations) - 1
769
+
770
+ user_input, bot_output = self.chat_template.render_conversation(
771
+ conversation, index=index, context_data=context_data
772
+ )
773
+ user_ids = self.encode(user_input, add_special_tokens=False)["input_ids"]
774
+ bot_ids = self.encode(bot_output, add_special_tokens=False)["input_ids"]
775
+ conversation_ids.append([user_ids, bot_ids])
776
+
777
+ result["conversations"] = conversation_ids
778
+ return result
779
+
780
+ def _encode_chat_inputs(
781
+ self,
782
+ conversations: List[Dict[str, str]],
783
+ context_data: Dict[str, Any] = {},
784
+ system: str = None,
785
+ add_generation_prompt=True,
786
+ ):
787
+ result = {}
788
+
789
+ # Some templates do not support a system message, so we need to check it first.
790
+ if system:
791
+ try:
792
+ self.chat_template.render(
793
+ messages={"role": "system", "content": system}
794
+ )
795
+ except Exception as e:
796
+ raise ValueError("System is not supported in this tokenizer.", e)
797
+
798
+ # convert list msg to role dict msg
799
+ conversation_dict = []
800
+ origin_msg = []
801
+ for round in conversations:
802
+ round_role = [
803
+ {"role": "user", "content": round[0]},
804
+ {"role": "assistant", "content": round[1]},
805
+ ]
806
+ origin_msg.extend(round_role)
807
+ conversation_dict.append(round_role)
808
+ ans = []
809
+
810
+ # get the answer for each single round, then render the whole chat and split it by the single-round answers
811
+ # attention: answer should include end token!
812
+ for conv in conversation_dict:
813
+ roundi = [system] + conv if system else conv
814
+ roundi_str = self.chat_template.render(
815
+ messages=roundi, add_generation_prompt=False, **self.special_tokens_map
816
+ )
817
+ roundi_no_ans = [system] + [conv[0]] if system else [conv[0]]
818
+ roundi_no_ans_str = self.chat_template.render(
819
+ messages=roundi_no_ans,
820
+ add_generation_prompt=add_generation_prompt,
821
+ **self.special_tokens_map,
822
+ )
823
+ ans_roundi = roundi_str[len(roundi_no_ans_str) :]
824
+ ans.append(ans_roundi)
825
+
826
+ non_learnable_parts = self._extract_non_learnable_parts(origin_msg, ans)
827
+ assert len(non_learnable_parts) == len(ans)
828
+
829
+ conversation_ids = []
830
+ for i in range(len(non_learnable_parts)):
831
+ conversation_ids.append(
832
+ self.batch_encode(
833
+ [non_learnable_parts[i], ans[i]],
834
+ add_special_tokens=False,
835
+ padding=False,
836
+ )["input_ids"]
837
+ )
838
+
839
+ result["conversations"] = conversation_ids
840
+ return result
841
+
842
+ def _extract_non_learnable_parts(
843
+ self, origin_msg: List[Dict[str, str]], split_s: List[str]
844
+ ):
845
+ """Split the entire chat by specified words. Extract the non-learnable parts."""
846
+ # escape regex metacharacters in the special split words, e.g. | -> \|
847
+ regex_pattern = "|".join(map(re.escape, split_s))
848
+ # split the rendered chat by the escaped specified words
849
+ non_learnable_parts = re.split(
850
+ r"(?:%s)" % regex_pattern,
851
+ self.chat_template.render(
852
+ messages=origin_msg,
853
+ add_generation_prompt=False,
854
+ **self.special_tokens_map,
855
+ ),
856
+ )
857
+ if non_learnable_parts[-1] == "":
858
+ non_learnable_parts.pop()
859
+ return non_learnable_parts
860
+
861
+ @classmethod
862
+ def from_pretrained(cls, pretrained_model_name_or_path, *args, **kwargs):
863
+ cache_dir = kwargs.pop("cache_dir", None)
864
+ from_hf_hub = kwargs.pop("from_hf_hub", False)
865
+ from_aistudio = kwargs.pop("from_aistudio", False)
866
+ subfolder = kwargs.pop("subfolder", "")
867
+ if subfolder is None:
868
+ subfolder = ""
869
+
870
+ kwargs["subfolder"] = subfolder
871
+ kwargs["cache_dir"] = cache_dir
872
+ kwargs["from_hf_hub"] = from_hf_hub
873
+ kwargs["from_aistudio"] = from_aistudio
874
+ kwargs["return_tokenizer_file_dir"] = True
875
+ tokenizer, tokenizer_config_file_dir = super().from_pretrained(
876
+ pretrained_model_name_or_path, *args, **kwargs
877
+ )
878
+
879
+ # load chat-template
880
+ chat_template_file = os.path.join(
881
+ tokenizer_config_file_dir, CHAT_TEMPLATE_CONFIG_NAME
882
+ )
883
+ if not os.path.exists(chat_template_file):
884
+ return tokenizer
885
+
886
+ if tokenizer.chat_template is not None:
887
+ logging.warning(
888
+ "Chat-template already exists in config file, it will be overwritten by chat_template.json file."
889
+ )
890
+ logging.warning(
891
+ "`chat_template.json` will be deprecated in the future! Please set it in `tokenizer_config.json`."
892
+ )
893
+ tokenizer.init_chat_template(chat_template_file)
894
+ return tokenizer
895
+
896
+ def init_chat_template(self, chat_template: Union[str, Dict]):
897
+ """init chat_tempalte by file_path or template dict data
898
+
899
+ Args:
900
+ chat_template (str | dict): file_path or template dict data
901
+ """
902
+ if isinstance(chat_template, str):
903
+ if not os.path.exists(chat_template):
904
+ try:
905
+ self.chat_template: Template = ChatTemplate._compile_jinja_template(
906
+ chat_template
907
+ )
908
+ except TemplateSyntaxError:
909
+ # It is neither a jinja string nor a path string
910
+ raise TemplateSyntaxError(
911
+ "The chat-template in json is not valid jinja string: {}".format(
912
+ chat_template
913
+ ),
914
+ lineno=0, # fake lineno; the argument is required but unused here
915
+ )
916
+ else:
917
+ self.chat_template = ChatTemplate.from_file(chat_template)
918
+ elif isinstance(chat_template, dict):
919
+ self.chat_template = ChatTemplate.from_dict(chat_template)
920
+ elif isinstance(chat_template, ChatTemplate):
921
+ self.chat_template = chat_template
922
+ else:
923
+ raise ValueError("Receive error chat_template data: ", chat_template)
924
+
925
+ def save_resources(self, save_directory):
926
+ super().save_resources(save_directory)
927
+
928
+ if isinstance(
929
+ self.chat_template, ChatTemplate
930
+ ): # Future remove if ChatTemplate is deprecated
931
+ chat_template_file = os.path.join(save_directory, CHAT_TEMPLATE_CONFIG_NAME)
932
+ with open(chat_template_file, "w", encoding="utf-8") as f:
933
+ json.dump(asdict(self.chat_template), f, ensure_ascii=False, indent=4)
934
+ logging.info("Chat-template config file saved in " + chat_template_file)
935
+
936
+
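Typical call pattern for the mixin; the tokenizer class and checkpoint name below are placeholders, not names shipped with the package:

```python
# Sketch only; "SomeChatTokenizer" and "some/chat-model" are hypothetical.
tok = SomeChatTokenizer.from_pretrained("some/chat-model")
prompt = tok.apply_chat_template("Hello, who are you?", tokenize=False)  # rendered prompt string
inputs = tok.apply_chat_template("Hello, who are you?")  # tokenized; special tokens are not added
```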
937
+ @six.add_metaclass(InitTrackerMeta)
938
+ class PretrainedTokenizer(ChatTemplateMixin, PretrainedTokenizerBase):
939
+ """
940
+ Base class for all tokenizers.
941
+
942
+ Inherits from [`~tokenizer_utils_base.PretrainedTokenizerBase`].
943
+
944
+ Handle all the shared methods for tokenization and special tokens as well as methods downloading/caching/loading
945
+ pretrained tokenizers as well as adding tokens to the vocabulary.
946
+
947
+ This class also contains the added tokens in a unified way on top of all tokenizers so we don't have to handle the
948
+ specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...).
949
+
950
+ - **resource_files_names** (`Dict[str, str]`) -- A dictionary with, as keys, the `__init__` keyword name of each
951
+ vocabulary file required by the model, and as associated values, the filename for saving the associated file
952
+ (string).
953
+ - **pretrained_resource_files_map** (`Dict[str, Dict[str, str]]`) -- A dictionary of dictionaries, with the
954
+ high-level keys being the `__init__` keyword name of each vocabulary file required by the model, the
955
+ low-level being the `short-cut-names` of the pretrained models with, as associated values, the `url` to the
956
+ associated pretrained vocabulary file.
957
+ - **max_model_input_sizes** (`Dict[str, Optional[int]]`) -- A dictionary with, as keys, the `short-cut-names`
958
+ of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model,
959
+ or `None` if the model has no maximum input size.
960
+ - **pretrained_init_configuration** (`Dict[str, Dict[str, Any]]`) -- A dictionary with, as keys, the
961
+ `short-cut-names` of the pretrained models, and as associated values, a dictionary of specific arguments to
962
+ pass to the `__init__` method of the tokenizer class for this pretrained model when loading the tokenizer
963
+ with the [`~tokenizer_utils_base.PretrainedTokenizerBase.from_pretrained`] method.
964
+ - **model_input_names** (`List[str]`) -- A list of inputs expected in the forward pass of the model.
965
+ - **padding_side** (`str`) -- The default value for the side on which the model should have padding applied.
966
+ Should be `'right'` or `'left'`.
967
+ - **truncation_side** (`str`) -- The default value for the side on which the model should have truncation
968
+ applied. Should be `'right'` or `'left'`.
969
+
970
+ Moreover, methods common to tokenizers for tokenization, token/id conversion
971
+ and encoding as model inputs are also provided here.
972
+
973
+ Besides, metaclass `InitTrackerMeta` is used to create `PretrainedTokenizer`,
974
+ by which subclasses can track arguments for initialization automatically
975
+ and expose special tokens initialization used as attributes.
976
+ """
977
+
978
+ added_tokens_encoder: Dict[str, int] = {}
979
+ added_tokens_decoder: Dict[int, str] = {}
980
+ unique_no_split_tokens: List[str] = []
981
+ tokens_trie = Trie()
982
+
983
+ _decode_use_source_tokenizer = False
984
+
985
+ def _pre_init(self, original_init, *args, **kwargs):
986
+ """
987
+ It would be hooked before `__init__` to add specials tokens (arguments of
988
+ `__init__` whose name ends with `_token`) as attributes of the tokenizer
989
+ instance.
990
+ """
991
+ init_dict = fn_args_to_dict(original_init, *((self,) + args), **kwargs)
992
+ init_dict.pop("self", None)
993
+ super(PretrainedTokenizer, self).__init__(**init_dict)
994
+
995
+ self.added_tokens_encoder: Dict[str, int] = {}
996
+ self.added_tokens_decoder: Dict[int, str] = {}
997
+ self.unique_no_split_tokens: List[str] = []
998
+ self.tokens_trie = Trie()
999
+
1000
+ self._decode_use_source_tokenizer = False
1001
+
1002
+ def _build_special_tokens_map_extended(self, **kwargs):
1003
+ for key, value in kwargs.items():
1004
+ if value is None:
1005
+ continue
1006
+ if key in self.SPECIAL_TOKENS_ATTRIBUTES:
1007
+ if key == "additional_special_tokens":
1008
+ assert isinstance(
1009
+ value, (list, tuple)
1010
+ ), f"Value {value} is not a list or tuple"
1011
+ assert all(
1012
+ isinstance(t, (str, AddedToken)) for t in value
1013
+ ), "One of the tokens is not a string or an AddedToken"
1014
+ setattr(self, key, value)
1015
+ elif isinstance(value, (str, AddedToken)):
1016
+ setattr(self, key, value)
1017
+ else:
1018
+ raise TypeError(
1019
+ f"special token {key} has to be either str or AddedToken but got: {type(value)}"
1020
+ )
1021
+
1022
+ @property
1023
+ def vocab_size(self) -> int:
1024
+ """
1025
+ `int`: Size of the base vocabulary (without the added tokens).
1026
+ """
1027
+ raise NotImplementedError
1028
+
1029
+ @property
1030
+ def is_fast(self) -> bool:
1031
+ return False
1032
+
1033
+ def get_added_vocab(self) -> Dict[str, int]:
1034
+ """
1035
+ Returns the added tokens in the vocabulary as a dictionary of token to index.
1036
+
1037
+ Returns:
1038
+ `Dict[str, int]`: The added tokens.
1039
+ """
1040
+ return self.added_tokens_encoder
1041
+
1042
+ def __len__(self):
1043
+ """
1044
+ Size of the full vocabulary with the added tokens.
1045
+ """
1046
+ return self.vocab_size + len(self.added_tokens_encoder)
1047
+
1048
+ def _add_tokens(
1049
+ self,
1050
+ new_tokens: Union[List[str], List[AddedToken]],
1051
+ special_tokens: bool = False,
1052
+ ) -> int:
1053
+ """
1054
+ Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to
1055
+ it with indices starting from the length of the current vocabulary.
1056
+
1057
+ Args:
1058
+ new_tokens (`List[str]`or `List[AddedToken]`):
1059
+ Token(s) to add in vocabulary. A token is only added if it's not already in the vocabulary (tested by
1060
+ checking if the tokenizer assigns the index of the `unk_token` to them).
1061
+ special_tokens (`bool`, *optional*, defaults to `False`):
1062
+ Whether or not the tokens should be added as special tokens.
1063
+
1064
+ Returns:
1065
+ `int`: The number of tokens actually added to the vocabulary.
1066
+
1067
+ Examples:
1068
+
1069
+ ```python
1070
+ # Let's see how to increase the vocabulary of Bert model and tokenizer
1071
+ tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
1072
+ model = BertModel.from_pretrained("bert-base-uncased")
1073
+
1074
+ num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"])
1075
+ print("We have added", num_added_toks, "tokens")
1076
+ ```"""
1077
+ new_tokens = [str(tok) for tok in new_tokens]
1078
+
1079
+ tokens_to_add = []
1080
+ for token in new_tokens:
1081
+ if not isinstance(token, str):
1082
+ raise TypeError(f"Token {token} is not a string but a {type(token)}.")
1083
+ if (
1084
+ not special_tokens
1085
+ and hasattr(self, "do_lower_case")
1086
+ and self.do_lower_case
1087
+ ):
1088
+ token = token.lower()
1089
+ if (
1090
+ token != self.unk_token
1091
+ and self.convert_tokens_to_ids(token)
1092
+ == self.convert_tokens_to_ids(self.unk_token)
1093
+ and token not in tokens_to_add
1094
+ ):
1095
+ tokens_to_add.append(token)
1096
+ if self.verbose:
1097
+ logging.info(f"Adding {token} to the vocabulary")
1098
+
1099
+ added_tok_encoder = dict(
1100
+ (tok, len(self) + i) for i, tok in enumerate(tokens_to_add)
1101
+ )
1102
+ added_tok_decoder = {v: k for k, v in added_tok_encoder.items()}
1103
+ self.added_tokens_encoder.update(added_tok_encoder)
1104
+ self.added_tokens_decoder.update(added_tok_decoder)
1105
+
1106
+ # Make sure we don't split on any special tokens (even if they were already in the vocab before, e.g. for ALBERT)
1107
+ if special_tokens:
1108
+ if len(new_tokens) == 1:
1109
+ _insert_one_token_to_ordered_list(
1110
+ self.unique_no_split_tokens, new_tokens[0]
1111
+ )
1112
+ else:
1113
+ self.unique_no_split_tokens = sorted(
1114
+ set(self.unique_no_split_tokens).union(set(new_tokens))
1115
+ )
1116
+ else:
1117
+ # Or on the newly added tokens
1118
+ if len(tokens_to_add) == 1:
1119
+ _insert_one_token_to_ordered_list(
1120
+ self.unique_no_split_tokens, tokens_to_add[0]
1121
+ )
1122
+ else:
1123
+ self.unique_no_split_tokens = sorted(
1124
+ set(self.unique_no_split_tokens).union(set(tokens_to_add))
1125
+ )
1126
+ self._create_trie(self.unique_no_split_tokens)
1127
+
1128
+ return len(tokens_to_add)
1129
+
1130
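A minimal usage sketch for the vocabulary-extension path above, assuming an already-loaded concrete tokenizer instance named `tokenizer` (the token strings and printed ids are illustrative only):

```python
# `tokenizer` is an assumed concrete subclass instance with a loaded vocabulary.
num_added = tokenizer.add_tokens(["new_tok1", "my_new-tok2"])  # routes through _add_tokens
print(num_added)                    # count of tokens that were not already in the vocab
print(len(tokenizer))               # vocab_size + len(added_tokens_encoder)
print(tokenizer.get_added_vocab())  # e.g. {"new_tok1": 30522, "my_new-tok2": 30523}
```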
+ def _create_trie(self, unique_no_split_tokens):
1131
+ trie = Trie()
1132
+ for token in unique_no_split_tokens:
1133
+ if (
1134
+ hasattr(self, "do_lower_case")
1135
+ and self.do_lower_case
1136
+ and token not in self.all_special_tokens
1137
+ ):
1138
+ trie.add(token.lower())
1139
+ else:
1140
+ trie.add(token)
1141
+ self.tokens_trie = trie
1142
+
1143
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
1144
+ """
1145
+ Performs any necessary transformations before tokenization.
1146
+
1147
+ This method should pop the arguments from kwargs and return the remaining `kwargs` as well. We test the
1148
+ `kwargs` at the end of the encoding process to be sure all the arguments have been used.
1149
+
1150
+ Args:
1151
+ text (`str`):
1152
+ The text to prepare.
1153
+ is_split_into_words (`bool`, *optional*, defaults to `False`):
1154
+ Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
1155
+ tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
1156
+ which it will tokenize. This is useful for NER or token classification.
1157
+ kwargs:
1158
+ Keyword arguments to use for the tokenization.
1159
+
1160
+ Returns:
1161
+ `Tuple[str, Dict[str, Any]]`: The prepared text and the unused kwargs.
1162
+ """
1163
+
1164
+ return (text, kwargs)
1165
+
1166
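A sketch of how a subclass might override this hook; the `add_prefix_space` keyword is purely illustrative and not part of this base class:

```python
# Hypothetical override inside a tokenizer subclass.
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
    # Pop the custom (illustrative) kwarg so the unused-kwargs check stays clean.
    add_prefix_space = kwargs.pop("add_prefix_space", False)
    if add_prefix_space and not text.startswith(" "):
        text = " " + text
    # Return the transformed text together with the remaining kwargs.
    return (text, kwargs)
```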
+ def tokenize(self, text: TextInput, **kwargs) -> List[str]:
1167
+ """
1168
+ Converts a string into a sequence of tokens, using the tokenizer.
1169
+
1170
+ Splits into words for word-based vocabularies or into sub-words for sub-word-based vocabularies
1171
+ (BPE/SentencePiece/WordPiece). Takes care of added tokens.
1172
+
1173
+ Args:
1174
+ text (`str`):
1175
+ The sequence to be encoded.
1176
+ **kwargs (additional keyword arguments):
1177
+ Passed along to the model-specific `prepare_for_tokenization` preprocessing method.
1178
+
1179
+ Returns:
1180
+ `List[str]`: The list of tokens.
1181
+ """
1182
+ # Simple mapping string => AddedToken for special tokens with specific tokenization behaviors
1183
+ all_special_tokens_extended = dict(
1184
+ (str(t), t)
1185
+ for t in self.all_special_tokens_extended
1186
+ if isinstance(t, AddedToken)
1187
+ )
1188
+
1189
+ text, kwargs = self.prepare_for_tokenization(text, **kwargs)
1190
+
1191
+ # TODO: should this be in the base class?
1192
+ if hasattr(self, "do_lower_case") and self.do_lower_case:
1193
+ # convert non-special tokens to lowercase
1194
+ escaped_special_toks = [
1195
+ re.escape(s_tok)
1196
+ for s_tok in (self.unique_no_split_tokens + self.all_special_tokens)
1197
+ ]
1198
+ pattern = r"(" + r"|".join(escaped_special_toks) + r")|" + r"(.+?)"
1199
+ text = re.sub(
1200
+ pattern, lambda m: m.groups()[0] or m.groups()[1].lower(), text
1201
+ )
1202
+
1203
+ no_split_token = set(self.unique_no_split_tokens)
1204
+ tokens = self.tokens_trie.split(text)
1205
+
1206
+ # ["This is something", "<special_token_1>", " else"]
1207
+ for i, token in enumerate(tokens):
1208
+ if token in no_split_token:
1209
+ tok_extended = all_special_tokens_extended.get(token, None)
1210
+ left = tokens[i - 1] if i > 0 else None
1211
+ right = tokens[i + 1] if i < len(tokens) - 1 else None
1212
+ if isinstance(tok_extended, AddedToken):
1213
+ if tok_extended.rstrip and right:
1214
+ # A bit counter-intuitive but we strip the left of the string
1215
+ # since tok_extended.rstrip means the special token is eating all white spaces on its right
1216
+ tokens[i + 1] = right.lstrip()
1217
+ # Strip white spaces on the left
1218
+ if tok_extended.lstrip and left:
1219
+ tokens[i - 1] = left.rstrip() # Opposite here
1220
+ else:
1221
+ # We strip left and right by default
1222
+ if right:
1223
+ tokens[i + 1] = right.lstrip()
1224
+ if left:
1225
+ tokens[i - 1] = left.rstrip()
1226
+ # ["This is something", "<special_token_1>", "else"]
1227
+ tokenized_text = []
1228
+ for token in tokens:
1229
+ # Need to skip possibly empty (fully stripped) tokens
1230
+ if not token:
1231
+ continue
1232
+ if token in no_split_token:
1233
+ tokenized_text.append(token)
1234
+ else:
1235
+ tokenized_text.extend(self._tokenize(token))
1236
+ # ["This", " is", " something", "<special_token_1>", "else"]
1237
+ return tokenized_text
1238
+
1239
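A short sketch of the splitting behaviour described above, assuming a concrete `tokenizer` whose special tokens include "[SEP]" (the exact sub-word output depends on the vocabulary):

```python
# Special/added tokens are matched by the trie and never split further.
print(tokenizer.tokenize("hello world [SEP] bye"))
# Surrounding whitespace is stripped according to the AddedToken lstrip/rstrip flags,
# giving roughly: ['hello', 'world', '[SEP]', 'bye']
```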
+ def _tokenize(self, text, **kwargs):
1240
+ """
1241
+ Converts a string into a sequence of tokens (strings), using the tokenizer. Splits into words for word-based
1242
+ vocabularies or into sub-words for sub-word-based vocabularies (BPE/SentencePiece/WordPiece).
1243
+
1244
+ Does NOT take care of added tokens.
1245
+ """
1246
+ raise NotImplementedError
1247
+
1248
+ def convert_tokens_to_ids(self, tokens):
1249
+ if tokens is None:
1250
+ return None
1251
+
1252
+ if isinstance(tokens, str):
1253
+ return self._convert_token_to_id_with_added_voc(tokens)
1254
+
1255
+ ids = []
1256
+ for token in tokens:
1257
+ ids.append(self._convert_token_to_id_with_added_voc(token))
1258
+
1259
+ return ids
1260
+
1261
+ def _convert_token_to_id_with_added_voc(self, token):
1262
+ if token is None:
1263
+ return None
1264
+
1265
+ if token in self.added_tokens_encoder:
1266
+ return self.added_tokens_encoder[token]
1267
+ return self._convert_token_to_id(token)
1268
+
1269
+ def _convert_token_to_id(self, token):
1270
+
1271
+ return self.vocab.to_indices(token)
1272
+
1273
+ def convert_tokens_to_string(self, tokens):
1274
+ """
1275
+ Converts a sequence of tokens (list of strings) to a single string by
1276
+ using ``' '.join(tokens)`` .
1277
+
1278
+ Args:
1279
+ tokens (list[str]): A sequence of tokens.
1280
+
1281
+ Returns:
1282
+ str: Converted string.
1283
+ """
1284
+ return " ".join(tokens)
1285
+
1286
+ def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
1287
+ if isinstance(ids, int):
1288
+ if ids in self.added_tokens_decoder:
1289
+ return self.added_tokens_decoder[ids]
1290
+ else:
1291
+ return self._convert_id_to_token(ids)
1292
+ tokens = []
1293
+ for index in ids:
1294
+ index = int(index)
1295
+ if skip_special_tokens and index in self.all_special_ids:
1296
+ continue
1297
+ if index in self.added_tokens_decoder:
1298
+ tokens.append(self.added_tokens_decoder[index])
1299
+ else:
1300
+ tokens.append(self._convert_id_to_token(index))
1301
+ return tokens
1302
+
1303
+ def _convert_id_to_token(self, index):
1304
+
1305
+ return self.vocab.to_tokens(index)
1306
+
1307
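A round-trip sketch between tokens and ids using the converters above (hypothetical `tokenizer` instance):

```python
tokens = tokenizer.tokenize("unbelievable results")
ids = tokenizer.convert_tokens_to_ids(tokens)    # added tokens looked up first, then the base vocab
back = tokenizer.convert_ids_to_tokens(ids)
text = tokenizer.convert_tokens_to_string(back)  # this base class simply joins with spaces
```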
+ @staticmethod
1308
+ def load_vocabulary(
1309
+ filepath,
1310
+ unk_token=None,
1311
+ pad_token=None,
1312
+ bos_token=None,
1313
+ eos_token=None,
1314
+ **kwargs,
1315
+ ):
1316
+ """
1317
+ Instantiates a `Vocab` from a file, reserving all tokens
1318
+ by using `Vocab.from_dict`. The file contains one token per line, and the
1319
+ line number is the index of the corresponding token.
1320
+
1321
+ Args:
1322
+ filepath (str): path of file to construct vocabulary.
1323
+ unk_token (str): special token for the unknown token. If not needed, it
1324
+ could be `None`. Defaults to `None`.
1325
+ pad_token (str): special token for the padding token. If not needed, it
1326
+ could be `None`. Defaults to `None`.
1327
+ bos_token (str): special token for the bos token. If not needed, it
1328
+ could be `None`. Defaults to `None`.
1329
+ eos_token (str): special token for the eos token. If not needed, it
1330
+ could be `None`. Defaults to `None`.
1331
+ **kwargs (dict): keyword arguments for `Vocab.from_dict`.
1332
+
1333
+ Returns:
1334
+ Vocab: An instance of `Vocab`.
1335
+ """
1336
+ token_to_idx = {}
1337
+ with io.open(filepath, "r", encoding="utf-8") as f:
1338
+ for index, line in enumerate(f):
1339
+ token = line.rstrip("\n")
1340
+ token_to_idx[token] = int(index)
1341
+ vocab = Vocab.from_dict(
1342
+ token_to_idx,
1343
+ unk_token=unk_token,
1344
+ pad_token=pad_token,
1345
+ bos_token=bos_token,
1346
+ eos_token=eos_token,
1347
+ **kwargs,
1348
+ )
1349
+ return vocab
1350
+
1351
+ @staticmethod
1352
+ def save_vocabulary(filepath, vocab):
1353
+ """
1354
+ Saves all tokens to a vocabulary file. The file contains one token per line,
1355
+ and the line number is the index of the corresponding token.
1356
+
1357
+ Args:
1358
+ filepath (str): File path to be saved to.
1359
+ vocab (Vocab|dict): The `Vocab` or `dict` instance to be saved.
1360
+ """
1361
+ if isinstance(vocab, Vocab):
1362
+ tokens = vocab.idx_to_token
1363
+ else:
1364
+ tokens = sorted(vocab.keys(), key=lambda token: vocab[token])
1365
+ with io.open(filepath, "w", encoding="utf-8") as f:
1366
+ for token in tokens:
1367
+ f.write(token + "\n")
1368
+
1369
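Both helpers are static, so they can be called from the class or from an instance; a small round-trip sketch (file name and token set are illustrative):

```python
# Write a one-token-per-line vocab file, then reload it as a Vocab.
tokenizer.save_vocabulary("vocab.txt", {"[PAD]": 0, "[UNK]": 1, "hello": 2})
vocab = tokenizer.load_vocabulary("vocab.txt", unk_token="[UNK]", pad_token="[PAD]")
print(vocab.to_indices("hello"))  # 2
print(vocab.to_tokens(0))         # "[PAD]"
```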
+ def get_special_tokens_mask(
1370
+ self, token_ids_0, token_ids_1=None, already_has_special_tokens=False
1371
+ ):
1372
+ """
1373
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
1374
+ special tokens using the tokenizer ``encode`` methods.
1375
+
1376
+ Args:
1377
+ token_ids_0 (List[int]): List of ids of the first sequence.
1378
+ token_ids_1 (List[int], optional): List of ids of the second sequence.
1379
+ already_has_special_tokens (bool, optional): Whether or not the token list is already
1380
+ formatted with special tokens for the model. Defaults to `False`.
1381
+
1382
+ Returns:
1383
+ results (List[int]): The list of integers in the range [0, 1]:
1384
+ 1 for a special token, 0 for a sequence token.
1385
+ """
1386
+ if already_has_special_tokens:
1387
+ if token_ids_1 is not None:
1388
+ raise ValueError(
1389
+ "You should not supply a second sequence if the provided sequence of "
1390
+ "ids is already formatted with special tokens for the model."
1391
+ )
1392
+
1393
+ return super().get_special_tokens_mask(
1394
+ token_ids_0=token_ids_0,
1395
+ token_ids_1=token_ids_1,
1396
+ already_has_special_tokens=True,
1397
+ )
1398
+ return [0] * ((len(token_ids_1) if token_ids_1 else 0) + len(token_ids_0))
1399
+
1400
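A quick sketch of the base behaviour shown above (hypothetical ids): when the input is not yet formatted with special tokens, the mask is all zeros.

```python
print(tokenizer.get_special_tokens_mask([5, 6, 7], [8, 9]))
# -> [0, 0, 0, 0, 0]; with already_has_special_tokens=True the call defers to the superclass.
```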
+ def num_special_tokens_to_add(self, pair=False):
1401
+ """
1402
+ Returns the number of added tokens when encoding a sequence with special tokens.
1403
+
1404
+ Args:
1405
+ pair (bool, optional):
1406
+ Whether the number of added tokens should be computed in the case of a sequence pair or a single
1407
+ sequence. Defaults to `False`.
1408
+ Returns:
1409
+ int: Number of special tokens added to sequences.
1410
+ """
1411
+ token_ids_0 = []
1412
+ token_ids_1 = []
1413
+ return len(
1414
+ self.build_inputs_with_special_tokens(
1415
+ token_ids_0, token_ids_1 if pair else None
1416
+ )
1417
+ )
1418
+
1419
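For a BERT-style scheme ([CLS] A [SEP] / [CLS] A [SEP] B [SEP]) this would typically report:

```python
print(tokenizer.num_special_tokens_to_add(pair=False))  # e.g. 2
print(tokenizer.num_special_tokens_to_add(pair=True))   # e.g. 3
```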
+ def _encode_plus(
1420
+ self,
1421
+ text: Union[TextInput, PreTokenizedInput, EncodedInput],
1422
+ text_pair: Optional[Union[TextInput, PreTokenizedInput, EncodedInput]] = None,
1423
+ add_special_tokens: bool = True,
1424
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
1425
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
1426
+ max_length: Optional[int] = None,
1427
+ stride: int = 0,
1428
+ is_split_into_words: bool = False,
1429
+ pad_to_multiple_of: Optional[int] = None,
1430
+ return_tensors: Optional[Union[str, TensorType]] = None,
1431
+ return_position_ids: Optional[bool] = None,
1432
+ return_token_type_ids: Optional[bool] = None,
1433
+ return_attention_mask: Optional[bool] = None,
1434
+ return_overflowing_tokens: bool = False,
1435
+ return_special_tokens_mask: bool = False,
1436
+ return_offsets_mapping: bool = False,
1437
+ return_length: bool = False,
1438
+ verbose: bool = True,
1439
+ **kwargs,
1440
+ ) -> BatchEncoding:
1441
+ def get_input_ids(text):
1442
+ if isinstance(text, str):
1443
+ tokens = self.tokenize(text, **kwargs)
1444
+ return self.convert_tokens_to_ids(tokens)
1445
+ elif (
1446
+ isinstance(text, (list, tuple))
1447
+ and len(text) > 0
1448
+ and isinstance(text[0], str)
1449
+ ):
1450
+ if is_split_into_words:
1451
+ tokens = list(
1452
+ itertools.chain(
1453
+ *(
1454
+ self.tokenize(t, is_split_into_words=True, **kwargs)
1455
+ for t in text
1456
+ )
1457
+ )
1458
+ )
1459
+ return self.convert_tokens_to_ids(tokens)
1460
+ else:
1461
+ return self.convert_tokens_to_ids(text)
1462
+ elif (
1463
+ isinstance(text, (list, tuple))
1464
+ and len(text) > 0
1465
+ and isinstance(text[0], int)
1466
+ ):
1467
+ return text
1468
+ else:
1469
+ if is_split_into_words:
1470
+ raise ValueError(
1471
+ f"Input {text} is not valid. Should be a string or a list/tuple of strings when `is_split_into_words=True`."
1472
+ )
1473
+ else:
1474
+ raise ValueError(
1475
+ f"Input {text} is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
1476
+ )
1477
+
1478
+ first_ids = get_input_ids(text)
1479
+ second_ids = get_input_ids(text_pair) if text_pair is not None else None
1480
+
1481
+ if return_offsets_mapping:
1482
+ kwargs["text"] = text
1483
+ kwargs["text_pair"] = text_pair
1484
+
1485
+ return self.prepare_for_model(
1486
+ first_ids,
1487
+ pair_ids=second_ids,
1488
+ add_special_tokens=add_special_tokens,
1489
+ padding=padding_strategy.value,
1490
+ truncation=truncation_strategy.value,
1491
+ max_length=max_length,
1492
+ stride=stride,
1493
+ pad_to_multiple_of=pad_to_multiple_of,
1494
+ return_tensors=return_tensors,
1495
+ prepend_batch_axis=True,
1496
+ return_position_ids=return_position_ids,
1497
+ return_attention_mask=return_attention_mask,
1498
+ return_token_type_ids=return_token_type_ids,
1499
+ return_overflowing_tokens=return_overflowing_tokens,
1500
+ return_special_tokens_mask=return_special_tokens_mask,
1501
+ return_offsets_mapping=return_offsets_mapping,
1502
+ return_length=return_length,
1503
+ verbose=verbose,
1504
+ **kwargs,
1505
+ )
1506
+
1507
+ def _batch_encode_plus(
1508
+ self,
1509
+ batch_text_or_text_pairs: Union[
1510
+ List[TextInput],
1511
+ List[TextInputPair],
1512
+ List[PreTokenizedInput],
1513
+ List[PreTokenizedInputPair],
1514
+ List[EncodedInput],
1515
+ List[EncodedInputPair],
1516
+ ],
1517
+ add_special_tokens: bool = True,
1518
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
1519
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
1520
+ max_length: Optional[int] = None,
1521
+ stride: int = 0,
1522
+ is_split_into_words: bool = False,
1523
+ pad_to_multiple_of: Optional[int] = None,
1524
+ return_position_ids: Optional[bool] = None,
1525
+ return_tensors: Optional[Union[str, TensorType]] = None,
1526
+ return_token_type_ids: Optional[bool] = None,
1527
+ return_attention_mask: Optional[bool] = None,
1528
+ return_overflowing_tokens: bool = False,
1529
+ return_special_tokens_mask: bool = False,
1530
+ return_dict: bool = True,
1531
+ return_offsets_mapping: bool = False,
1532
+ return_length: bool = False,
1533
+ verbose: bool = True,
1534
+ **kwargs,
1535
+ ) -> BatchEncoding:
1536
+ def get_input_ids(text):
1537
+ if isinstance(text, str):
1538
+ tokens = self.tokenize(text, **kwargs)
1539
+ return self.convert_tokens_to_ids(tokens)
1540
+ elif (
1541
+ isinstance(text, (list, tuple))
1542
+ and len(text) > 0
1543
+ and isinstance(text[0], str)
1544
+ ):
1545
+ if is_split_into_words:
1546
+ tokens = list(
1547
+ itertools.chain(
1548
+ *(
1549
+ self.tokenize(t, is_split_into_words=True, **kwargs)
1550
+ for t in text
1551
+ )
1552
+ )
1553
+ )
1554
+ return self.convert_tokens_to_ids(tokens)
1555
+ else:
1556
+ return self.convert_tokens_to_ids(text)
1557
+ elif (
1558
+ isinstance(text, (list, tuple))
1559
+ and len(text) > 0
1560
+ and isinstance(text[0], int)
1561
+ ):
1562
+ return text
1563
+ else:
1564
+ raise ValueError(
1565
+ "Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
1566
+ )
1567
+
1568
+ input_ids = []
1569
+ for ids_or_pair_ids in batch_text_or_text_pairs:
1570
+ if not isinstance(ids_or_pair_ids, (list, tuple)):
1571
+ ids, pair_ids = ids_or_pair_ids, None
1572
+ elif is_split_into_words and not isinstance(
1573
+ ids_or_pair_ids[0], (list, tuple)
1574
+ ):
1575
+ ids, pair_ids = ids_or_pair_ids, None
1576
+ else:
1577
+ ids, pair_ids = ids_or_pair_ids
1578
+
1579
+ first_ids = get_input_ids(ids)
1580
+ second_ids = get_input_ids(pair_ids) if pair_ids is not None else None
1581
+ input_ids.append((first_ids, second_ids))
1582
+
1583
+ if stride > 0 and second_ids is not None:
1584
+ kwargs["batch_text_or_text_pairs"] = batch_text_or_text_pairs
1585
+ else:
1586
+ if return_offsets_mapping:
1587
+ has_pair = False
1588
+ if len(batch_text_or_text_pairs) > 0:
1589
+ if isinstance(batch_text_or_text_pairs[0], (list, tuple)):
1590
+ has_pair = True
1591
+ kwargs["texts"] = None
1592
+ kwargs["text_pairs"] = None
1593
+ if has_pair:
1594
+ kwargs["texts"] = [text[0] for text in batch_text_or_text_pairs]
1595
+ kwargs["text_pairs"] = [
1596
+ text[1] for text in batch_text_or_text_pairs
1597
+ ]
1598
+ else:
1599
+ kwargs["texts"] = [text for text in batch_text_or_text_pairs]
1600
+
1601
+ batch_outputs = self._batch_prepare_for_model(
1602
+ input_ids,
1603
+ add_special_tokens=add_special_tokens,
1604
+ padding_strategy=padding_strategy,
1605
+ truncation_strategy=truncation_strategy,
1606
+ max_length=max_length,
1607
+ stride=stride,
1608
+ pad_to_multiple_of=pad_to_multiple_of,
1609
+ return_position_ids=return_position_ids,
1610
+ return_attention_mask=return_attention_mask,
1611
+ return_token_type_ids=return_token_type_ids,
1612
+ return_overflowing_tokens=return_overflowing_tokens,
1613
+ return_special_tokens_mask=return_special_tokens_mask,
1614
+ return_dict=return_dict,
1615
+ return_offsets_mapping=return_offsets_mapping,
1616
+ return_length=return_length,
1617
+ return_tensors=return_tensors,
1618
+ verbose=verbose,
1619
+ **kwargs,
1620
+ )
1621
+
1622
+ return batch_outputs
1623
+
1624
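A batch-encoding sketch through the public call path that eventually reaches `_batch_encode_plus` (hypothetical `tokenizer`; keyword names follow the signatures above):

```python
batch = tokenizer(
    ["first sentence", "second sentence"],  # texts
    ["its pair", "another pair"],           # text pairs
    max_length=16,
    truncation=True,
    padding="max_length",
    return_attention_mask=True,
)
print(batch["input_ids"])       # padded id sequences, one per example
print(batch["attention_mask"])  # 1 for real tokens, 0 for padding
```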
+ def _batch_prepare_for_model(
1625
+ self,
1626
+ batch_ids_pairs: List[Union[PreTokenizedInputPair, Tuple[List[int], None]]],
1627
+ add_special_tokens: bool = True,
1628
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
1629
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
1630
+ max_length: Optional[int] = None,
1631
+ stride: int = 0,
1632
+ pad_to_multiple_of: Optional[int] = None,
1633
+ return_position_ids: Optional[bool] = None,
1634
+ return_tensors: Optional[str] = None,
1635
+ return_token_type_ids: Optional[bool] = None,
1636
+ return_attention_mask: Optional[bool] = None,
1637
+ return_overflowing_tokens: bool = False,
1638
+ return_special_tokens_mask: bool = False,
1639
+ return_dict: bool = True,
1640
+ return_offsets_mapping: bool = False,
1641
+ return_length: bool = False,
1642
+ verbose: bool = True,
1643
+ **kwargs,
1644
+ ) -> BatchEncoding:
1645
+ """
1646
+ Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
1647
+ adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
1648
+ manages a moving window (with user-defined stride) for overflowing tokens.
1649
+
1650
+ Args:
1651
+ batch_ids_pairs: list of tokenized input ids or input ids pairs
1652
+ """
1653
+ if return_token_type_ids and not add_special_tokens:
1654
+ raise ValueError(
1655
+ "Asking to return token_type_ids while setting add_special_tokens to False "
1656
+ "results in an undefined behavior. Please set add_special_tokens to True or "
1657
+ "set return_token_type_ids to None."
1658
+ )
1659
+
1660
+ batch_outputs = {}
1661
+ batch_outputs_list = []
1662
+ for example_id, (first_ids, second_ids) in enumerate(batch_ids_pairs):
1663
+ if stride > 0 and second_ids is not None:
1664
+ if return_token_type_ids is None:
1665
+ return_token_type_ids = "token_type_ids" in self.model_input_names
1666
+ if return_attention_mask is None:
1667
+ return_attention_mask = "attention_mask" in self.model_input_names
1668
+
1669
+ max_len_for_pair = (
1670
+ max_length
1671
+ - len(first_ids)
1672
+ - (
1673
+ self.num_special_tokens_to_add(pair=True)
1674
+ if add_special_tokens
1675
+ else 0
1676
+ )
1677
+ )
1678
+
1679
+ text, text_pair = kwargs["batch_text_or_text_pairs"][example_id]
1680
+ token_offset_mapping = self.get_offset_mapping(text)
1681
+ token_pair_offset_mapping = self.get_offset_mapping(text_pair)
1682
+
1683
+ offset = 0
1684
+ while offset < len(second_ids):
1685
+ encoded_inputs = {}
1686
+ length = len(second_ids) - offset
1687
+ if length > max_len_for_pair:
1688
+ length = max_len_for_pair
1689
+
1690
+ ids = first_ids
1691
+ pair_ids = second_ids[offset : offset + length]
1692
+ pair = bool(pair_ids is not None)
1693
+ mapping = token_offset_mapping
1694
+ pair_mapping = token_pair_offset_mapping[offset : offset + length]
1695
+ if add_special_tokens:
1696
+ offset_mapping = self.build_offset_mapping_with_special_tokens(
1697
+ mapping, pair_mapping
1698
+ )
1699
+ sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
1700
+ token_type_ids = self.create_token_type_ids_from_sequences(
1701
+ ids, pair_ids
1702
+ )
1703
+ else:
1704
+ offset_mapping = mapping + pair_mapping
1705
+ sequence = ids + pair_ids if pair else ids
1706
+ token_type_ids = [0] * len(ids) + (
1707
+ [0] * len(pair_ids) if pair else []
1708
+ )
1709
+ encoded_inputs["offset_mapping"] = offset_mapping
1710
+ # Build output dictionary
1711
+ encoded_inputs["input_ids"] = sequence
1712
+ if return_token_type_ids:
1713
+ encoded_inputs["token_type_ids"] = token_type_ids
1714
+ if return_special_tokens_mask:
1715
+ if add_special_tokens:
1716
+ encoded_inputs["special_tokens_mask"] = (
1717
+ self.get_special_tokens_mask(ids, pair_ids)
1718
+ )
1719
+ else:
1720
+ encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
1721
+
1722
+ # Check lengths
1723
+ self._eventual_warn_about_too_long_sequence(
1724
+ encoded_inputs["input_ids"], max_length, verbose
1725
+ )
1726
+ if return_position_ids:
1727
+ encoded_inputs["position_ids"] = list(
1728
+ range(len(encoded_inputs["input_ids"]))
1729
+ )
1730
+
1731
+ if return_length:
1732
+ encoded_inputs["length"] = len(encoded_inputs["input_ids"])
1733
+ encoded_inputs["seq_len"] = encoded_inputs["length"]
1734
+
1735
+ encoded_inputs["overflow_to_sample"] = example_id
1736
+
1737
+ for key, value in encoded_inputs.items():
1738
+ if key not in batch_outputs:
1739
+ batch_outputs[key] = []
1740
+ batch_outputs[key].append(value)
1741
+
1742
+ if offset + length == len(second_ids):
1743
+ break
1744
+ offset += min(length, stride)
1745
+ else:
1746
+ if return_offsets_mapping:
1747
+ kwargs["text"] = kwargs["texts"][example_id]
1748
+ kwargs["text_pair"] = None
1749
+ if kwargs["text_pairs"] is not None:
1750
+ kwargs["text_pair"] = kwargs["text_pairs"][example_id]
1751
+
1752
+ encoded_inputs = self.prepare_for_model(
1753
+ first_ids,
1754
+ second_ids,
1755
+ add_special_tokens=add_special_tokens,
1756
+ padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward
1757
+ truncation=truncation_strategy.value,
1758
+ max_length=max_length,
1759
+ stride=stride,
1760
+ pad_to_multiple_of=None, # we pad in batch afterward
1761
+ return_position_ids=return_position_ids, # we pad in batch afterward
1762
+ return_attention_mask=False, # we pad in batch afterward
1763
+ return_token_type_ids=return_token_type_ids,
1764
+ return_overflowing_tokens=return_overflowing_tokens,
1765
+ return_special_tokens_mask=return_special_tokens_mask,
1766
+ return_offsets_mapping=return_offsets_mapping,
1767
+ return_length=return_length,
1768
+ return_tensors=None, # We convert the whole batch to tensors at the end
1769
+ prepend_batch_axis=False,
1770
+ verbose=verbose,
1771
+ **kwargs,
1772
+ )
1773
+ for key, value in encoded_inputs.items():
1774
+ if key not in batch_outputs:
1775
+ batch_outputs[key] = []
1776
+ batch_outputs[key].append(value)
1777
+
1778
+ batch_outputs = self.pad(
1779
+ batch_outputs,
1780
+ padding=padding_strategy.value,
1781
+ max_length=max_length,
1782
+ pad_to_multiple_of=pad_to_multiple_of,
1783
+ return_attention_mask=return_attention_mask,
1784
+ )
1785
+ if return_dict:
1786
+ batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
1787
+ return batch_outputs
1788
+ else:
1789
+ for k, v in batch_outputs.items():
1790
+ for i in range(len(v)):
1791
+ if i >= len(batch_outputs_list):
1792
+ batch_outputs_list.append({k: v[i]})
1793
+ else:
1794
+ batch_outputs_list[i][k] = v[i]
1795
+ return batch_outputs_list
1796
+
1797
+ def _get_bert_like_offset_mapping(self, text: str):
1798
+ """
1799
+ Returns the offset mapping of tokens, i.e. the start and end character index of each token in the input text.
1800
+ Modified from https://github.com/bojone/bert4keras/blob/master/bert4keras/tokenizers.py#L372
1801
+ Args:
1802
+ text (str):
1803
+ Input text.
1804
+ Returns:
1805
+ list: The offset map of input text.
1806
+
1807
+ """
1808
+ if text is None:
1809
+ return None
1810
+ split_tokens = self.tokenize(text)
1811
+
1812
+ normalized_text, char_mapping = "", []
1813
+
1814
+ for i, ch in enumerate(text):
1815
+ if hasattr(self, "do_lower_case") and self.do_lower_case:
1816
+ ch = ch.lower()
1817
+ if self.basic_tokenizer.strip_accents is not False:
1818
+ ch = unicodedata.normalize("NFD", ch)
1819
+ ch = "".join([c for c in ch if unicodedata.category(c) != "Mn"])
1820
+ elif self.basic_tokenizer.strip_accents:
1821
+ ch = unicodedata.normalize("NFD", ch)
1822
+ ch = "".join([c for c in ch if unicodedata.category(c) != "Mn"])
1823
+
1824
+ ch = "".join(
1825
+ [
1826
+ c
1827
+ for c in ch
1828
+ if not (ord(c) == 0 or ord(c) == 0xFFFD or _is_control(c))
1829
+ ]
1830
+ )
1831
+ normalized_text += ch
1832
+
1833
+ char_mapping.extend([i] * len(ch))
1834
+ text, token_mapping, offset = normalized_text, [], 0
1835
+
1836
+ char_mapping_indexes = []
1837
+ for index, token in enumerate(split_tokens):
1838
+ if token[:2] == "##":
1839
+ token = token[2:]
1840
+ if token in self.all_special_tokens:
1841
+ token = (
1842
+ token.lower()
1843
+ if hasattr(self, "do_lower_case") and self.do_lower_case
1844
+ else token
1845
+ )
1846
+ # The greek letter "sigma" has 2 forms of lowercase, σ and ς respectively.
1847
+ # When used as a final letter of a word, the final form (ς) is used. Otherwise, the form (σ) is used.
1848
+ # https://latin.stackexchange.com/questions/6168/how-and-when-did-we-get-two-forms-of-sigma
1849
+ if "σ" in token or "ς" in token:
1850
+ start = (
1851
+ text[offset:].replace("ς", "σ").index(token.replace("ς", "σ"))
1852
+ + offset
1853
+ )
1854
+ else:
1855
+
1856
+ # try to fix: https://github.com/PaddlePaddle/PaddleNLP/issues/3985
1857
+ if token not in text[offset:]:
1858
+ # check whether there are consecutive UNK tokens, eg: ['好', '[UNK]', '[UNK]', 'good']
1859
+ if (
1860
+ index < len(split_tokens) - 1
1861
+ and split_tokens[index + 1] in self.all_special_tokens
1862
+ ):
1863
+ start = offset
1864
+ token = " " # only contains one char
1865
+ else:
1866
+ start = -1
1867
+ else:
1868
+ start = text[offset:].index(token) + offset
1869
+
1870
+ end = start + len(token)
1871
+ char_mapping_indexes.append([start, end])
1872
+
1873
+ if start != -1:
1874
+ offset = end
1875
+
1876
+ token_mapping = []
1877
+ for index, (start, end) in enumerate(char_mapping_indexes):
1878
+ if start == -1:
1879
+ # init start
1880
+ if index == 0:
1881
+ start = 0
1882
+ else:
1883
+ start = char_mapping_indexes[index - 1][1]
1884
+
1885
+ # init end
1886
+ if index == len(char_mapping_indexes) - 1:
1887
+ end = len(char_mapping)
1888
+ else:
1889
+ # next start
1890
+ end = char_mapping_indexes[index + 1][0]
1891
+
1892
+ token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
1893
+
1894
+ return token_mapping
1895
+
1896
+ def get_offset_mapping(self, text: str, split_tokens: Optional[List[str]] = None):
1897
+ """
1898
+ Returns the offset mapping of tokens, i.e. the start and end character index of each token in the input text.
1899
+ Modified from https://github.com/bojone/bert4keras/blob/master/bert4keras/tokenizers.py#L372
1900
+ Args:
1901
+ text (str):
1902
+ Input text.
1903
+ split_tokens (Optional[List[str]]):
1904
+ the pre-split tokens, which can speed up the operation.
1905
+
1906
+ Returns:
1907
+ list: The offset map of input text.
1908
+
1909
+ """
1910
+ if text is None:
1911
+ return None
1912
1913
+
1914
+ # bert-like tokenizer use the old-school code block
1915
+ if hasattr(self, "basic_tokenizer") or hasattr(self, "wordpiece_tokenizer"):
1916
+ return self._get_bert_like_offset_mapping(text)
1917
+
1918
+ if not split_tokens:
1919
+ split_tokens = self.tokenize(text)
1920
+
1921
+ normalized_text, char_mapping = "", []
1922
+
1923
+ for i, ch in enumerate(text):
1924
+ normalized_text += normalize_chars(ch)
1925
+ char_mapping.extend([i] * len(ch))
1926
+
1927
+ text, token_mapping, offset = normalized_text, [], 0
1928
+ do_lower_case = getattr(self, "do_lower_case", False)
1929
+
1930
+ # lower the text if the token is lower-cased
1931
+ # keep align with token
1932
+ if do_lower_case:
1933
+ text = text.lower()
1934
+
1935
+ char_mapping_indexes = []
1936
+ for token in split_tokens:
1937
+
1938
+ # convert tokens into original string
1939
+ token: str = self.convert_tokens_to_string(token).strip()
1940
+
1941
+ if token in self.all_special_tokens:
1942
+ if do_lower_case:
1943
+ token = token.lower()
1944
+
1945
+ # The greek letter "sigma" has 2 forms of lowercase, σ and ς respectively.
1946
+ # When used as a final letter of a word, the final form (ς) is used. Otherwise, the form (σ) is used.
1947
+ # https://latin.stackexchange.com/questions/6168/how-and-when-did-we-get-two-forms-of-sigma
1948
+ if "σ" in token or "ς" in token:
1949
+ start = (
1950
+ text[offset:].replace("ς", "σ").index(token.replace("ς", "σ"))
1951
+ + offset
1952
+ )
1953
+ else:
1954
+
1955
+ # try to fix: https://github.com/PaddlePaddle/PaddleNLP/issues/3985
1956
+ if token not in text[offset:]:
1957
+ start = -1
1958
+ else:
1959
+ start = text[offset:].index(token) + offset
1960
+
1961
+ end = start + len(token)
1962
+ char_mapping_indexes.append([start, end])
1963
+
1964
+ if start != -1:
1965
+ offset = end
1966
+
1967
+ token_mapping = []
1968
+ for index, (start, end) in enumerate(char_mapping_indexes):
1969
+ if start == -1:
1970
+ # init start
1971
+ if index == 0:
1972
+ start = 0
1973
+ else:
1974
+ start = char_mapping_indexes[index - 1][1]
1975
+
1976
+ # init end
1977
+ if index == len(char_mapping_indexes) - 1:
1978
+ end = len(char_mapping)
1979
+ else:
1980
+ # next start
1981
+ end = char_mapping_indexes[index + 1][0]
1982
+
1983
+ token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
1984
+
1985
+ return token_mapping
1986
+
1987
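A sketch of how the offset mapping lines tokens up with character spans in the original text (hypothetical `tokenizer`):

```python
text = "hello world"
for token, (start, end) in zip(tokenizer.tokenize(text), tokenizer.get_offset_mapping(text)):
    # Each (start, end) pair should read back the characters the token covers.
    print(token, repr(text[start:end]))
```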
+ def _decode(
1988
+ self,
1989
+ token_ids: List[int],
1990
+ skip_special_tokens: bool = False,
1991
+ clean_up_tokenization_spaces: bool = True,
1992
+ spaces_between_special_tokens: bool = True,
1993
+ **kwargs,
1994
+ ) -> str:
1995
+ if isinstance(token_ids, np.ndarray):
1996
+ token_ids = token_ids.tolist()
1997
+ self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
1998
+ filtered_tokens = self.convert_ids_to_tokens(
1999
+ token_ids, skip_special_tokens=skip_special_tokens
2000
+ )
2001
+
2002
+ # To avoid mixing byte-level and unicode for byte-level BPE
2003
+ # we need to build string separately for added tokens and byte-level tokens
2004
+ # cf. https://github.com/huggingface/transformers/issues/1133
2005
+ sub_texts = []
2006
+ current_sub_text = []
2007
+ for token in filtered_tokens:
2008
+ if skip_special_tokens and token in self.all_special_ids:
2009
+ continue
2010
+ if token in self.added_tokens_encoder:
2011
+ if current_sub_text:
2012
+ sub_texts.append(self.convert_tokens_to_string(current_sub_text))
2013
+ current_sub_text = []
2014
+ sub_texts.append(token)
2015
+ else:
2016
+ current_sub_text.append(token)
2017
+ if current_sub_text:
2018
+ sub_texts.append(self.convert_tokens_to_string(current_sub_text))
2019
+
2020
+ if spaces_between_special_tokens:
2021
+ text = " ".join(sub_texts)
2022
+ else:
2023
+ text = "".join(sub_texts)
2024
+
2025
+ if clean_up_tokenization_spaces:
2026
+ clean_text = self.clean_up_tokenization(text)
2027
+ return clean_text
2028
+ else:
2029
+ return text
2030
+
2031
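A small decoding sketch exercising the public `decode` entry point that dispatches to `_decode` (hypothetical `tokenizer`; the input text is illustrative):

```python
ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("hello world"))
print(tokenizer.decode(ids))                            # may include special/added tokens as-is
print(tokenizer.decode(ids, skip_special_tokens=True))  # cleaned-up plain text
```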
+ def decode_token(
2032
+ self,
2033
+ all_input_ids: List[int],
2034
+ prefix_offset: int = 0,
2035
+ read_offset: int = 0,
2036
+ ) -> Tuple[str, int, int]:
2037
+ """tokenizer decoding for the streaming generation use case. This method can be overrided for tokenizer that doesn't follow this API"""
2038
+ # The prefix text is necessary only to defeat cleanup algorithms in the decode
2039
+ # which decide to add a space or not depending on the surrounding ids.
2040
+ prefix_text = self.decode(
2041
+ all_input_ids[prefix_offset:read_offset], skip_special_tokens=False
2042
+ )
2043
+ new_text = self.decode(all_input_ids[prefix_offset:], skip_special_tokens=False)
2044
+
2045
+ if len(new_text) > len(prefix_text) and not new_text.endswith("�"):
2046
+ # A trailing "�" (replacement character) means a potentially unfinished byte sequence
2047
+ # from byte fallback tokenization.
2048
+ # If it's in the middle, it's probably a real invalid id generated
2049
+ # by the model
2050
+ prefix_index = new_text.index(prefix_text)
2051
+ new_text = new_text[prefix_index + len(prefix_text) :]
2052
+ return new_text, read_offset, len(all_input_ids)
2053
+ else:
2054
+ return "", prefix_offset, read_offset
2055
+
2056
+
2057
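A streaming-generation sketch built on `decode_token`: ids are appended one at a time and only newly finalized text is emitted (`generated_ids` and `tokenizer` are assumed to exist):

```python
all_ids, prefix_offset, read_offset = [], 0, 0
for next_id in generated_ids:
    all_ids.append(next_id)
    new_text, prefix_offset, read_offset = tokenizer.decode_token(
        all_ids, prefix_offset=prefix_offset, read_offset=read_offset
    )
    if new_text:  # empty string means an unfinished byte sequence; wait for more ids
        print(new_text, end="", flush=True)
```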
+ def _is_control(char):
2058
+ """Checks whether `chars` is a control character."""
2059
+ # These are technically control characters but we count them as whitespace
2060
+ # characters.
2061
+ if char == "\t" or char == "\n" or char == "\r":
2062
+ return False
2063
+ cat = unicodedata.category(char)
2064
+ if cat.startswith("C"):
2065
+ return True
2066
+ return False
2067
+
2068
+
2069
+ def _is_punctuation(char):
2070
+ """Checks whether `chars` is a punctuation character."""
2071
+ cp = ord(char)
2072
+ # We treat all non-letter/number ASCII as punctuation.
2073
+ # Characters such as "^", "$", and "`" are not in the Unicode
2074
+ # Punctuation class but we treat them as punctuation anyways, for
2075
+ # consistency.
2076
+ if (
2077
+ (cp >= 33 and cp <= 47)
2078
+ or (cp >= 58 and cp <= 64)
2079
+ or (cp >= 91 and cp <= 96)
2080
+ or (cp >= 123 and cp <= 126)
2081
+ ):
2082
+ return True
2083
+ cat = unicodedata.category(char)
2084
+ if cat.startswith("P"):
2085
+ return True
2086
+ return False
2087
+
2088
+
2089
+ def _is_symbol(char):
2090
+ """Check whether CP is the codepoint of a Symbol character."""
2091
+ cp = ord(char)
2092
+ if unicodedata.category(char).startswith("S") or (
2093
+ cp in [0x00AD, 0x00B2, 0x00BA, 0x3007, 0x00B5, 0x00D8, 0x014B, 0x01B1]
2094
+ ):
2095
+ return True
2096
+ return False
2097
+
2098
+
2099
+ def _is_whitespace(char):
2100
+ """
2101
+ Checks whether `char` is a whitespace character.
2102
+ """
2103
+ # \t, \n, and \r are technically control characters but we treat them
2104
+ # as whitespace since they are generally considered as such.
2105
+ if char == " " or char == "\t" or char == "\n" or char == "\r":
2106
+ return True
2107
+ cat = unicodedata.category(char)
2108
+ if cat == "Zs":
2109
+ return True
2110
+ return False
2111
+
2112
+
2113
+ def convert_to_unicode(text):
2114
+ """
2115
+ Converts `text` to Unicode (if it's not already), assuming utf-8 input.
2116
+ Args:
2117
+ text (str|bytes): Text to be converted to unicode.
2118
+ Returns:
2119
+ str: converted text.
2120
+ """
2121
+ if isinstance(text, str):
2122
+ return text
2123
+ elif isinstance(text, bytes):
2124
+ return text.decode("utf-8", "ignore")
2125
+ else:
2126
+ raise ValueError("Unsupported string type: %s" % (type(text)))
2127
+
2128
+
2129
+ def whitespace_tokenize(text):
2130
+ """
2131
+ Runs basic whitespace cleaning and splitting on a piece of text.
2132
+ Args:
2133
+ text (str): Text to be tokenized.
2134
+ Returns:
2135
+ list(str): Token list.
2136
+ """
2137
+ text = text.strip()
2138
+ if not text:
2139
+ return []
2140
+ tokens = text.split()
2141
+ return tokens
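The module-level helpers are plain functions and can be exercised directly; the expected results follow from the definitions above:

```python
print(whitespace_tokenize("  hello   world \n"))  # ['hello', 'world']
print(convert_to_unicode(b"caf\xc3\xa9"))         # 'café'
print(_is_punctuation(","), _is_whitespace(" "), _is_control("\x07"))  # True True True
```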