python-doctr 0.12.0__tar.gz → 1.0.0__tar.gz

This diff shows the changes between two publicly released versions of the package, as published to their respective public registries. It is provided for informational purposes only.
Files changed (221)
  1. {python_doctr-0.12.0/python_doctr.egg-info → python_doctr-1.0.0}/PKG-INFO +18 -75
  2. {python_doctr-0.12.0 → python_doctr-1.0.0}/README.md +13 -60
  3. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/__init__.py +0 -1
  4. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/__init__.py +0 -5
  5. python_doctr-1.0.0/doctr/datasets/datasets/__init__.py +1 -0
  6. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/datasets/pytorch.py +2 -2
  7. python_doctr-1.0.0/doctr/datasets/generator/__init__.py +1 -0
  8. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/vocabs.py +0 -2
  9. python_doctr-1.0.0/doctr/file_utils.py +30 -0
  10. python_doctr-1.0.0/doctr/io/image/__init__.py +2 -0
  11. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/io/image/pytorch.py +1 -1
  12. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/_utils.py +3 -3
  13. python_doctr-1.0.0/doctr/models/classification/magc_resnet/__init__.py +1 -0
  14. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/classification/magc_resnet/pytorch.py +2 -2
  15. python_doctr-1.0.0/doctr/models/classification/mobilenet/__init__.py +1 -0
  16. python_doctr-1.0.0/doctr/models/classification/predictor/__init__.py +1 -0
  17. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/classification/predictor/pytorch.py +1 -1
  18. python_doctr-1.0.0/doctr/models/classification/resnet/__init__.py +1 -0
  19. python_doctr-1.0.0/doctr/models/classification/textnet/__init__.py +1 -0
  20. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/classification/textnet/pytorch.py +1 -1
  21. python_doctr-1.0.0/doctr/models/classification/vgg/__init__.py +1 -0
  22. python_doctr-1.0.0/doctr/models/classification/vip/__init__.py +1 -0
  23. python_doctr-1.0.0/doctr/models/classification/vip/layers/__init__.py +1 -0
  24. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/classification/vip/layers/pytorch.py +1 -1
  25. python_doctr-1.0.0/doctr/models/classification/vit/__init__.py +1 -0
  26. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/classification/vit/pytorch.py +2 -2
  27. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/classification/zoo.py +6 -11
  28. python_doctr-1.0.0/doctr/models/detection/_utils/__init__.py +2 -0
  29. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/detection/core.py +1 -1
  30. python_doctr-1.0.0/doctr/models/detection/differentiable_binarization/__init__.py +1 -0
  31. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/detection/differentiable_binarization/base.py +4 -12
  32. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/detection/differentiable_binarization/pytorch.py +3 -3
  33. python_doctr-1.0.0/doctr/models/detection/fast/__init__.py +1 -0
  34. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/detection/fast/base.py +4 -14
  35. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/detection/fast/pytorch.py +4 -4
  36. python_doctr-1.0.0/doctr/models/detection/linknet/__init__.py +1 -0
  37. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/detection/linknet/base.py +3 -12
  38. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/detection/linknet/pytorch.py +2 -2
  39. python_doctr-1.0.0/doctr/models/detection/predictor/__init__.py +1 -0
  40. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/detection/predictor/pytorch.py +1 -1
  41. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/detection/zoo.py +15 -32
  42. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/factory/hub.py +8 -21
  43. python_doctr-1.0.0/doctr/models/kie_predictor/__init__.py +1 -0
  44. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/kie_predictor/pytorch.py +2 -6
  45. python_doctr-1.0.0/doctr/models/modules/layers/__init__.py +1 -0
  46. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/modules/layers/pytorch.py +3 -3
  47. python_doctr-1.0.0/doctr/models/modules/transformer/__init__.py +1 -0
  48. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/modules/transformer/pytorch.py +2 -2
  49. python_doctr-1.0.0/doctr/models/modules/vision_transformer/__init__.py +1 -0
  50. python_doctr-1.0.0/doctr/models/predictor/__init__.py +1 -0
  51. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/predictor/base.py +3 -8
  52. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/predictor/pytorch.py +2 -5
  53. python_doctr-1.0.0/doctr/models/preprocessor/__init__.py +1 -0
  54. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/preprocessor/pytorch.py +27 -32
  55. python_doctr-1.0.0/doctr/models/recognition/crnn/__init__.py +1 -0
  56. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/recognition/crnn/pytorch.py +6 -6
  57. python_doctr-1.0.0/doctr/models/recognition/master/__init__.py +1 -0
  58. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/recognition/master/pytorch.py +5 -5
  59. python_doctr-1.0.0/doctr/models/recognition/parseq/__init__.py +1 -0
  60. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/recognition/parseq/pytorch.py +5 -5
  61. python_doctr-1.0.0/doctr/models/recognition/predictor/__init__.py +1 -0
  62. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/recognition/predictor/_utils.py +7 -16
  63. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/recognition/predictor/pytorch.py +1 -2
  64. python_doctr-1.0.0/doctr/models/recognition/sar/__init__.py +1 -0
  65. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/recognition/sar/pytorch.py +3 -3
  66. python_doctr-1.0.0/doctr/models/recognition/viptr/__init__.py +1 -0
  67. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/recognition/viptr/pytorch.py +3 -3
  68. python_doctr-1.0.0/doctr/models/recognition/vitstr/__init__.py +1 -0
  69. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/recognition/vitstr/pytorch.py +3 -3
  70. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/recognition/zoo.py +13 -13
  71. python_doctr-1.0.0/doctr/models/utils/__init__.py +1 -0
  72. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/utils/pytorch.py +1 -1
  73. python_doctr-1.0.0/doctr/transforms/functional/__init__.py +1 -0
  74. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/transforms/functional/pytorch.py +4 -4
  75. python_doctr-1.0.0/doctr/transforms/modules/__init__.py +2 -0
  76. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/transforms/modules/base.py +26 -92
  77. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/transforms/modules/pytorch.py +28 -26
  78. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/utils/geometry.py +6 -10
  79. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/utils/visualization.py +1 -1
  80. python_doctr-1.0.0/doctr/version.py +1 -0
  81. {python_doctr-0.12.0 → python_doctr-1.0.0}/pyproject.toml +5 -23
  82. {python_doctr-0.12.0 → python_doctr-1.0.0/python_doctr.egg-info}/PKG-INFO +18 -75
  83. {python_doctr-0.12.0 → python_doctr-1.0.0}/python_doctr.egg-info/SOURCES.txt +0 -31
  84. {python_doctr-0.12.0 → python_doctr-1.0.0}/python_doctr.egg-info/requires.txt +3 -23
  85. {python_doctr-0.12.0 → python_doctr-1.0.0}/setup.py +1 -1
  86. python_doctr-0.12.0/doctr/datasets/datasets/__init__.py +0 -6
  87. python_doctr-0.12.0/doctr/datasets/datasets/tensorflow.py +0 -59
  88. python_doctr-0.12.0/doctr/datasets/generator/__init__.py +0 -6
  89. python_doctr-0.12.0/doctr/datasets/generator/tensorflow.py +0 -58
  90. python_doctr-0.12.0/doctr/datasets/loader.py +0 -94
  91. python_doctr-0.12.0/doctr/file_utils.py +0 -129
  92. python_doctr-0.12.0/doctr/io/image/__init__.py +0 -8
  93. python_doctr-0.12.0/doctr/io/image/tensorflow.py +0 -101
  94. python_doctr-0.12.0/doctr/models/classification/magc_resnet/__init__.py +0 -6
  95. python_doctr-0.12.0/doctr/models/classification/magc_resnet/tensorflow.py +0 -196
  96. python_doctr-0.12.0/doctr/models/classification/mobilenet/__init__.py +0 -6
  97. python_doctr-0.12.0/doctr/models/classification/mobilenet/tensorflow.py +0 -442
  98. python_doctr-0.12.0/doctr/models/classification/predictor/__init__.py +0 -6
  99. python_doctr-0.12.0/doctr/models/classification/predictor/tensorflow.py +0 -60
  100. python_doctr-0.12.0/doctr/models/classification/resnet/__init__.py +0 -6
  101. python_doctr-0.12.0/doctr/models/classification/resnet/tensorflow.py +0 -418
  102. python_doctr-0.12.0/doctr/models/classification/textnet/__init__.py +0 -6
  103. python_doctr-0.12.0/doctr/models/classification/textnet/tensorflow.py +0 -275
  104. python_doctr-0.12.0/doctr/models/classification/vgg/__init__.py +0 -6
  105. python_doctr-0.12.0/doctr/models/classification/vgg/tensorflow.py +0 -125
  106. python_doctr-0.12.0/doctr/models/classification/vip/__init__.py +0 -4
  107. python_doctr-0.12.0/doctr/models/classification/vip/layers/__init__.py +0 -4
  108. python_doctr-0.12.0/doctr/models/classification/vit/__init__.py +0 -6
  109. python_doctr-0.12.0/doctr/models/classification/vit/tensorflow.py +0 -201
  110. python_doctr-0.12.0/doctr/models/detection/_utils/__init__.py +0 -7
  111. python_doctr-0.12.0/doctr/models/detection/_utils/tensorflow.py +0 -34
  112. python_doctr-0.12.0/doctr/models/detection/differentiable_binarization/__init__.py +0 -6
  113. python_doctr-0.12.0/doctr/models/detection/differentiable_binarization/tensorflow.py +0 -421
  114. python_doctr-0.12.0/doctr/models/detection/fast/__init__.py +0 -6
  115. python_doctr-0.12.0/doctr/models/detection/fast/tensorflow.py +0 -427
  116. python_doctr-0.12.0/doctr/models/detection/linknet/__init__.py +0 -6
  117. python_doctr-0.12.0/doctr/models/detection/linknet/tensorflow.py +0 -377
  118. python_doctr-0.12.0/doctr/models/detection/predictor/__init__.py +0 -6
  119. python_doctr-0.12.0/doctr/models/detection/predictor/tensorflow.py +0 -70
  120. python_doctr-0.12.0/doctr/models/kie_predictor/__init__.py +0 -6
  121. python_doctr-0.12.0/doctr/models/kie_predictor/tensorflow.py +0 -187
  122. python_doctr-0.12.0/doctr/models/modules/layers/__init__.py +0 -6
  123. python_doctr-0.12.0/doctr/models/modules/layers/tensorflow.py +0 -171
  124. python_doctr-0.12.0/doctr/models/modules/transformer/__init__.py +0 -6
  125. python_doctr-0.12.0/doctr/models/modules/transformer/tensorflow.py +0 -235
  126. python_doctr-0.12.0/doctr/models/modules/vision_transformer/__init__.py +0 -6
  127. python_doctr-0.12.0/doctr/models/modules/vision_transformer/tensorflow.py +0 -100
  128. python_doctr-0.12.0/doctr/models/predictor/__init__.py +0 -6
  129. python_doctr-0.12.0/doctr/models/predictor/tensorflow.py +0 -155
  130. python_doctr-0.12.0/doctr/models/preprocessor/__init__.py +0 -6
  131. python_doctr-0.12.0/doctr/models/preprocessor/tensorflow.py +0 -122
  132. python_doctr-0.12.0/doctr/models/recognition/crnn/__init__.py +0 -6
  133. python_doctr-0.12.0/doctr/models/recognition/crnn/tensorflow.py +0 -317
  134. python_doctr-0.12.0/doctr/models/recognition/master/__init__.py +0 -6
  135. python_doctr-0.12.0/doctr/models/recognition/master/tensorflow.py +0 -320
  136. python_doctr-0.12.0/doctr/models/recognition/parseq/__init__.py +0 -6
  137. python_doctr-0.12.0/doctr/models/recognition/parseq/tensorflow.py +0 -516
  138. python_doctr-0.12.0/doctr/models/recognition/predictor/__init__.py +0 -6
  139. python_doctr-0.12.0/doctr/models/recognition/predictor/tensorflow.py +0 -79
  140. python_doctr-0.12.0/doctr/models/recognition/sar/__init__.py +0 -6
  141. python_doctr-0.12.0/doctr/models/recognition/sar/tensorflow.py +0 -423
  142. python_doctr-0.12.0/doctr/models/recognition/viptr/__init__.py +0 -4
  143. python_doctr-0.12.0/doctr/models/recognition/vitstr/__init__.py +0 -6
  144. python_doctr-0.12.0/doctr/models/recognition/vitstr/tensorflow.py +0 -285
  145. python_doctr-0.12.0/doctr/models/utils/__init__.py +0 -6
  146. python_doctr-0.12.0/doctr/models/utils/tensorflow.py +0 -189
  147. python_doctr-0.12.0/doctr/transforms/functional/__init__.py +0 -6
  148. python_doctr-0.12.0/doctr/transforms/functional/tensorflow.py +0 -254
  149. python_doctr-0.12.0/doctr/transforms/modules/__init__.py +0 -8
  150. python_doctr-0.12.0/doctr/transforms/modules/tensorflow.py +0 -562
  151. python_doctr-0.12.0/doctr/version.py +0 -1
  152. {python_doctr-0.12.0 → python_doctr-1.0.0}/LICENSE +0 -0
  153. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/contrib/__init__.py +0 -0
  154. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/contrib/artefacts.py +0 -0
  155. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/contrib/base.py +0 -0
  156. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/coco_text.py +0 -0
  157. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/cord.py +0 -0
  158. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/datasets/base.py +0 -0
  159. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/detection.py +0 -0
  160. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/doc_artefacts.py +0 -0
  161. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/funsd.py +0 -0
  162. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/generator/base.py +0 -0
  163. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/generator/pytorch.py +0 -0
  164. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/ic03.py +0 -0
  165. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/ic13.py +0 -0
  166. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/iiit5k.py +0 -0
  167. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/iiithws.py +0 -0
  168. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/imgur5k.py +0 -0
  169. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/mjsynth.py +0 -0
  170. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/ocr.py +0 -0
  171. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/orientation.py +0 -0
  172. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/recognition.py +0 -0
  173. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/sroie.py +0 -0
  174. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/svhn.py +0 -0
  175. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/svt.py +0 -0
  176. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/synthtext.py +0 -0
  177. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/utils.py +0 -0
  178. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/datasets/wildreceipt.py +0 -0
  179. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/io/__init__.py +0 -0
  180. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/io/elements.py +0 -0
  181. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/io/html.py +0 -0
  182. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/io/image/base.py +0 -0
  183. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/io/pdf.py +0 -0
  184. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/io/reader.py +0 -0
  185. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/__init__.py +0 -0
  186. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/builder.py +0 -0
  187. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/classification/__init__.py +0 -0
  188. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/classification/mobilenet/pytorch.py +0 -0
  189. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/classification/resnet/pytorch.py +0 -0
  190. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/classification/vgg/pytorch.py +0 -0
  191. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/classification/vip/pytorch.py +0 -0
  192. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/core.py +0 -0
  193. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/detection/__init__.py +0 -0
  194. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/detection/_utils/base.py +0 -0
  195. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/detection/_utils/pytorch.py +0 -0
  196. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/factory/__init__.py +0 -0
  197. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/kie_predictor/base.py +0 -0
  198. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/modules/__init__.py +0 -0
  199. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/modules/vision_transformer/pytorch.py +0 -0
  200. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/recognition/__init__.py +0 -0
  201. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/recognition/core.py +0 -0
  202. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/recognition/master/base.py +0 -0
  203. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/recognition/parseq/base.py +0 -0
  204. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/recognition/utils.py +0 -0
  205. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/recognition/vitstr/base.py +0 -0
  206. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/models/zoo.py +0 -0
  207. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/py.typed +0 -0
  208. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/transforms/__init__.py +0 -0
  209. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/transforms/functional/base.py +0 -0
  210. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/utils/__init__.py +0 -0
  211. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/utils/common_types.py +0 -0
  212. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/utils/data.py +0 -0
  213. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/utils/fonts.py +0 -0
  214. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/utils/metrics.py +0 -0
  215. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/utils/multithreading.py +0 -0
  216. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/utils/reconstitution.py +0 -0
  217. {python_doctr-0.12.0 → python_doctr-1.0.0}/doctr/utils/repr.py +0 -0
  218. {python_doctr-0.12.0 → python_doctr-1.0.0}/python_doctr.egg-info/dependency_links.txt +0 -0
  219. {python_doctr-0.12.0 → python_doctr-1.0.0}/python_doctr.egg-info/top_level.txt +0 -0
  220. {python_doctr-0.12.0 → python_doctr-1.0.0}/python_doctr.egg-info/zip-safe +0 -0
  221. {python_doctr-0.12.0 → python_doctr-1.0.0}/setup.cfg +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: python-doctr
- Version: 0.12.0
+ Version: 1.0.0
  Summary: Document Text Recognition (docTR): deep Learning for high-performance OCR on documents.
  Author-email: Mindee <contact@mindee.com>
  Maintainer: François-Guillaume Fernandez, Charles Gaillard, Olivier Dulcy, Felix Dittrich
@@ -210,7 +210,7 @@ Project-URL: documentation, https://mindee.github.io/doctr
  Project-URL: repository, https://github.com/mindee/doctr
  Project-URL: tracker, https://github.com/mindee/doctr/issues
  Project-URL: changelog, https://mindee.github.io/doctr/changelog.html
- Keywords: OCR,deep learning,computer vision,tensorflow,pytorch,text detection,text recognition
+ Keywords: OCR,deep learning,computer vision,pytorch,text detection,text recognition
  Classifier: Development Status :: 4 - Beta
  Classifier: Intended Audience :: Developers
  Classifier: Intended Audience :: Education
@@ -226,6 +226,9 @@ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
  Requires-Python: <4,>=3.10.0
  Description-Content-Type: text/markdown
  License-File: LICENSE
+ Requires-Dist: torch<3.0.0,>=2.0.0
+ Requires-Dist: torchvision>=0.15.0
+ Requires-Dist: onnx<3.0.0,>=1.12.0
  Requires-Dist: numpy<3.0.0,>=1.16.0
  Requires-Dist: scipy<2.0.0,>=1.4.0
  Requires-Dist: h5py<4.0.0,>=3.1.0
@@ -241,15 +244,6 @@ Requires-Dist: defusedxml>=0.7.0
  Requires-Dist: anyascii>=0.3.2
  Requires-Dist: validators>=0.18.0
  Requires-Dist: tqdm>=4.30.0
- Provides-Extra: tf
- Requires-Dist: tensorflow[and-cuda]<3.0.0,>=2.15.0; sys_platform == "linux" and extra == "tf"
- Requires-Dist: tensorflow<3.0.0,>=2.15.0; sys_platform != "linux" and extra == "tf"
- Requires-Dist: tf-keras<3.0.0,>=2.15.0; extra == "tf"
- Requires-Dist: tf2onnx<2.0.0,>=1.16.0; extra == "tf"
- Provides-Extra: torch
- Requires-Dist: torch<3.0.0,>=2.0.0; extra == "torch"
- Requires-Dist: torchvision>=0.15.0; extra == "torch"
- Requires-Dist: onnx<3.0.0,>=1.12.0; extra == "torch"
  Provides-Extra: html
  Requires-Dist: weasyprint>=55.0; extra == "html"
  Provides-Extra: viz
@@ -277,10 +271,6 @@ Requires-Dist: sphinx-markdown-tables>=0.0.15; extra == "docs"
  Requires-Dist: sphinx-tabs>=3.3.0; extra == "docs"
  Requires-Dist: furo>=2022.3.4; extra == "docs"
  Provides-Extra: dev
- Requires-Dist: tensorflow[and-cuda]<3.0.0,>=2.15.0; sys_platform == "linux" and extra == "dev"
- Requires-Dist: tensorflow<3.0.0,>=2.15.0; sys_platform != "linux" and extra == "dev"
- Requires-Dist: tf-keras<3.0.0,>=2.15.0; extra == "dev"
- Requires-Dist: tf2onnx<2.0.0,>=1.16.0; extra == "dev"
  Requires-Dist: torch<3.0.0,>=2.0.0; extra == "dev"
  Requires-Dist: torchvision>=0.15.0; extra == "dev"
  Requires-Dist: onnx<3.0.0,>=1.12.0; extra == "dev"
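The metadata change above is the crux of the release: `torch`, `torchvision`, and `onnx` move from the optional `torch` extra into the unconditional `Requires-Dist` set, while the `tf` extra disappears entirely. A minimal sketch of how downstream tooling could verify this against an installed 1.0.0 (the check itself is ours, not part of the package):

```python
# Inspect python-doctr's declared requirements via the standard library.
import importlib.metadata

reqs = importlib.metadata.requires("python-doctr") or []
# 0.12.0 declared torch only behind the marker `extra == "torch"`;
# 1.0.0 declares it unconditionally, so a marker-free entry must exist.
assert any(r.startswith("torch") and "extra ==" not in r for r in reqs)
```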
@@ -309,10 +299,10 @@ Dynamic: license-file
  <img src="https://github.com/mindee/doctr/raw/main/docs/images/Logo_doctr.gif" width="40%">
  </p>

- [![Slack Icon](https://img.shields.io/badge/Slack-Community-4A154B?style=flat-square&logo=slack&logoColor=white)](https://slack.mindee.com) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) ![Build Status](https://github.com/mindee/doctr/workflows/builds/badge.svg) [![Docker Images](https://img.shields.io/badge/Docker-4287f5?style=flat&logo=docker&logoColor=white)](https://github.com/mindee/doctr/pkgs/container/doctr) [![codecov](https://codecov.io/gh/mindee/doctr/branch/main/graph/badge.svg?token=577MO567NM)](https://codecov.io/gh/mindee/doctr) [![CodeFactor](https://www.codefactor.io/repository/github/mindee/doctr/badge?s=bae07db86bb079ce9d6542315b8c6e70fa708a7e)](https://www.codefactor.io/repository/github/mindee/doctr) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/340a76749b634586a498e1c0ab998f08)](https://app.codacy.com/gh/mindee/doctr?utm_source=github.com&utm_medium=referral&utm_content=mindee/doctr&utm_campaign=Badge_Grade) [![Doc Status](https://github.com/mindee/doctr/workflows/doc-status/badge.svg)](https://mindee.github.io/doctr) [![Pypi](https://img.shields.io/badge/pypi-v0.12.0-blue.svg)](https://pypi.org/project/python-doctr/) [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/mindee/doctr) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mindee/notebooks/blob/main/doctr/quicktour.ipynb) [![Gurubase](https://img.shields.io/badge/Gurubase-Ask%20docTR%20Guru-006BFF)](https://gurubase.io/g/doctr)
+ [![Slack Icon](https://img.shields.io/badge/Slack-Community-4A154B?style=flat-square&logo=slack&logoColor=white)](https://slack.mindee.com) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) ![Build Status](https://github.com/mindee/doctr/workflows/builds/badge.svg) [![Docker Images](https://img.shields.io/badge/Docker-4287f5?style=flat&logo=docker&logoColor=white)](https://github.com/mindee/doctr/pkgs/container/doctr) [![codecov](https://codecov.io/gh/mindee/doctr/branch/main/graph/badge.svg?token=577MO567NM)](https://codecov.io/gh/mindee/doctr) [![CodeFactor](https://www.codefactor.io/repository/github/mindee/doctr/badge?s=bae07db86bb079ce9d6542315b8c6e70fa708a7e)](https://www.codefactor.io/repository/github/mindee/doctr) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/340a76749b634586a498e1c0ab998f08)](https://app.codacy.com/gh/mindee/doctr?utm_source=github.com&utm_medium=referral&utm_content=mindee/doctr&utm_campaign=Badge_Grade) [![Doc Status](https://github.com/mindee/doctr/workflows/doc-status/badge.svg)](https://mindee.github.io/doctr) [![Pypi](https://img.shields.io/badge/pypi-v1.0.0-blue.svg)](https://pypi.org/project/python-doctr/) [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/mindee/doctr) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mindee/notebooks/blob/main/doctr/quicktour.ipynb) [![Gurubase](https://img.shields.io/badge/Gurubase-Ask%20docTR%20Guru-006BFF)](https://gurubase.io/g/doctr)


- **Optical Character Recognition made seamless & accessible to anyone, powered by TensorFlow 2 & PyTorch**
+ **Optical Character Recognition made seamless & accessible to anyone, powered by PyTorch**

  What you can expect from this repository:

@@ -440,19 +430,6 @@ The KIE predictor results per page are in a dictionary format with each key repr

  ## Installation

- > [!WARNING]
- > **TensorFlow Backend Deprecation Notice**
- >
- > Using docTR with TensorFlow as a backend is deprecated and will be removed in the next major release (v1.0.0).
- > We **recommend switching to the PyTorch backend**, which is more actively maintained and supports the latest features and models.
- > Alternatively, you can use [OnnxTR](https://github.com/felixdittrich92/OnnxTR), which does **not** require TensorFlow or PyTorch.
- >
- > This decision was made based on several considerations:
- >
- > - Allows better focus on improving the core library
- > - Frees up resources to develop new features faster
- > - Enables more targeted optimizations with PyTorch
-
  ### Prerequisites

  Python 3.10 (or higher) and [pip](https://pip.pypa.io/en/stable/) are required to install docTR.
@@ -465,24 +442,15 @@ You can then install the latest release of the package using [pypi](https://pypi
  pip install python-doctr
  ```

- > :warning: Please note that the basic installation is not standalone, as it does not provide a deep learning framework, which is required for the package to run.
-
- We try to keep framework-specific dependencies to a minimum. You can install framework-specific builds as follows:
+ We try to keep extra dependencies to a minimum. You can install specific builds as follows:

  ```shell
- # for TensorFlow
- pip install "python-doctr[tf]"
- # for PyTorch
- pip install "python-doctr[torch]"
+ # standard build
+ pip install python-doctr
  # optional dependencies for visualization, html, and contrib modules can be installed as follows:
- pip install "python-doctr[torch,viz,html,contib]"
+ pip install "python-doctr[viz,html,contrib]"
  ```

- For MacBooks with M1 chip, you will need some additional packages or specific versions:
-
- - TensorFlow 2: [metal plugin](https://developer.apple.com/metal/tensorflow-plugin/)
- - PyTorch: [version >= 2.0.0](https://pytorch.org/get-started/locally/#start-locally)
-
  ### Developer mode

  Alternatively, you can install it from source, which will require you to install [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git).
@@ -493,13 +461,10 @@ git clone https://github.com/mindee/doctr.git
  pip install -e doctr/.
  ```

- Again, if you prefer to avoid the risk of missing dependencies, you can install the TensorFlow or the PyTorch build:
+ Again, if you prefer to avoid the risk of missing dependencies, you can install the build:

  ```shell
- # for TensorFlow
- pip install -e doctr/.[tf]
- # for PyTorch
- pip install -e doctr/.[torch]
+ pip install -e doctr/.
  ```

  ## Models architectures
@@ -542,20 +507,6 @@ Check it out [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%2

  If you prefer to use it locally, there is an extra dependency ([Streamlit](https://streamlit.io/)) that is required.

- ##### Tensorflow version
-
- ```shell
- pip install -r demo/tf-requirements.txt
- ```
-
- Then run your app in your default browser with:
-
- ```shell
- USE_TF=1 streamlit run demo/app.py
- ```
-
- ##### PyTorch version
-
  ```shell
  pip install -r demo/pt-requirements.txt
  ```
@@ -563,23 +514,16 @@ pip install -r demo/pt-requirements.txt
  Then run your app in your default browser with:

  ```shell
- USE_TORCH=1 streamlit run demo/app.py
+ streamlit run demo/app.py
  ```

- #### TensorFlow.js
-
- Instead of having your demo actually running Python, you would prefer to run everything in your web browser?
- Check out our [TensorFlow.js demo](https://github.com/mindee/doctr-tfjs-demo) to get started!
-
- ![TFJS demo](https://github.com/mindee/doctr/raw/main/docs/images/demo_illustration_mini.png)
-
  ### Docker container

  We offer Docker container support for easy testing and deployment. [Here are the available docker tags.](https://github.com/mindee/doctr/pkgs/container/doctr).

  #### Using GPU with docTR Docker Images

- The docTR Docker images are GPU-ready and based on CUDA `12.2`. Make sure your host is **at least `12.2`**, otherwise Torch or TensorFlow won't be able to initialize the GPU.
+ The docTR Docker images are GPU-ready and based on CUDA `12.2`. Make sure your host is **at least `12.2`**, otherwise Torch won't be able to initialize the GPU.
  Please ensure that Docker is configured to use your GPU.

  To verify and configure GPU support for Docker, please follow the instructions provided in the [NVIDIA Container Toolkit Installation Guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html).
@@ -594,7 +538,7 @@ docker run -it --gpus all ghcr.io/mindee/doctr:torch-py3.9.18-2024-10 bash

  The Docker images for docTR follow a specific tag nomenclature: `<deps>-py<python_version>-<doctr_version|YYYY-MM>`. Here's a breakdown of the tag structure:

- - `<deps>`: `tf`, `torch`, `tf-viz-html-contrib` or `torch-viz-html-contrib`.
+ - `<deps>`: `torch`, `torch-viz-html-contrib`.
  - `<python_version>`: `3.9.18`, `3.10.13` or `3.11.8`.
  - `<doctr_version>`: a tag >= `v0.11.0`
  - `<YYYY-MM>`: e.g. `2014-10`
@@ -603,7 +547,6 @@ Here are examples of different image tags:

  | Tag | Description |
  |----------------------------|---------------------------------------------------|
- | `tf-py3.10.13-v0.11.0` | TensorFlow version `3.10.13` with docTR `v0.11.0`. |
  | `torch-viz-html-contrib-py3.11.8-2024-10` | Torch with extra dependencies version `3.11.8` from latest commit on `main` in `2024-10`. |
  | `torch-py3.11.8-2024-10`| PyTorch version `3.11.8` from latest commit on `main` in `2024-10`. |

@@ -615,10 +558,10 @@ You can also build docTR Docker images locally on your computer.
  docker build -t doctr .
  ```

- You can specify custom Python versions and docTR versions using build arguments. For example, to build a docTR image with TensorFlow, Python version `3.9.10`, and docTR version `v0.7.0`, run the following command:
+ You can specify custom Python versions and docTR versions using build arguments. For example, to build a docTR image with PyTorch, Python version `3.9.10`, and docTR version `v0.7.0`, run the following command:

  ```shell
- docker build -t doctr --build-arg FRAMEWORK=tf --build-arg PYTHON_VERSION=3.9.10 --build-arg DOCTR_VERSION=v0.7.0 .
+ docker build -t doctr --build-arg FRAMEWORK=torch --build-arg PYTHON_VERSION=3.9.10 --build-arg DOCTR_VERSION=v0.7.0 .
  ```

  ### Example script

README.md

@@ -2,10 +2,10 @@
  <img src="https://github.com/mindee/doctr/raw/main/docs/images/Logo_doctr.gif" width="40%">
  </p>

- [![Slack Icon](https://img.shields.io/badge/Slack-Community-4A154B?style=flat-square&logo=slack&logoColor=white)](https://slack.mindee.com) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) ![Build Status](https://github.com/mindee/doctr/workflows/builds/badge.svg) [![Docker Images](https://img.shields.io/badge/Docker-4287f5?style=flat&logo=docker&logoColor=white)](https://github.com/mindee/doctr/pkgs/container/doctr) [![codecov](https://codecov.io/gh/mindee/doctr/branch/main/graph/badge.svg?token=577MO567NM)](https://codecov.io/gh/mindee/doctr) [![CodeFactor](https://www.codefactor.io/repository/github/mindee/doctr/badge?s=bae07db86bb079ce9d6542315b8c6e70fa708a7e)](https://www.codefactor.io/repository/github/mindee/doctr) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/340a76749b634586a498e1c0ab998f08)](https://app.codacy.com/gh/mindee/doctr?utm_source=github.com&utm_medium=referral&utm_content=mindee/doctr&utm_campaign=Badge_Grade) [![Doc Status](https://github.com/mindee/doctr/workflows/doc-status/badge.svg)](https://mindee.github.io/doctr) [![Pypi](https://img.shields.io/badge/pypi-v0.12.0-blue.svg)](https://pypi.org/project/python-doctr/) [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/mindee/doctr) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mindee/notebooks/blob/main/doctr/quicktour.ipynb) [![Gurubase](https://img.shields.io/badge/Gurubase-Ask%20docTR%20Guru-006BFF)](https://gurubase.io/g/doctr)
+ [![Slack Icon](https://img.shields.io/badge/Slack-Community-4A154B?style=flat-square&logo=slack&logoColor=white)](https://slack.mindee.com) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) ![Build Status](https://github.com/mindee/doctr/workflows/builds/badge.svg) [![Docker Images](https://img.shields.io/badge/Docker-4287f5?style=flat&logo=docker&logoColor=white)](https://github.com/mindee/doctr/pkgs/container/doctr) [![codecov](https://codecov.io/gh/mindee/doctr/branch/main/graph/badge.svg?token=577MO567NM)](https://codecov.io/gh/mindee/doctr) [![CodeFactor](https://www.codefactor.io/repository/github/mindee/doctr/badge?s=bae07db86bb079ce9d6542315b8c6e70fa708a7e)](https://www.codefactor.io/repository/github/mindee/doctr) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/340a76749b634586a498e1c0ab998f08)](https://app.codacy.com/gh/mindee/doctr?utm_source=github.com&utm_medium=referral&utm_content=mindee/doctr&utm_campaign=Badge_Grade) [![Doc Status](https://github.com/mindee/doctr/workflows/doc-status/badge.svg)](https://mindee.github.io/doctr) [![Pypi](https://img.shields.io/badge/pypi-v1.0.0-blue.svg)](https://pypi.org/project/python-doctr/) [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/mindee/doctr) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mindee/notebooks/blob/main/doctr/quicktour.ipynb) [![Gurubase](https://img.shields.io/badge/Gurubase-Ask%20docTR%20Guru-006BFF)](https://gurubase.io/g/doctr)


- **Optical Character Recognition made seamless & accessible to anyone, powered by TensorFlow 2 & PyTorch**
+ **Optical Character Recognition made seamless & accessible to anyone, powered by PyTorch**

  What you can expect from this repository:

@@ -133,19 +133,6 @@ The KIE predictor results per page are in a dictionary format with each key repr

  ## Installation

- > [!WARNING]
- > **TensorFlow Backend Deprecation Notice**
- >
- > Using docTR with TensorFlow as a backend is deprecated and will be removed in the next major release (v1.0.0).
- > We **recommend switching to the PyTorch backend**, which is more actively maintained and supports the latest features and models.
- > Alternatively, you can use [OnnxTR](https://github.com/felixdittrich92/OnnxTR), which does **not** require TensorFlow or PyTorch.
- >
- > This decision was made based on several considerations:
- >
- > - Allows better focus on improving the core library
- > - Frees up resources to develop new features faster
- > - Enables more targeted optimizations with PyTorch
-
  ### Prerequisites

  Python 3.10 (or higher) and [pip](https://pip.pypa.io/en/stable/) are required to install docTR.
@@ -158,24 +145,15 @@ You can then install the latest release of the package using [pypi](https://pypi
  pip install python-doctr
  ```

- > :warning: Please note that the basic installation is not standalone, as it does not provide a deep learning framework, which is required for the package to run.
-
- We try to keep framework-specific dependencies to a minimum. You can install framework-specific builds as follows:
+ We try to keep extra dependencies to a minimum. You can install specific builds as follows:

  ```shell
- # for TensorFlow
- pip install "python-doctr[tf]"
- # for PyTorch
- pip install "python-doctr[torch]"
+ # standard build
+ pip install python-doctr
  # optional dependencies for visualization, html, and contrib modules can be installed as follows:
- pip install "python-doctr[torch,viz,html,contib]"
+ pip install "python-doctr[viz,html,contrib]"
  ```

- For MacBooks with M1 chip, you will need some additional packages or specific versions:
-
- - TensorFlow 2: [metal plugin](https://developer.apple.com/metal/tensorflow-plugin/)
- - PyTorch: [version >= 2.0.0](https://pytorch.org/get-started/locally/#start-locally)
-
  ### Developer mode

  Alternatively, you can install it from source, which will require you to install [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git).
@@ -186,13 +164,10 @@ git clone https://github.com/mindee/doctr.git
  pip install -e doctr/.
  ```

- Again, if you prefer to avoid the risk of missing dependencies, you can install the TensorFlow or the PyTorch build:
+ Again, if you prefer to avoid the risk of missing dependencies, you can install the build:

  ```shell
- # for TensorFlow
- pip install -e doctr/.[tf]
- # for PyTorch
- pip install -e doctr/.[torch]
+ pip install -e doctr/.
  ```

  ## Models architectures
@@ -235,20 +210,6 @@ Check it out [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%2

  If you prefer to use it locally, there is an extra dependency ([Streamlit](https://streamlit.io/)) that is required.

- ##### Tensorflow version
-
- ```shell
- pip install -r demo/tf-requirements.txt
- ```
-
- Then run your app in your default browser with:
-
- ```shell
- USE_TF=1 streamlit run demo/app.py
- ```
-
- ##### PyTorch version
-
  ```shell
  pip install -r demo/pt-requirements.txt
  ```
@@ -256,23 +217,16 @@ pip install -r demo/pt-requirements.txt
  Then run your app in your default browser with:

  ```shell
- USE_TORCH=1 streamlit run demo/app.py
+ streamlit run demo/app.py
  ```

- #### TensorFlow.js
-
- Instead of having your demo actually running Python, you would prefer to run everything in your web browser?
- Check out our [TensorFlow.js demo](https://github.com/mindee/doctr-tfjs-demo) to get started!
-
- ![TFJS demo](https://github.com/mindee/doctr/raw/main/docs/images/demo_illustration_mini.png)
-
  ### Docker container

  We offer Docker container support for easy testing and deployment. [Here are the available docker tags.](https://github.com/mindee/doctr/pkgs/container/doctr).

  #### Using GPU with docTR Docker Images

- The docTR Docker images are GPU-ready and based on CUDA `12.2`. Make sure your host is **at least `12.2`**, otherwise Torch or TensorFlow won't be able to initialize the GPU.
+ The docTR Docker images are GPU-ready and based on CUDA `12.2`. Make sure your host is **at least `12.2`**, otherwise Torch won't be able to initialize the GPU.
  Please ensure that Docker is configured to use your GPU.

  To verify and configure GPU support for Docker, please follow the instructions provided in the [NVIDIA Container Toolkit Installation Guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html).
@@ -287,7 +241,7 @@ docker run -it --gpus all ghcr.io/mindee/doctr:torch-py3.9.18-2024-10 bash

  The Docker images for docTR follow a specific tag nomenclature: `<deps>-py<python_version>-<doctr_version|YYYY-MM>`. Here's a breakdown of the tag structure:

- - `<deps>`: `tf`, `torch`, `tf-viz-html-contrib` or `torch-viz-html-contrib`.
+ - `<deps>`: `torch`, `torch-viz-html-contrib`.
  - `<python_version>`: `3.9.18`, `3.10.13` or `3.11.8`.
  - `<doctr_version>`: a tag >= `v0.11.0`
  - `<YYYY-MM>`: e.g. `2014-10`
@@ -296,7 +250,6 @@ Here are examples of different image tags:

  | Tag | Description |
  |----------------------------|---------------------------------------------------|
- | `tf-py3.10.13-v0.11.0` | TensorFlow version `3.10.13` with docTR `v0.11.0`. |
  | `torch-viz-html-contrib-py3.11.8-2024-10` | Torch with extra dependencies version `3.11.8` from latest commit on `main` in `2024-10`. |
  | `torch-py3.11.8-2024-10`| PyTorch version `3.11.8` from latest commit on `main` in `2024-10`. |

@@ -308,10 +261,10 @@ You can also build docTR Docker images locally on your computer.
  docker build -t doctr .
  ```

- You can specify custom Python versions and docTR versions using build arguments. For example, to build a docTR image with TensorFlow, Python version `3.9.10`, and docTR version `v0.7.0`, run the following command:
+ You can specify custom Python versions and docTR versions using build arguments. For example, to build a docTR image with PyTorch, Python version `3.9.10`, and docTR version `v0.7.0`, run the following command:

  ```shell
- docker build -t doctr --build-arg FRAMEWORK=tf --build-arg PYTHON_VERSION=3.9.10 --build-arg DOCTR_VERSION=v0.7.0 .
+ docker build -t doctr --build-arg FRAMEWORK=torch --build-arg PYTHON_VERSION=3.9.10 --build-arg DOCTR_VERSION=v0.7.0 .
  ```

  ### Example script
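Both copies of the install docs now describe a single PyTorch-only build, with no `USE_TF`/`USE_TORCH` switch. A hedged end-to-end smoke test for a fresh 1.0.0 install, assuming the quickstart API (`DocumentFile`, `ocr_predictor`) documented elsewhere in the README is unchanged:

```python
from doctr.io import DocumentFile
from doctr.models import ocr_predictor

# PyTorch is a hard dependency now, so no backend selection is required.
model = ocr_predictor(pretrained=True)
doc = DocumentFile.from_pdf("path/to/your/doc.pdf")  # placeholder path
result = model(doc)
print(result.render())  # plain-text reconstruction of the page contents
```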
doctr/__init__.py

@@ -1,3 +1,2 @@
  from . import io, models, datasets, contrib, transforms, utils
- from .file_utils import is_tf_available, is_torch_available
  from .version import __version__  # noqa: F401
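`is_tf_available` and `is_torch_available` disappear from the package root, so downstream code that probed the backend must adapt. A migration sketch (the guarded import below is our illustration of the old pattern, not taken from the diff):

```python
# 0.12.0 pattern: probe the backend before importing model code.
# from doctr import is_torch_available
# if is_torch_available():
#     from doctr.models import ocr_predictor
#
# 1.0.0: torch is always installed, so the guard is simply dropped.
from doctr.models import ocr_predictor
```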
doctr/datasets/__init__.py

@@ -1,5 +1,3 @@
- from doctr.file_utils import is_tf_available
-
  from .generator import *
  from .coco_text import *
  from .cord import *
@@ -22,6 +20,3 @@ from .synthtext import *
  from .utils import *
  from .vocabs import *
  from .wildreceipt import *
-
- if is_tf_available():
-     from .loader import *

doctr/datasets/datasets/__init__.py (new file)

@@ -0,0 +1 @@
+ from .pytorch import *
doctr/datasets/datasets/pytorch.py

@@ -50,9 +50,9 @@ class AbstractDataset(_AbstractDataset):
      @staticmethod
      def collate_fn(samples: list[tuple[torch.Tensor, Any]]) -> tuple[torch.Tensor, list[Any]]:
          images, targets = zip(*samples)
-         images = torch.stack(images, dim=0)
+         images = torch.stack(images, dim=0)  # type: ignore[assignment]

-         return images, list(targets)
+         return images, list(targets)  # type: ignore[return-value]


  class VisionDataset(AbstractDataset, _VisionDataset):  # noqa: D101
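The two `type: ignore` comments only silence mypy on the tuple-to-tensor rebinding; runtime behavior is unchanged. A self-contained sketch of the collate contract this method implements, assuming same-sized image tensors per batch:

```python
import torch
from torch.utils.data import DataLoader, Dataset

class ToyDataset(Dataset):
    """Yields (C, H, W) image tensors with an arbitrary (non-tensor) target."""
    def __len__(self) -> int:
        return 4

    def __getitem__(self, idx: int):
        return torch.rand(3, 32, 128), {"idx": idx}

def collate_fn(samples):
    # Stack images into one (N, C, H, W) batch; keep targets as a plain list.
    images, targets = zip(*samples)
    return torch.stack(images, dim=0), list(targets)

loader = DataLoader(ToyDataset(), batch_size=4, collate_fn=collate_fn)
images, targets = next(iter(loader))
assert images.shape == (4, 3, 32, 128) and len(targets) == 4
```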
doctr/datasets/generator/__init__.py (new file)

@@ -0,0 +1 @@
+ from .pytorch import *

doctr/datasets/vocabs.py

@@ -264,8 +264,6 @@ VOCABS["estonian"] = VOCABS["english"] + "šžõäöüŠŽÕÄÖÜ"
  VOCABS["esperanto"] = re.sub(r"[QqWwXxYy]", "", VOCABS["english"]) + "ĉĝĥĵŝŭĈĜĤĴŜŬ" + "₷"

  VOCABS["french"] = VOCABS["english"] + "àâéèêëîïôùûüçÀÂÉÈÊËÎÏÔÙÛÜÇ"
- # NOTE: legacy french is outdated, but kept for compatibility
- VOCABS["legacy_french"] = VOCABS["latin"] + "°" + "àâéèêëîïôùûçÀÂÉÈËÎÏÔÙÛÇ" + _BASE_VOCABS["currency"]

  VOCABS["finnish"] = VOCABS["english"] + "äöÄÖ"

doctr/file_utils.py (new file)

@@ -0,0 +1,30 @@
+ # Copyright (C) 2021-2025, Mindee.
+
+ # This program is licensed under the Apache License 2.0.
+ # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
+
+ import importlib.metadata
+ import logging
+
+ __all__ = ["requires_package", "CLASS_NAME"]
+
+ CLASS_NAME: str = "words"
+ ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
+
+
+ def requires_package(name: str, extra_message: str | None = None) -> None:  # pragma: no cover
+     """
+     package requirement helper
+
+     Args:
+         name: name of the package
+         extra_message: additional message to display if the package is not found
+     """
+     try:
+         _pkg_version = importlib.metadata.version(name)
+         logging.info(f"{name} version {_pkg_version} available.")
+     except importlib.metadata.PackageNotFoundError:
+         raise ImportError(
+             f"\n\n{extra_message if extra_message is not None else ''} "
+             f"\nPlease install it with the following command: pip install {name}\n"
+         )
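The new `file_utils.py` is a fraction of its 129-line predecessor: with only one backend, all the TF/torch detection logic collapses into a single optional-dependency helper. A usage sketch (`html_to_pdf_bytes` is a hypothetical caller; `weasyprint` is the real `html` extra shown in the metadata above):

```python
from doctr.file_utils import requires_package

def html_to_pdf_bytes(html: str) -> bytes:  # hypothetical helper, for illustration
    # Raises an ImportError with an actionable pip hint if the extra is missing.
    requires_package("weasyprint", "`weasyprint` is required for HTML support.")
    import weasyprint  # safe: the check above already raised otherwise
    return weasyprint.HTML(string=html).write_pdf()
```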
doctr/io/image/__init__.py (new file)

@@ -0,0 +1,2 @@
+ from .base import *
+ from .pytorch import *

doctr/io/image/pytorch.py

@@ -95,4 +95,4 @@ def tensor_from_numpy(npy_img: np.ndarray, dtype: torch.dtype = torch.float32) -

  def get_img_shape(img: torch.Tensor) -> tuple[int, int]:
      """Get the shape of an image"""
-     return img.shape[-2:]
+     return img.shape[-2:]  # type: ignore[return-value]

doctr/models/_utils.py

@@ -63,7 +63,7 @@ def estimate_orientation(
      thresh = img.astype(np.uint8)

      page_orientation, orientation_confidence = general_page_orientation or (None, 0.0)
-     if page_orientation and orientation_confidence >= min_confidence:
+     if page_orientation is not None and orientation_confidence >= min_confidence:
          # We rotate the image to the general orientation which improves the detection
          # No expand needed bitmap is already padded
          thresh = rotate_image(thresh, -page_orientation)
@@ -100,7 +100,7 @@ def estimate_orientation(
      estimated_angle = -round(median) if abs(median) != 0 else 0

      # combine with the general orientation and the estimated angle
-     if page_orientation and orientation_confidence >= min_confidence:
+     if page_orientation is not None and orientation_confidence >= min_confidence:
          # special case where the estimated angle is mostly wrong:
          # case 1: - and + swapped
          # case 2: estimated angle is completely wrong
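Both hunks fix the same truthiness bug: `0` is a valid page orientation (upright) but is falsy, so `if page_orientation` used to skip a confidently detected 0° result. A minimal reproduction of the difference:

```python
page_orientation, confidence = 0, 0.95  # upright page, high confidence
min_confidence = 0.5

old_branch = bool(page_orientation) and confidence >= min_confidence        # False: 0 is falsy
new_branch = page_orientation is not None and confidence >= min_confidence  # True
assert old_branch is False and new_branch is True
```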
@@ -184,7 +184,7 @@ def invert_data_structure(
          dictionary of list when x is a list of dictionaries or a list of dictionaries when x is dictionary of lists
      """
      if isinstance(x, dict):
-         assert len({len(v) for v in x.values()}) == 1, "All the lists in the dictionnary should have the same length."
+         assert len({len(v) for v in x.values()}) == 1, "All the lists in the dictionary should have the same length."
          return [dict(zip(x, t)) for t in zip(*x.values())]
      elif isinstance(x, list):
          return {k: [dic[k] for dic in x] for k in x[0]}

doctr/models/classification/magc_resnet/__init__.py (new file)

@@ -0,0 +1 @@
+ from .pytorch import *

doctr/models/classification/magc_resnet/pytorch.py

@@ -14,7 +14,7 @@ from torch import nn

  from doctr.datasets import VOCABS

- from ..resnet.pytorch import ResNet
+ from ..resnet import ResNet

  __all__ = ["magc_resnet31"]

@@ -72,7 +72,7 @@ class MAGC(nn.Module):
      def forward(self, inputs: torch.Tensor) -> torch.Tensor:
          batch, _, height, width = inputs.size()
          # (N * headers, C / headers, H , W)
-         x = inputs.view(batch * self.headers, self.single_header_inplanes, height, width)
+         x = inputs.contiguous().view(batch * self.headers, self.single_header_inplanes, height, width)
          shortcut = x
          # (N * headers, C / headers, H * W)
          shortcut = shortcut.view(batch * self.headers, self.single_header_inplanes, height * width)
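`.view()` requires a layout-compatible (typically contiguous) tensor, and `inputs` may arrive non-contiguous from upstream permute/transpose ops; `.contiguous()` materializes a contiguous copy first. A standalone demonstration:

```python
import torch

x = torch.randn(2, 8, 4, 4).transpose(1, 2)  # transpose makes x non-contiguous
assert not x.is_contiguous()
try:
    x.view(-1)  # RuntimeError: view is incompatible with this stride layout
except RuntimeError:
    pass
flat = x.contiguous().view(-1)  # copy into contiguous memory, then view works
assert flat.numel() == 2 * 8 * 4 * 4
```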
doctr/models/classification/mobilenet/__init__.py (new file)

@@ -0,0 +1 @@
+ from .pytorch import *

doctr/models/classification/predictor/__init__.py (new file)

@@ -0,0 +1 @@
+ from .pytorch import *

doctr/models/classification/predictor/pytorch.py

@@ -35,7 +35,7 @@ class OrientationPredictor(nn.Module):
      @torch.inference_mode()
      def forward(
          self,
-         inputs: list[np.ndarray | torch.Tensor],
+         inputs: list[np.ndarray],
      ) -> list[list[int] | list[float]]:
          # Dimension check
          if any(input.ndim != 3 for input in inputs):

doctr/models/classification/resnet/__init__.py (new file)

@@ -0,0 +1 @@
+ from .pytorch import *

doctr/models/classification/textnet/__init__.py (new file)

@@ -0,0 +1 @@
+ from .pytorch import *

doctr/models/classification/textnet/pytorch.py

@@ -11,7 +11,7 @@ from torch import nn

  from doctr.datasets import VOCABS

- from ...modules.layers.pytorch import FASTConvLayer
+ from ...modules.layers import FASTConvLayer
  from ...utils import conv_sequence_pt, load_pretrained_params

  __all__ = ["textnet_tiny", "textnet_small", "textnet_base"]

doctr/models/classification/vgg/__init__.py (new file)

@@ -0,0 +1 @@
+ from .pytorch import *

doctr/models/classification/vip/__init__.py (new file)

@@ -0,0 +1 @@
+ from .pytorch import *

doctr/models/classification/vip/layers/__init__.py (new file)

@@ -0,0 +1 @@
+ from .pytorch import *

doctr/models/classification/vip/layers/pytorch.py

@@ -433,7 +433,7 @@ class LePEAttention(nn.Module):
      Returns:
          A float tensor of shape (b, h, w, c).
      """
-     b_merged = int(img_splits_hw.shape[0] / (h * w / h_sp / w_sp))
+     b_merged = img_splits_hw.shape[0] // ((h * w) // (h_sp * w_sp))
      img = img_splits_hw.view(b_merged, h // h_sp, w // w_sp, h_sp, w_sp, -1)
      # contiguous() required to ensure the tensor has a contiguous memory layout
      # after permute, allowing the subsequent view operation to work correctly.
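The `b_merged` rewrite keeps the whole computation in integer arithmetic: `int(a / b)` routes through floats, which can silently lose precision once the operands get large, while `//` stays exact. A deliberately extreme illustration of the failure mode:

```python
n = 10**18 + 1           # too large for a float to represent exactly
assert int(n / 1) != n   # true division rounds through float and drops the +1
assert n // 1 == n       # floor division stays in exact integer arithmetic
```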
doctr/models/classification/vit/__init__.py (new file)

@@ -0,0 +1 @@
+ from .pytorch import *

doctr/models/classification/vit/pytorch.py

@@ -11,9 +11,9 @@ from torch import nn

  from doctr.datasets import VOCABS
  from doctr.models.modules.transformer import EncoderBlock
- from doctr.models.modules.vision_transformer.pytorch import PatchEmbedding
+ from doctr.models.modules.vision_transformer import PatchEmbedding

- from ...utils.pytorch import load_pretrained_params
+ from ...utils import load_pretrained_params

  __all__ = ["vit_s", "vit_b"]

doctr/models/classification/zoo.py

@@ -5,7 +5,7 @@

  from typing import Any

- from doctr.file_utils import is_tf_available, is_torch_available
+ from doctr.models.utils import _CompiledModule

  from .. import classification
  from ..preprocessor import PreProcessor
@@ -30,11 +30,10 @@ ARCHS: list[str] = [
      "vgg16_bn_r",
      "vit_s",
      "vit_b",
+     "vip_tiny",
+     "vip_base",
  ]

- if is_torch_available():
-     ARCHS.extend(["vip_tiny", "vip_base"])
-
  ORIENTATION_ARCHS: list[str] = ["mobilenet_v3_small_crop_orientation", "mobilenet_v3_small_page_orientation"]

@@ -52,12 +51,8 @@ def _orientation_predictor(
          # Load directly classifier from backbone
          _model = classification.__dict__[arch](pretrained=pretrained)
      else:
-         allowed_archs = [classification.MobileNetV3]
-         if is_torch_available():
-             # Adding the type for torch compiled models to the allowed architectures
-             from doctr.models.utils import _CompiledModule
-
-             allowed_archs.append(_CompiledModule)
+         # Adding the type for torch compiled models to the allowed architectures
+         allowed_archs = [classification.MobileNetV3, _CompiledModule]

          if not isinstance(arch, tuple(allowed_archs)):
              raise ValueError(f"unknown architecture: {type(arch)}")
@@ -66,7 +61,7 @@ def _orientation_predictor(
      kwargs["mean"] = kwargs.get("mean", _model.cfg["mean"])
      kwargs["std"] = kwargs.get("std", _model.cfg["std"])
      kwargs["batch_size"] = kwargs.get("batch_size", 128 if model_type == "crop" else 4)
-     input_shape = _model.cfg["input_shape"][:-1] if is_tf_available() else _model.cfg["input_shape"][1:]
+     input_shape = _model.cfg["input_shape"][1:]
      predictor = OrientationPredictor(
          PreProcessor(input_shape, preserve_aspect_ratio=True, symmetric_pad=True, **kwargs), _model
      )

doctr/models/detection/_utils/__init__.py (new file)

@@ -0,0 +1,2 @@
+ from .base import *
+ from .pytorch import *

doctr/models/detection/core.py

@@ -53,7 +53,7 @@ class DetectionPostProcessor(NestedObject):

          else:
              mask: np.ndarray = np.zeros((h, w), np.int32)
-             cv2.fillPoly(mask, [points.astype(np.int32)], 1.0)  # type: ignore[call-overload]
+             cv2.fillPoly(mask, [points.astype(np.int32)], 1.0)
              product = pred * mask
              return np.sum(product) / np.count_nonzero(product)