python-doctr 0.10.0__py3-none-any.whl → 0.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (162) hide show
  1. doctr/contrib/__init__.py +1 -0
  2. doctr/contrib/artefacts.py +7 -9
  3. doctr/contrib/base.py +8 -17
  4. doctr/datasets/cord.py +8 -7
  5. doctr/datasets/datasets/__init__.py +4 -4
  6. doctr/datasets/datasets/base.py +16 -16
  7. doctr/datasets/datasets/pytorch.py +12 -12
  8. doctr/datasets/datasets/tensorflow.py +10 -10
  9. doctr/datasets/detection.py +6 -9
  10. doctr/datasets/doc_artefacts.py +3 -4
  11. doctr/datasets/funsd.py +7 -6
  12. doctr/datasets/generator/__init__.py +4 -4
  13. doctr/datasets/generator/base.py +16 -17
  14. doctr/datasets/generator/pytorch.py +1 -3
  15. doctr/datasets/generator/tensorflow.py +1 -3
  16. doctr/datasets/ic03.py +4 -5
  17. doctr/datasets/ic13.py +4 -5
  18. doctr/datasets/iiit5k.py +6 -5
  19. doctr/datasets/iiithws.py +4 -5
  20. doctr/datasets/imgur5k.py +6 -5
  21. doctr/datasets/loader.py +4 -7
  22. doctr/datasets/mjsynth.py +6 -5
  23. doctr/datasets/ocr.py +3 -4
  24. doctr/datasets/orientation.py +3 -4
  25. doctr/datasets/recognition.py +3 -4
  26. doctr/datasets/sroie.py +6 -5
  27. doctr/datasets/svhn.py +6 -5
  28. doctr/datasets/svt.py +4 -5
  29. doctr/datasets/synthtext.py +4 -5
  30. doctr/datasets/utils.py +34 -29
  31. doctr/datasets/vocabs.py +17 -7
  32. doctr/datasets/wildreceipt.py +14 -10
  33. doctr/file_utils.py +2 -7
  34. doctr/io/elements.py +59 -79
  35. doctr/io/html.py +1 -3
  36. doctr/io/image/__init__.py +3 -3
  37. doctr/io/image/base.py +2 -5
  38. doctr/io/image/pytorch.py +3 -12
  39. doctr/io/image/tensorflow.py +2 -11
  40. doctr/io/pdf.py +5 -7
  41. doctr/io/reader.py +5 -11
  42. doctr/models/_utils.py +14 -22
  43. doctr/models/builder.py +30 -48
  44. doctr/models/classification/magc_resnet/__init__.py +3 -3
  45. doctr/models/classification/magc_resnet/pytorch.py +10 -13
  46. doctr/models/classification/magc_resnet/tensorflow.py +8 -11
  47. doctr/models/classification/mobilenet/__init__.py +3 -3
  48. doctr/models/classification/mobilenet/pytorch.py +5 -17
  49. doctr/models/classification/mobilenet/tensorflow.py +8 -21
  50. doctr/models/classification/predictor/__init__.py +4 -4
  51. doctr/models/classification/predictor/pytorch.py +6 -8
  52. doctr/models/classification/predictor/tensorflow.py +6 -8
  53. doctr/models/classification/resnet/__init__.py +4 -4
  54. doctr/models/classification/resnet/pytorch.py +21 -31
  55. doctr/models/classification/resnet/tensorflow.py +20 -31
  56. doctr/models/classification/textnet/__init__.py +3 -3
  57. doctr/models/classification/textnet/pytorch.py +10 -17
  58. doctr/models/classification/textnet/tensorflow.py +8 -15
  59. doctr/models/classification/vgg/__init__.py +3 -3
  60. doctr/models/classification/vgg/pytorch.py +5 -7
  61. doctr/models/classification/vgg/tensorflow.py +9 -12
  62. doctr/models/classification/vit/__init__.py +3 -3
  63. doctr/models/classification/vit/pytorch.py +8 -14
  64. doctr/models/classification/vit/tensorflow.py +6 -12
  65. doctr/models/classification/zoo.py +19 -14
  66. doctr/models/core.py +3 -3
  67. doctr/models/detection/_utils/__init__.py +4 -4
  68. doctr/models/detection/_utils/base.py +4 -7
  69. doctr/models/detection/_utils/pytorch.py +1 -5
  70. doctr/models/detection/_utils/tensorflow.py +1 -5
  71. doctr/models/detection/core.py +2 -8
  72. doctr/models/detection/differentiable_binarization/__init__.py +4 -4
  73. doctr/models/detection/differentiable_binarization/base.py +7 -17
  74. doctr/models/detection/differentiable_binarization/pytorch.py +27 -30
  75. doctr/models/detection/differentiable_binarization/tensorflow.py +15 -25
  76. doctr/models/detection/fast/__init__.py +4 -4
  77. doctr/models/detection/fast/base.py +6 -14
  78. doctr/models/detection/fast/pytorch.py +24 -31
  79. doctr/models/detection/fast/tensorflow.py +14 -26
  80. doctr/models/detection/linknet/__init__.py +4 -4
  81. doctr/models/detection/linknet/base.py +6 -15
  82. doctr/models/detection/linknet/pytorch.py +24 -27
  83. doctr/models/detection/linknet/tensorflow.py +14 -23
  84. doctr/models/detection/predictor/__init__.py +5 -5
  85. doctr/models/detection/predictor/pytorch.py +6 -7
  86. doctr/models/detection/predictor/tensorflow.py +5 -6
  87. doctr/models/detection/zoo.py +27 -7
  88. doctr/models/factory/hub.py +3 -7
  89. doctr/models/kie_predictor/__init__.py +5 -5
  90. doctr/models/kie_predictor/base.py +4 -5
  91. doctr/models/kie_predictor/pytorch.py +18 -19
  92. doctr/models/kie_predictor/tensorflow.py +13 -14
  93. doctr/models/modules/layers/__init__.py +3 -3
  94. doctr/models/modules/layers/pytorch.py +6 -9
  95. doctr/models/modules/layers/tensorflow.py +5 -7
  96. doctr/models/modules/transformer/__init__.py +3 -3
  97. doctr/models/modules/transformer/pytorch.py +12 -13
  98. doctr/models/modules/transformer/tensorflow.py +9 -10
  99. doctr/models/modules/vision_transformer/__init__.py +3 -3
  100. doctr/models/modules/vision_transformer/pytorch.py +2 -3
  101. doctr/models/modules/vision_transformer/tensorflow.py +3 -3
  102. doctr/models/predictor/__init__.py +5 -5
  103. doctr/models/predictor/base.py +28 -29
  104. doctr/models/predictor/pytorch.py +12 -13
  105. doctr/models/predictor/tensorflow.py +8 -9
  106. doctr/models/preprocessor/__init__.py +4 -4
  107. doctr/models/preprocessor/pytorch.py +13 -17
  108. doctr/models/preprocessor/tensorflow.py +10 -14
  109. doctr/models/recognition/core.py +3 -7
  110. doctr/models/recognition/crnn/__init__.py +4 -4
  111. doctr/models/recognition/crnn/pytorch.py +20 -28
  112. doctr/models/recognition/crnn/tensorflow.py +11 -23
  113. doctr/models/recognition/master/__init__.py +3 -3
  114. doctr/models/recognition/master/base.py +3 -7
  115. doctr/models/recognition/master/pytorch.py +22 -24
  116. doctr/models/recognition/master/tensorflow.py +12 -22
  117. doctr/models/recognition/parseq/__init__.py +3 -3
  118. doctr/models/recognition/parseq/base.py +3 -7
  119. doctr/models/recognition/parseq/pytorch.py +26 -26
  120. doctr/models/recognition/parseq/tensorflow.py +16 -22
  121. doctr/models/recognition/predictor/__init__.py +5 -5
  122. doctr/models/recognition/predictor/_utils.py +7 -10
  123. doctr/models/recognition/predictor/pytorch.py +6 -6
  124. doctr/models/recognition/predictor/tensorflow.py +5 -6
  125. doctr/models/recognition/sar/__init__.py +4 -4
  126. doctr/models/recognition/sar/pytorch.py +20 -21
  127. doctr/models/recognition/sar/tensorflow.py +12 -21
  128. doctr/models/recognition/utils.py +5 -10
  129. doctr/models/recognition/vitstr/__init__.py +4 -4
  130. doctr/models/recognition/vitstr/base.py +3 -7
  131. doctr/models/recognition/vitstr/pytorch.py +18 -20
  132. doctr/models/recognition/vitstr/tensorflow.py +12 -20
  133. doctr/models/recognition/zoo.py +22 -11
  134. doctr/models/utils/__init__.py +4 -4
  135. doctr/models/utils/pytorch.py +14 -17
  136. doctr/models/utils/tensorflow.py +17 -16
  137. doctr/models/zoo.py +1 -5
  138. doctr/transforms/functional/__init__.py +3 -3
  139. doctr/transforms/functional/base.py +4 -11
  140. doctr/transforms/functional/pytorch.py +20 -28
  141. doctr/transforms/functional/tensorflow.py +10 -22
  142. doctr/transforms/modules/__init__.py +4 -4
  143. doctr/transforms/modules/base.py +48 -55
  144. doctr/transforms/modules/pytorch.py +58 -22
  145. doctr/transforms/modules/tensorflow.py +18 -32
  146. doctr/utils/common_types.py +8 -9
  147. doctr/utils/data.py +8 -12
  148. doctr/utils/fonts.py +2 -7
  149. doctr/utils/geometry.py +16 -47
  150. doctr/utils/metrics.py +17 -37
  151. doctr/utils/multithreading.py +4 -6
  152. doctr/utils/reconstitution.py +9 -13
  153. doctr/utils/repr.py +2 -3
  154. doctr/utils/visualization.py +16 -29
  155. doctr/version.py +1 -1
  156. {python_doctr-0.10.0.dist-info → python_doctr-0.11.0.dist-info}/METADATA +54 -52
  157. python_doctr-0.11.0.dist-info/RECORD +173 -0
  158. {python_doctr-0.10.0.dist-info → python_doctr-0.11.0.dist-info}/WHEEL +1 -1
  159. python_doctr-0.10.0.dist-info/RECORD +0 -173
  160. {python_doctr-0.10.0.dist-info → python_doctr-0.11.0.dist-info}/LICENSE +0 -0
  161. {python_doctr-0.10.0.dist-info → python_doctr-0.11.0.dist-info}/top_level.txt +0 -0
  162. {python_doctr-0.10.0.dist-info → python_doctr-0.11.0.dist-info}/zip-safe +0 -0
@@ -1,10 +1,10 @@
1
- # Copyright (C) 2021-2024, Mindee.
1
+ # Copyright (C) 2021-2025, Mindee.
2
2
 
3
3
  # This program is licensed under the Apache License 2.0.
4
4
  # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
5
5
  import colorsys
6
6
  from copy import deepcopy
7
- from typing import Any, Dict, List, Optional, Tuple, Union
7
+ from typing import Any
8
8
 
9
9
  import cv2
10
10
  import matplotlib.patches as patches
@@ -19,9 +19,9 @@ __all__ = ["visualize_page", "visualize_kie_page", "draw_boxes"]
19
19
 
20
20
  def rect_patch(
21
21
  geometry: BoundingBox,
22
- page_dimensions: Tuple[int, int],
23
- label: Optional[str] = None,
24
- color: Tuple[float, float, float] = (0, 0, 0),
22
+ page_dimensions: tuple[int, int],
23
+ label: str | None = None,
24
+ color: tuple[float, float, float] = (0, 0, 0),
25
25
  alpha: float = 0.3,
26
26
  linewidth: int = 2,
27
27
  fill: bool = True,
@@ -30,7 +30,6 @@ def rect_patch(
30
30
  """Create a matplotlib rectangular patch for the element
31
31
 
32
32
  Args:
33
- ----
34
33
  geometry: bounding box of the element
35
34
  page_dimensions: dimensions of the Page in format (height, width)
36
35
  label: label to display when hovered
@@ -41,7 +40,6 @@ def rect_patch(
41
40
  preserve_aspect_ratio: pass True if you passed True to the predictor
42
41
 
43
42
  Returns:
44
- -------
45
43
  a rectangular Patch
46
44
  """
47
45
  if len(geometry) != 2 or any(not isinstance(elt, tuple) or len(elt) != 2 for elt in geometry):
@@ -70,9 +68,9 @@ def rect_patch(
70
68
 
71
69
  def polygon_patch(
72
70
  geometry: np.ndarray,
73
- page_dimensions: Tuple[int, int],
74
- label: Optional[str] = None,
75
- color: Tuple[float, float, float] = (0, 0, 0),
71
+ page_dimensions: tuple[int, int],
72
+ label: str | None = None,
73
+ color: tuple[float, float, float] = (0, 0, 0),
76
74
  alpha: float = 0.3,
77
75
  linewidth: int = 2,
78
76
  fill: bool = True,
@@ -81,7 +79,6 @@ def polygon_patch(
81
79
  """Create a matplotlib polygon patch for the element
82
80
 
83
81
  Args:
84
- ----
85
82
  geometry: bounding box of the element
86
83
  page_dimensions: dimensions of the Page in format (height, width)
87
84
  label: label to display when hovered
@@ -92,7 +89,6 @@ def polygon_patch(
92
89
  preserve_aspect_ratio: pass True if you passed True to the predictor
93
90
 
94
91
  Returns:
95
- -------
96
92
  a polygon Patch
97
93
  """
98
94
  if not geometry.shape == (4, 2):
@@ -114,20 +110,18 @@ def polygon_patch(
114
110
 
115
111
 
116
112
  def create_obj_patch(
117
- geometry: Union[BoundingBox, Polygon4P, np.ndarray],
118
- page_dimensions: Tuple[int, int],
113
+ geometry: BoundingBox | Polygon4P | np.ndarray,
114
+ page_dimensions: tuple[int, int],
119
115
  **kwargs: Any,
120
116
  ) -> patches.Patch:
121
117
  """Create a matplotlib patch for the element
122
118
 
123
119
  Args:
124
- ----
125
120
  geometry: bounding box (straight or rotated) of the element
126
121
  page_dimensions: dimensions of the page in format (height, width)
127
122
  **kwargs: keyword arguments for the patch
128
123
 
129
124
  Returns:
130
- -------
131
125
  a matplotlib Patch
132
126
  """
133
127
  if isinstance(geometry, tuple):
@@ -140,15 +134,13 @@ def create_obj_patch(
140
134
  raise ValueError("invalid geometry format")
141
135
 
142
136
 
143
- def get_colors(num_colors: int) -> List[Tuple[float, float, float]]:
137
+ def get_colors(num_colors: int) -> list[tuple[float, float, float]]:
144
138
  """Generate num_colors color for matplotlib
145
139
 
146
140
  Args:
147
- ----
148
141
  num_colors: number of colors to generate
149
142
 
150
143
  Returns:
151
- -------
152
144
  colors: list of generated colors
153
145
  """
154
146
  colors = []
@@ -161,7 +153,7 @@ def get_colors(num_colors: int) -> List[Tuple[float, float, float]]:
161
153
 
162
154
 
163
155
  def visualize_page(
164
- page: Dict[str, Any],
156
+ page: dict[str, Any],
165
157
  image: np.ndarray,
166
158
  words_only: bool = True,
167
159
  display_artefacts: bool = True,
@@ -183,7 +175,6 @@ def visualize_page(
183
175
  >>> plt.show()
184
176
 
185
177
  Args:
186
- ----
187
178
  page: the exported Page of a Document
188
179
  image: np array of the page, needs to have the same shape than page['dimensions']
189
180
  words_only: whether only words should be displayed
@@ -194,7 +185,6 @@ def visualize_page(
194
185
  **kwargs: keyword arguments for the polygon patch
195
186
 
196
187
  Returns:
197
- -------
198
188
  the matplotlib figure
199
189
  """
200
190
  # Get proper scale and aspect ratio
@@ -207,7 +197,7 @@ def visualize_page(
207
197
  ax.axis("off")
208
198
 
209
199
  if interactive:
210
- artists: List[patches.Patch] = [] # instantiate an empty list of patches (to be drawn on the page)
200
+ artists: list[patches.Patch] = [] # instantiate an empty list of patches (to be drawn on the page)
211
201
 
212
202
  for block in page["blocks"]:
213
203
  if not words_only:
@@ -287,7 +277,7 @@ def visualize_page(
287
277
 
288
278
 
289
279
  def visualize_kie_page(
290
- page: Dict[str, Any],
280
+ page: dict[str, Any],
291
281
  image: np.ndarray,
292
282
  words_only: bool = False,
293
283
  display_artefacts: bool = True,
@@ -309,7 +299,6 @@ def visualize_kie_page(
309
299
  >>> plt.show()
310
300
 
311
301
  Args:
312
- ----
313
302
  page: the exported Page of a Document
314
303
  image: np array of the page, needs to have the same shape than page['dimensions']
315
304
  words_only: whether only words should be displayed
@@ -320,7 +309,6 @@ def visualize_kie_page(
320
309
  **kwargs: keyword arguments for the polygon patch
321
310
 
322
311
  Returns:
323
- -------
324
312
  the matplotlib figure
325
313
  """
326
314
  # Get proper scale and aspect ratio
@@ -333,7 +321,7 @@ def visualize_kie_page(
333
321
  ax.axis("off")
334
322
 
335
323
  if interactive:
336
- artists: List[patches.Patch] = [] # instantiate an empty list of patches (to be drawn on the page)
324
+ artists: list[patches.Patch] = [] # instantiate an empty list of patches (to be drawn on the page)
337
325
 
338
326
  colors = {k: color for color, k in zip(get_colors(len(page["predictions"])), page["predictions"])}
339
327
  for key, value in page["predictions"].items():
@@ -363,11 +351,10 @@ def visualize_kie_page(
363
351
  return fig
364
352
 
365
353
 
366
- def draw_boxes(boxes: np.ndarray, image: np.ndarray, color: Optional[Tuple[int, int, int]] = None, **kwargs) -> None:
354
+ def draw_boxes(boxes: np.ndarray, image: np.ndarray, color: tuple[int, int, int] | None = None, **kwargs) -> None:
367
355
  """Draw an array of relative straight boxes on an image
368
356
 
369
357
  Args:
370
- ----
371
358
  boxes: array of relative boxes, of shape (*, 4)
372
359
  image: np array, float32 or uint8
373
360
  color: color to use for bounding box edges
doctr/version.py CHANGED
@@ -1 +1 @@
1
- __version__ = 'v0.10.0'
1
+ __version__ = 'v0.11.0'
@@ -1,10 +1,10 @@
1
- Metadata-Version: 2.1
1
+ Metadata-Version: 2.2
2
2
  Name: python-doctr
3
- Version: 0.10.0
3
+ Version: 0.11.0
4
4
  Summary: Document Text Recognition (docTR): deep Learning for high-performance OCR on documents.
5
5
  Author-email: Mindee <contact@mindee.com>
6
6
  Maintainer: François-Guillaume Fernandez, Charles Gaillard, Olivier Dulcy, Felix Dittrich
7
- License: Apache License
7
+ License: Apache License
8
8
  Version 2.0, January 2004
9
9
  http://www.apache.org/licenses/
10
10
 
@@ -219,11 +219,11 @@ Classifier: License :: OSI Approved :: Apache Software License
219
219
  Classifier: Natural Language :: English
220
220
  Classifier: Operating System :: OS Independent
221
221
  Classifier: Programming Language :: Python :: 3
222
- Classifier: Programming Language :: Python :: 3.9
223
222
  Classifier: Programming Language :: Python :: 3.10
224
223
  Classifier: Programming Language :: Python :: 3.11
224
+ Classifier: Programming Language :: Python :: 3.12
225
225
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
226
- Requires-Python: <4,>=3.9.0
226
+ Requires-Python: <4,>=3.10.0
227
227
  Description-Content-Type: text/markdown
228
228
  License-File: LICENSE
229
229
  Requires-Dist: numpy<3.0.0,>=1.16.0
@@ -240,10 +240,44 @@ Requires-Dist: Pillow>=9.2.0
240
240
  Requires-Dist: defusedxml>=0.7.0
241
241
  Requires-Dist: anyascii>=0.3.2
242
242
  Requires-Dist: tqdm>=4.30.0
243
+ Provides-Extra: tf
244
+ Requires-Dist: tensorflow[and-cuda]<3.0.0,>=2.15.0; sys_platform == "linux" and extra == "tf"
245
+ Requires-Dist: tensorflow<3.0.0,>=2.15.0; sys_platform != "linux" and extra == "tf"
246
+ Requires-Dist: tf-keras<3.0.0,>=2.15.0; extra == "tf"
247
+ Requires-Dist: tf2onnx<2.0.0,>=1.16.0; extra == "tf"
248
+ Provides-Extra: torch
249
+ Requires-Dist: torch<3.0.0,>=2.0.0; extra == "torch"
250
+ Requires-Dist: torchvision>=0.15.0; extra == "torch"
251
+ Requires-Dist: onnx<3.0.0,>=1.12.0; extra == "torch"
252
+ Provides-Extra: html
253
+ Requires-Dist: weasyprint>=55.0; extra == "html"
254
+ Provides-Extra: viz
255
+ Requires-Dist: matplotlib>=3.1.0; extra == "viz"
256
+ Requires-Dist: mplcursors>=0.3; extra == "viz"
243
257
  Provides-Extra: contrib
244
258
  Requires-Dist: onnxruntime>=1.11.0; extra == "contrib"
259
+ Provides-Extra: testing
260
+ Requires-Dist: pytest>=5.3.2; extra == "testing"
261
+ Requires-Dist: coverage[toml]>=4.5.4; extra == "testing"
262
+ Requires-Dist: onnxruntime>=1.11.0; extra == "testing"
263
+ Requires-Dist: requests>=2.20.0; extra == "testing"
264
+ Requires-Dist: psutil>=5.9.5; extra == "testing"
265
+ Provides-Extra: quality
266
+ Requires-Dist: ruff>=0.1.5; extra == "quality"
267
+ Requires-Dist: mypy>=0.812; extra == "quality"
268
+ Requires-Dist: pre-commit>=2.17.0; extra == "quality"
269
+ Provides-Extra: docs
270
+ Requires-Dist: sphinx!=3.5.0,>=3.0.0; extra == "docs"
271
+ Requires-Dist: sphinxemoji>=0.1.8; extra == "docs"
272
+ Requires-Dist: sphinx-copybutton>=0.3.1; extra == "docs"
273
+ Requires-Dist: docutils<0.22; extra == "docs"
274
+ Requires-Dist: recommonmark>=0.7.1; extra == "docs"
275
+ Requires-Dist: sphinx-markdown-tables>=0.0.15; extra == "docs"
276
+ Requires-Dist: sphinx-tabs>=3.3.0; extra == "docs"
277
+ Requires-Dist: furo>=2022.3.4; extra == "docs"
245
278
  Provides-Extra: dev
246
- Requires-Dist: tensorflow<3.0.0,>=2.15.0; extra == "dev"
279
+ Requires-Dist: tensorflow[and-cuda]<3.0.0,>=2.15.0; sys_platform == "linux" and extra == "dev"
280
+ Requires-Dist: tensorflow<3.0.0,>=2.15.0; sys_platform != "linux" and extra == "dev"
247
281
  Requires-Dist: tf-keras<3.0.0,>=2.15.0; extra == "dev"
248
282
  Requires-Dist: tf2onnx<2.0.0,>=1.16.0; extra == "dev"
249
283
  Requires-Dist: torch<3.0.0,>=2.0.0; extra == "dev"
@@ -268,44 +302,12 @@ Requires-Dist: recommonmark>=0.7.1; extra == "dev"
268
302
  Requires-Dist: sphinx-markdown-tables>=0.0.15; extra == "dev"
269
303
  Requires-Dist: sphinx-tabs>=3.3.0; extra == "dev"
270
304
  Requires-Dist: furo>=2022.3.4; extra == "dev"
271
- Provides-Extra: docs
272
- Requires-Dist: sphinx!=3.5.0,>=3.0.0; extra == "docs"
273
- Requires-Dist: sphinxemoji>=0.1.8; extra == "docs"
274
- Requires-Dist: sphinx-copybutton>=0.3.1; extra == "docs"
275
- Requires-Dist: docutils<0.22; extra == "docs"
276
- Requires-Dist: recommonmark>=0.7.1; extra == "docs"
277
- Requires-Dist: sphinx-markdown-tables>=0.0.15; extra == "docs"
278
- Requires-Dist: sphinx-tabs>=3.3.0; extra == "docs"
279
- Requires-Dist: furo>=2022.3.4; extra == "docs"
280
- Provides-Extra: html
281
- Requires-Dist: weasyprint>=55.0; extra == "html"
282
- Provides-Extra: quality
283
- Requires-Dist: ruff>=0.1.5; extra == "quality"
284
- Requires-Dist: mypy>=0.812; extra == "quality"
285
- Requires-Dist: pre-commit>=2.17.0; extra == "quality"
286
- Provides-Extra: testing
287
- Requires-Dist: pytest>=5.3.2; extra == "testing"
288
- Requires-Dist: coverage[toml]>=4.5.4; extra == "testing"
289
- Requires-Dist: onnxruntime>=1.11.0; extra == "testing"
290
- Requires-Dist: requests>=2.20.0; extra == "testing"
291
- Requires-Dist: psutil>=5.9.5; extra == "testing"
292
- Provides-Extra: tf
293
- Requires-Dist: tensorflow<3.0.0,>=2.15.0; extra == "tf"
294
- Requires-Dist: tf-keras<3.0.0,>=2.15.0; extra == "tf"
295
- Requires-Dist: tf2onnx<2.0.0,>=1.16.0; extra == "tf"
296
- Provides-Extra: torch
297
- Requires-Dist: torch<3.0.0,>=2.0.0; extra == "torch"
298
- Requires-Dist: torchvision>=0.15.0; extra == "torch"
299
- Requires-Dist: onnx<3.0.0,>=1.12.0; extra == "torch"
300
- Provides-Extra: viz
301
- Requires-Dist: matplotlib>=3.1.0; extra == "viz"
302
- Requires-Dist: mplcursors>=0.3; extra == "viz"
303
305
 
304
306
  <p align="center">
305
307
  <img src="https://github.com/mindee/doctr/raw/main/docs/images/Logo_doctr.gif" width="40%">
306
308
  </p>
307
309
 
308
- [![Slack Icon](https://img.shields.io/badge/Slack-Community-4A154B?style=flat-square&logo=slack&logoColor=white)](https://slack.mindee.com) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) ![Build Status](https://github.com/mindee/doctr/workflows/builds/badge.svg) [![Docker Images](https://img.shields.io/badge/Docker-4287f5?style=flat&logo=docker&logoColor=white)](https://github.com/mindee/doctr/pkgs/container/doctr) [![codecov](https://codecov.io/gh/mindee/doctr/branch/main/graph/badge.svg?token=577MO567NM)](https://codecov.io/gh/mindee/doctr) [![CodeFactor](https://www.codefactor.io/repository/github/mindee/doctr/badge?s=bae07db86bb079ce9d6542315b8c6e70fa708a7e)](https://www.codefactor.io/repository/github/mindee/doctr) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/340a76749b634586a498e1c0ab998f08)](https://app.codacy.com/gh/mindee/doctr?utm_source=github.com&utm_medium=referral&utm_content=mindee/doctr&utm_campaign=Badge_Grade) [![Doc Status](https://github.com/mindee/doctr/workflows/doc-status/badge.svg)](https://mindee.github.io/doctr) [![Pypi](https://img.shields.io/badge/pypi-v0.9.0-blue.svg)](https://pypi.org/project/python-doctr/) [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/mindee/doctr) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mindee/notebooks/blob/main/doctr/quicktour.ipynb)
310
+ [![Slack Icon](https://img.shields.io/badge/Slack-Community-4A154B?style=flat-square&logo=slack&logoColor=white)](https://slack.mindee.com) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) ![Build Status](https://github.com/mindee/doctr/workflows/builds/badge.svg) [![Docker Images](https://img.shields.io/badge/Docker-4287f5?style=flat&logo=docker&logoColor=white)](https://github.com/mindee/doctr/pkgs/container/doctr) [![codecov](https://codecov.io/gh/mindee/doctr/branch/main/graph/badge.svg?token=577MO567NM)](https://codecov.io/gh/mindee/doctr) [![CodeFactor](https://www.codefactor.io/repository/github/mindee/doctr/badge?s=bae07db86bb079ce9d6542315b8c6e70fa708a7e)](https://www.codefactor.io/repository/github/mindee/doctr) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/340a76749b634586a498e1c0ab998f08)](https://app.codacy.com/gh/mindee/doctr?utm_source=github.com&utm_medium=referral&utm_content=mindee/doctr&utm_campaign=Badge_Grade) [![Doc Status](https://github.com/mindee/doctr/workflows/doc-status/badge.svg)](https://mindee.github.io/doctr) [![Pypi](https://img.shields.io/badge/pypi-v0.11.0-blue.svg)](https://pypi.org/project/python-doctr/) [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/mindee/doctr) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mindee/notebooks/blob/main/doctr/quicktour.ipynb) [![Gurubase](https://img.shields.io/badge/Gurubase-Ask%20docTR%20Guru-006BFF)](https://gurubase.io/g/doctr)
309
311
 
310
312
 
311
313
  **Optical Character Recognition made seamless & accessible to anyone, powered by TensorFlow 2 & PyTorch**
@@ -438,7 +440,7 @@ The KIE predictor results per page are in a dictionary format with each key repr
438
440
 
439
441
  ### Prerequisites
440
442
 
441
- Python 3.9 (or higher) and [pip](https://pip.pypa.io/en/stable/) are required to install docTR.
443
+ Python 3.10 (or higher) and [pip](https://pip.pypa.io/en/stable/) are required to install docTR.
442
444
 
443
445
  ### Latest release
444
446
 
@@ -557,37 +559,37 @@ Check out our [TensorFlow.js demo](https://github.com/mindee/doctr-tfjs-demo) to
557
559
 
558
560
  ### Docker container
559
561
 
560
- [We offer Docker container support for easy testing and deployment](https://github.com/mindee/doctr/pkgs/container/doctr).
562
+ We offer Docker container support for easy testing and deployment. [Here are the available docker tags](https://github.com/mindee/doctr/pkgs/container/doctr).
561
563
 
562
564
  #### Using GPU with docTR Docker Images
563
565
 
564
- The docTR Docker images are GPU-ready and based on CUDA `11.8`.
565
- However, to use GPU support with these Docker images, please ensure that Docker is configured to use your GPU.
566
+ The docTR Docker images are GPU-ready and based on CUDA `12.2`. Make sure your host is **at least `12.2`**, otherwise Torch or TensorFlow won't be able to initialize the GPU.
567
+ Please ensure that Docker is configured to use your GPU.
566
568
 
567
569
  To verify and configure GPU support for Docker, please follow the instructions provided in the [NVIDIA Container Toolkit Installation Guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html).
568
570
 
569
571
  Once Docker is configured to use GPUs, you can run docTR Docker containers with GPU support:
570
572
 
571
573
  ```shell
572
- docker run -it --gpus all ghcr.io/mindee/doctr:tf-py3.8.18-gpu-2023-09 bash
574
+ docker run -it --gpus all ghcr.io/mindee/doctr:torch-py3.9.18-2024-10 bash
573
575
  ```
574
576
 
575
577
  #### Available Tags
576
578
 
577
- The Docker images for docTR follow a specific tag nomenclature: `<framework>-py<python_version>-<system>-<doctr_version|YYYY-MM>`. Here's a breakdown of the tag structure:
579
+ The Docker images for docTR follow a specific tag nomenclature: `<deps>-py<python_version>-<doctr_version|YYYY-MM>`. Here's a breakdown of the tag structure:
578
580
 
579
- - `<framework>`: `tf` (TensorFlow) or `torch` (PyTorch).
580
- - `<python_version>`: `3.8.18`, `3.9.18`, or `3.10.13`.
581
- - `<system>`: `cpu` or `gpu`
582
- - `<doctr_version>`: a tag >= `v0.7.1`
583
- - `<YYYY-MM>`: e.g. `2023-09`
581
+ - `<deps>`: `tf`, `torch`, `tf-viz-html-contrib` or `torch-viz-html-contrib`.
582
+ - `<python_version>`: `3.9.18`, `3.10.13` or `3.11.8`.
583
+ - `<doctr_version>`: a tag >= `v0.11.0`
584
+ - `<YYYY-MM>`: e.g. `2024-10`
584
585
 
585
586
  Here are examples of different image tags:
586
587
 
587
588
  | Tag | Description |
588
589
  |----------------------------|---------------------------------------------------|
589
- | `tf-py3.8.18-cpu-v0.7.1` | TensorFlow version `3.8.18` with docTR `v0.7.1`. |
590
- | `torch-py3.9.18-gpu-2023-09`| PyTorch version `3.9.18` with GPU support and a monthly build from `2023-09`. |
590
+ | `tf-py3.10.13-v0.11.0` | TensorFlow image with Python `3.10.13` and docTR `v0.11.0`. |
591
+ | `torch-viz-html-contrib-py3.11.8-2024-10` | Torch image with extra dependencies and Python `3.11.8`, from the latest commit on `main` in `2024-10`. |
592
+ | `torch-py3.11.8-2024-10`| PyTorch image with Python `3.11.8`, from the latest commit on `main` in `2024-10`. |
591
593
 
592
594
  #### Building Docker Images Locally
593
595
 
@@ -0,0 +1,173 @@
1
+ doctr/__init__.py,sha256=q-1tv1hf-BRaZtxsrbPVxYNL6ZtyIOSDvlZOSt85TmU,170
2
+ doctr/file_utils.py,sha256=IACtJvyOWm54l2zLb_K3uszYTomGPFFA6pMyokvCMxU,4227
3
+ doctr/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
+ doctr/version.py,sha256=e0CpFyMYqpqhIfpkFC1B5x2yb-TjoiRUJv_M-6zAUlU,24
5
+ doctr/contrib/__init__.py,sha256=EKeAGa3TOuJVWR4H_HJvuKO5VPEnJWXW305JMw3ufic,40
6
+ doctr/contrib/artefacts.py,sha256=sIVEx21GIXKCaYG4RVRvXRmjyGkAdTI9O8GB8vIw8kc,5291
7
+ doctr/contrib/base.py,sha256=g0VAg51NZNB5etQe3gZy-7kSm_NLzg26j7Jb4v8lLBk,3273
8
+ doctr/datasets/__init__.py,sha256=umI2ABbgWIKuhswl8RGaF6CefFiI8DdEGVb0Kbd8aZA,574
9
+ doctr/datasets/cord.py,sha256=VGN6nWLhhiwrXVRBO_oAE_G9TD7Kps9MAjCyiV3WrKM,5270
10
+ doctr/datasets/detection.py,sha256=CXz_qzMw5SjjhEGtgl83YZIpN1sLXUqDKXm1rAfQNBU,3531
11
+ doctr/datasets/doc_artefacts.py,sha256=h8NtwwZmC_xIlpJyJRi83dVuOFZPF_FhtK1v7f7aXlY,3230
12
+ doctr/datasets/funsd.py,sha256=-87Di2sQC41kh60ygQ8_M3Y7DRUhW4K6a6Im7DRrzf0,4698
13
+ doctr/datasets/ic03.py,sha256=4empqiMDUUDJCwvrJV9xswVRjlzuRTYp7xKUsS4TDGo,5543
14
+ doctr/datasets/ic13.py,sha256=ognjXcWf-fZd_CrNsL5qX5bUTPXqN_wl3r1vped-v5M,4483
15
+ doctr/datasets/iiit5k.py,sha256=bflr8qr17749fC2QdVqmXs0MmffEHA83BFUFi6WgQCE,4405
16
+ doctr/datasets/iiithws.py,sha256=cgitit-ePAtZFTO6tRZFNua7MuCJuXYzgkgHybqjnzk,2768
17
+ doctr/datasets/imgur5k.py,sha256=iD5xiqsoSrA7k9Ux7MElj0f9HIiFYOo-wi12CTFh28Q,7211
18
+ doctr/datasets/loader.py,sha256=NqidaahuHFHbXcZFwyrfQVdTsVGpBbg5to6O9PkFCPo,2769
19
+ doctr/datasets/mjsynth.py,sha256=RuVtTjLtIe0tEOF5AibwmMx8Z_hdu3gE04jxQ1iMCz0,4075
20
+ doctr/datasets/ocr.py,sha256=v_F1rTp03wNLE2nLhIqW7T9SQSTHf2ok1Eq-pTCX_ys,2522
21
+ doctr/datasets/orientation.py,sha256=HaF2dKyK7kDUFluY5B2mBtxw8nyO9UNOsbP1dDrD8Lk,1091
22
+ doctr/datasets/recognition.py,sha256=37WIgV2gWT10emPsQop6FUCDCMnA-yAHPLxMJU4gvcE,1859
23
+ doctr/datasets/sroie.py,sha256=bKrsvw98zcEfOw5Re0F7-06MzJRv-E73t7BYwmSokoA,4430
24
+ doctr/datasets/svhn.py,sha256=n86WrKu5a5zPWrEk9HsCEjMR4s87cYijcVIbqmniWX0,5763
25
+ doctr/datasets/svt.py,sha256=yJ3x8UxKHGXvRfASskXG69NEhEqGzFX4XQ_wp1qN5jg,5020
26
+ doctr/datasets/synthtext.py,sha256=GQTj_rPL9QpqzwtT_IXO4yWh9AIo-k07BEnxav-R95Q,5891
27
+ doctr/datasets/utils.py,sha256=boEQRq91P0yLjH3kAS8EHUo3W3_mKL9sFJTb5yByQtE,8000
28
+ doctr/datasets/vocabs.py,sha256=4QPcHJUyDOfs3cVnhF86Q5DVXCrCc6_Pp6Vnf6R5VI0,4610
29
+ doctr/datasets/wildreceipt.py,sha256=ipnLArx_CGDmmkegm6rQUxVGHzFF0EzJREcezY0ap7s,5213
30
+ doctr/datasets/datasets/__init__.py,sha256=CijDtIRsWiCPpUJmDrJGhkA-RAoEUds7eDCuCBhkJnI,200
31
+ doctr/datasets/datasets/base.py,sha256=wxpVVYPLnuPBYFGIR-5vG_FDEzT8wx57xPaUipiM-6c,4826
32
+ doctr/datasets/datasets/pytorch.py,sha256=KLdB_3jBRxrdCfzbPnr9TceDSfDV92TlpoZDDhz_1W8,1972
33
+ doctr/datasets/datasets/tensorflow.py,sha256=rCCzSwo0e2PJsP6iEs7DP-ym-JCypAzHXKmaJ_ge1os,1966
34
+ doctr/datasets/generator/__init__.py,sha256=CijDtIRsWiCPpUJmDrJGhkA-RAoEUds7eDCuCBhkJnI,200
35
+ doctr/datasets/generator/base.py,sha256=xMymMfWhzQwsfpWGG6a7KxPP8T262ilUWrE0OelT90k,5706
36
+ doctr/datasets/generator/pytorch.py,sha256=-6WD696LOS09Caq0ohHlivK22RTK2Ke3YwqAx8n8qQ8,2116
37
+ doctr/datasets/generator/tensorflow.py,sha256=OZ7uecBSZtiS31UxnB-w6pwr2hWyrZlzq0sD3ZRmk88,2211
38
+ doctr/io/__init__.py,sha256=kS7tKGFvzxOCWBOun-Y8n9CsziwRKNynjwpZEUUI03M,106
39
+ doctr/io/elements.py,sha256=4NDuYC0KX74f2-ZVdqcN1GY66f6IPl_q5fDOmn0xvM4,24739
40
+ doctr/io/html.py,sha256=3wSKH5qD03f2RqxA7D_d8oEBRVdTs4lLVvFZCkR2jrc,698
41
+ doctr/io/pdf.py,sha256=1fbmMKU4XAk4sY-wVES48_5EwMdGz4SiilAFcaPq94M,1308
42
+ doctr/io/reader.py,sha256=yES-J4KVIMqG2X-LHriqLm1R6kyQWcNlOAnfkW9Vjsw,2732
43
+ doctr/io/image/__init__.py,sha256=EOWEVxpbu7V5chNglIMiL0ppj5USs1EhCUYX968trPs,193
44
+ doctr/io/image/base.py,sha256=fD6hfJ6myvxo_5LSUVbzOqExK-aSE0qi_l0MY3Pm0fQ,1681
45
+ doctr/io/image/pytorch.py,sha256=eVQKAgTesQOIYhb4bACRNgQ9ofMwdF-719mxqKesMX4,3171
46
+ doctr/io/image/tensorflow.py,sha256=AvmthcYAt2NLNTdocoYyksVRcln8tAnJmFxXZA_VAGE,3104
47
+ doctr/models/__init__.py,sha256=yn_mXUL8B5L27Uaat1rLGRQHgLR8VLVxzBuPfNuN1YE,124
48
+ doctr/models/_utils.py,sha256=vZOb-A__hvJ7y0u1pEsVD9qQY870nkt2ZmQRhR3Llm0,7358
49
+ doctr/models/builder.py,sha256=UKZBuKutxA0iVM7i8owFdKiwQp9KwyC5Hb4Aio0l0hg,20370
50
+ doctr/models/core.py,sha256=CAgymBtEsVAJ70SHdtaBroM67nbNf6osXN6dWC6qAF4,482
51
+ doctr/models/zoo.py,sha256=89E60O61USlQ9a1bTP5SRY2_Gjk4pTvk3TNoCQC4AiI,9276
52
+ doctr/models/classification/__init__.py,sha256=HeErE29Bs1-91FtS9HqNghHe89XZGzI_11MO_E6GJ7s,154
53
+ doctr/models/classification/zoo.py,sha256=F9Ia4XBHT2dyPRtkSTJM2KzxbA_SkWty6RGy62RWPFQ,4437
54
+ doctr/models/classification/magc_resnet/__init__.py,sha256=JEQlG6MD2UQJ9I5Mtqa8ffKbySiE7A7jigFjJ705URA,172
55
+ doctr/models/classification/magc_resnet/pytorch.py,sha256=NRIEG_h17Iyl5uVBQGBShPu5oug03T_8_K0Z9JA9iVw,5535
56
+ doctr/models/classification/magc_resnet/tensorflow.py,sha256=tpXo9PFljQwssExcY_2y86iR4eJ8PZVLgQMJlLd22hc,6699
57
+ doctr/models/classification/mobilenet/__init__.py,sha256=JEQlG6MD2UQJ9I5Mtqa8ffKbySiE7A7jigFjJ705URA,172
58
+ doctr/models/classification/mobilenet/pytorch.py,sha256=9EXXoD82OdG1n8UzONaHabhOv0pHbvL4SQbPVkTvGus,9238
59
+ doctr/models/classification/mobilenet/tensorflow.py,sha256=qNLGy_3IlY2mBqMpQ7d0ZDIXYBVyRIRn6leD-_MiaIM,15756
60
+ doctr/models/classification/predictor/__init__.py,sha256=CijDtIRsWiCPpUJmDrJGhkA-RAoEUds7eDCuCBhkJnI,200
61
+ doctr/models/classification/predictor/pytorch.py,sha256=EZi9YwJhPjnryLzaAWedqy9vaxoLgW5cFh3xi40u20E,2560
62
+ doctr/models/classification/predictor/tensorflow.py,sha256=TrH5hydWNpNFwoeRaOL6SsLXxxtzTg3_KRrPeYPfHmE,2301
63
+ doctr/models/classification/resnet/__init__.py,sha256=CijDtIRsWiCPpUJmDrJGhkA-RAoEUds7eDCuCBhkJnI,200
64
+ doctr/models/classification/resnet/pytorch.py,sha256=Cpy9Dtl5wvFy6fYFIiRbPxbZPLuyfZ55AqxSDR1aT-o,12308
65
+ doctr/models/classification/resnet/tensorflow.py,sha256=D7YS4dJO1kctQlT8Azn9tpTifX1Vwt10MxOfbvboEZc,13676
66
+ doctr/models/classification/textnet/__init__.py,sha256=JEQlG6MD2UQJ9I5Mtqa8ffKbySiE7A7jigFjJ705URA,172
67
+ doctr/models/classification/textnet/pytorch.py,sha256=sqR0oUSzsITVB70rw9r3ASjSRsGx8EqBP-wg9vRGfa0,10046
68
+ doctr/models/classification/textnet/tensorflow.py,sha256=l6hPXy3-kipptPDUJjccMxYebIb0tzGbWEgIrjYg1RI,9986
69
+ doctr/models/classification/vgg/__init__.py,sha256=JEQlG6MD2UQJ9I5Mtqa8ffKbySiE7A7jigFjJ705URA,172
70
+ doctr/models/classification/vgg/pytorch.py,sha256=QKt_lgrR3WUNZd5o8FZwJpGMqoG6Etp9f1EFznddKZs,3090
71
+ doctr/models/classification/vgg/tensorflow.py,sha256=RZO4cM1IZAP0cWHJMdRbg-JbnTrHNxrMPK50EXkvbbw,4322
72
+ doctr/models/classification/vit/__init__.py,sha256=JEQlG6MD2UQJ9I5Mtqa8ffKbySiE7A7jigFjJ705URA,172
73
+ doctr/models/classification/vit/pytorch.py,sha256=eDc2erLBNnIpDZ1z4ZpWFFAf_f5lg7QplDNPfOVGal4,6001
74
+ doctr/models/classification/vit/tensorflow.py,sha256=wnYRc09UDmgHLxqv9O9C0PwmCi3sW2EjL8XVp865tS4,6087
75
+ doctr/models/detection/__init__.py,sha256=RqSz5beehLiqhW0PwFLFmCfTyMjofO-0umcQJLDMHjY,105
76
+ doctr/models/detection/core.py,sha256=-hOsFM199Qmicz95Ns3ClUNPmERkumK34aDPWT8ETuQ,3475
77
+ doctr/models/detection/zoo.py,sha256=VNb4kGaivy5hqY9CQR45y7VXwTCnGl_XCtGP9WS_qVo,4194
78
+ doctr/models/detection/_utils/__init__.py,sha256=-uBG9mM73HDxY1fBYVOO63l8zbj6Q1xOHMeoUJ1MknM,192
79
+ doctr/models/detection/_utils/base.py,sha256=fi8XLUVcWG4J-dhxHeTuB6gYL_brt02T0HIP0TVR8jQ,2475
80
+ doctr/models/detection/_utils/pytorch.py,sha256=81AlwGUZGmqCF18Svh_Mwm96-MPXYg-iR6xHXCEE3u0,1021
81
+ doctr/models/detection/_utils/tensorflow.py,sha256=_QUhLVGNWKLM3vpNflMnofpBypRECu25xaGSeKztNN8,937
82
+ doctr/models/detection/differentiable_binarization/__init__.py,sha256=CijDtIRsWiCPpUJmDrJGhkA-RAoEUds7eDCuCBhkJnI,200
83
+ doctr/models/detection/differentiable_binarization/base.py,sha256=BRzaSOyTvuHDioE1KLScxO-4yACFTCSAqeI926kzQp8,16241
84
+ doctr/models/detection/differentiable_binarization/pytorch.py,sha256=OGTPhV9YQDk4FvVN0qmzoap1FJ3Ac1imYtdb1UjXktU,16132
85
+ doctr/models/detection/differentiable_binarization/tensorflow.py,sha256=bke71SCYEKtWABPEq_3kO0O9S3KFYrj_DWPOoPGNbBg,15118
86
+ doctr/models/detection/fast/__init__.py,sha256=CijDtIRsWiCPpUJmDrJGhkA-RAoEUds7eDCuCBhkJnI,200
87
+ doctr/models/detection/fast/base.py,sha256=3D6Q1TsTlcUJZRAWSL01x-G3nTKxCIDLqHd_ai9lZmY,10758
88
+ doctr/models/detection/fast/pytorch.py,sha256=4omkaLQySnu42z067cf-26FyKa-cK1pQNEqZqiKHpjw,16236
89
+ doctr/models/detection/fast/tensorflow.py,sha256=NrUaRBPFjwD2OEEvE8o52zKvej5Ubj-m9NQsv6jkiwU,15707
90
+ doctr/models/detection/linknet/__init__.py,sha256=CijDtIRsWiCPpUJmDrJGhkA-RAoEUds7eDCuCBhkJnI,200
91
+ doctr/models/detection/linknet/base.py,sha256=aX0n0_dacD-J0GaZeKQNO11oGnbtW81T2SBL5kw762Y,10439
92
+ doctr/models/detection/linknet/pytorch.py,sha256=6KPNnedlWZNPM3LiSaIpgQ9vbrzA3sBUDJlP5KHOg7o,13984
93
+ doctr/models/detection/linknet/tensorflow.py,sha256=HwCswDBFmvXIiNf79pv9J3eucS1zOu79PX0BLRjiBRU,13101
94
+ doctr/models/detection/predictor/__init__.py,sha256=CijDtIRsWiCPpUJmDrJGhkA-RAoEUds7eDCuCBhkJnI,200
95
+ doctr/models/detection/predictor/pytorch.py,sha256=6ODigirC6bWd7x06De8Fp3oP5P-B2FjgaatLJP1-95k,2642
96
+ doctr/models/detection/predictor/tensorflow.py,sha256=KUDXbODUCZ5gRq8Gm5ZOXvdt9FD__WBkXT4d0f9flOI,2401
97
+ doctr/models/factory/__init__.py,sha256=cKPoH2V2157lLMTR2zsljG3_IQHziodqR-XK_LG0D_I,19
98
+ doctr/models/factory/hub.py,sha256=A6LNNIQoe4QdyFwdEzKhghJZ--ICGOSA6wUTbxmDzWc,7401
99
+ doctr/models/kie_predictor/__init__.py,sha256=CijDtIRsWiCPpUJmDrJGhkA-RAoEUds7eDCuCBhkJnI,200
100
+ doctr/models/kie_predictor/base.py,sha256=c8mmLJMLlpnVb5GNDP3hm-oefZ42fbk2dkmKMYVf8Gc,2291
101
+ doctr/models/kie_predictor/pytorch.py,sha256=iwfIwsNpQIepgM9_rG5Mx8MeIc1HHl4YJGIkyQ2z6S0,7991
102
+ doctr/models/kie_predictor/tensorflow.py,sha256=ejSB5_02uyscER2R6RZsPVNEuYOJHjW9mWu4vMOb15s,7802
103
+ doctr/models/modules/__init__.py,sha256=pouP7obVTu4p6aHkyaqa1yHKbynpvT0Hgo-LO_1U2R4,83
104
+ doctr/models/modules/layers/__init__.py,sha256=JEQlG6MD2UQJ9I5Mtqa8ffKbySiE7A7jigFjJ705URA,172
105
+ doctr/models/modules/layers/pytorch.py,sha256=e5o6pS2eqpFbgDsVnHyiB9SLPan72iraqufC6r2yKMY,6805
106
+ doctr/models/modules/layers/tensorflow.py,sha256=9qDHq-8d833ISuf4mdeJyOsFrRly16B54a0dSWySD34,7105
107
+ doctr/models/modules/transformer/__init__.py,sha256=JEQlG6MD2UQJ9I5Mtqa8ffKbySiE7A7jigFjJ705URA,172
108
+ doctr/models/modules/transformer/pytorch.py,sha256=yv9W39pjPwuVNXXPV34WACrK5FUyYvcPYHYoKTcdJXg,7718
109
+ doctr/models/modules/transformer/tensorflow.py,sha256=MIFMtvhkWm17vdy-JHPtx3YfOZoiuETw-oYFayC-WkU,9044
110
+ doctr/models/modules/vision_transformer/__init__.py,sha256=JEQlG6MD2UQJ9I5Mtqa8ffKbySiE7A7jigFjJ705URA,172
111
+ doctr/models/modules/vision_transformer/pytorch.py,sha256=Tpl-zdePOXj_uHJvnfsg-IAZ5cb3klE2rvJKugppjaI,3943
112
+ doctr/models/modules/vision_transformer/tensorflow.py,sha256=W9bOfbb510ZXTrDF3nDSKNhHk-Sn8g8LttR1ZVALvwk,4190
113
+ doctr/models/predictor/__init__.py,sha256=CijDtIRsWiCPpUJmDrJGhkA-RAoEUds7eDCuCBhkJnI,200
114
+ doctr/models/predictor/base.py,sha256=YTDOfuoHfFqp_meuK23MiZpyPVE4nROHJ3AH__388Ns,8718
115
+ doctr/models/predictor/pytorch.py,sha256=E5gP5fPB7fLW1yVn7ZYGp2XFjTuXH4ZGSnIalVWVCrQ,6523
116
+ doctr/models/predictor/tensorflow.py,sha256=gx6NmLJrWNWWp5pT3KOzh-zGLE-ONDo8grGHQdSSRUs,6383
117
+ doctr/models/preprocessor/__init__.py,sha256=CijDtIRsWiCPpUJmDrJGhkA-RAoEUds7eDCuCBhkJnI,200
118
+ doctr/models/preprocessor/pytorch.py,sha256=JxTMyoGuhYLuULoT3ZgKg3FYwTcKg0Fz56-PoLNghqk,4955
119
+ doctr/models/preprocessor/tensorflow.py,sha256=-LX-ke4T7Zh93j8c_V8_fI_NPxsMdWRvkieKZXhTMbQ,4605
120
+ doctr/models/recognition/__init__.py,sha256=902nfVyvjOuUGHDKSGZgoS0fKC52J3jcUJQJhIpvOIY,124
121
+ doctr/models/recognition/core.py,sha256=VLfNOFc9lx6YOLCOK3f7lRuIS9lMXDSvs6je3FmfaZ0,1524
122
+ doctr/models/recognition/utils.py,sha256=X6cG-rB_FaGijPnhw5bB6Git5wjQm2TztbirYdy71FU,3486
123
+ doctr/models/recognition/zoo.py,sha256=nqJdHNNibb0ThywDSwXM5nACpy54k6zke_st0JJ-gxA,3063
124
+ doctr/models/recognition/crnn/__init__.py,sha256=CijDtIRsWiCPpUJmDrJGhkA-RAoEUds7eDCuCBhkJnI,200
125
+ doctr/models/recognition/crnn/pytorch.py,sha256=yiexQ6sB6ZkYDHZ3LjI-Of7x48dOr6g2y37L_BmC9X4,11867
126
+ doctr/models/recognition/crnn/tensorflow.py,sha256=wU2SAWNpQkVHvV8Hg_wzDsCZgbjAQXELWtX35KtuUJE,11696
127
+ doctr/models/recognition/master/__init__.py,sha256=JEQlG6MD2UQJ9I5Mtqa8ffKbySiE7A7jigFjJ705URA,172
128
+ doctr/models/recognition/master/base.py,sha256=SFo8AyoSC8sCkelPiVXB6dBVcdAmZVObCzzel0_RXM4,1471
129
+ doctr/models/recognition/master/pytorch.py,sha256=SXjxnVHpB8DtyjT_Se2OaHpowLhIk_PSrRjXLV4Wneo,12467
130
+ doctr/models/recognition/master/tensorflow.py,sha256=QWYij6WEhScyrFWOM94_DUD3CeDrwg92NIrZPnDPTNQ,12207
131
+ doctr/models/recognition/parseq/__init__.py,sha256=JEQlG6MD2UQJ9I5Mtqa8ffKbySiE7A7jigFjJ705URA,172
132
+ doctr/models/recognition/parseq/base.py,sha256=C-J0mm1kWmiVH3X_A1GZizmtqzVYzkFb8Z2bb9e5uww,1465
133
+ doctr/models/recognition/parseq/pytorch.py,sha256=LMxnMyj-GelztL4ZGxC_89NMi3T8PfLGNWaMw6oq2Qc,20165
134
+ doctr/models/recognition/parseq/tensorflow.py,sha256=zvfCRs_Ga0NaE9IJgpPQ8nL-jrNrlpLIkPEJUCth7jY,21701
135
+ doctr/models/recognition/predictor/__init__.py,sha256=CijDtIRsWiCPpUJmDrJGhkA-RAoEUds7eDCuCBhkJnI,200
136
+ doctr/models/recognition/predictor/_utils.py,sha256=sFwFCU9q1cpnDQwm-jilAQYd_MOd2zErhrxemZ-2qm4,3312
137
+ doctr/models/recognition/predictor/pytorch.py,sha256=MD0oHLrvbsCZrIS_t2s76aTVJczjfntnwKaU1-xWqws,2803
138
+ doctr/models/recognition/predictor/tensorflow.py,sha256=-4qjw0JA3JuMfvOhlSjEFh6uQcvqSxwOrGxhpmNk1rQ,2514
139
+ doctr/models/recognition/sar/__init__.py,sha256=CijDtIRsWiCPpUJmDrJGhkA-RAoEUds7eDCuCBhkJnI,200
140
+ doctr/models/recognition/sar/pytorch.py,sha256=y80zMlHILM45lxc70HkdJc70ER4l30EJw3L0OPtb1ao,15319
141
+ doctr/models/recognition/sar/tensorflow.py,sha256=YCiiAO5tlEY2AnUA9IFXJiBYrUns8ZrUfSdmsRnvzno,15145
142
+ doctr/models/recognition/vitstr/__init__.py,sha256=CijDtIRsWiCPpUJmDrJGhkA-RAoEUds7eDCuCBhkJnI,200
143
+ doctr/models/recognition/vitstr/base.py,sha256=5ZjYF0WPgH5sSSaoHeoiw4Os2IZey-souszvkYKgzV8,1419
144
+ doctr/models/recognition/vitstr/pytorch.py,sha256=mwgfQZ-XjI8o6_koZnit7p7nt9iMnBBqrRYCfJByDzM,9760
145
+ doctr/models/recognition/vitstr/tensorflow.py,sha256=BsXiQPHHpIR7mqJkvzfaoTIWv9SbtaDP01q7Fs8OA3M,9788
146
+ doctr/models/utils/__init__.py,sha256=CijDtIRsWiCPpUJmDrJGhkA-RAoEUds7eDCuCBhkJnI,200
147
+ doctr/models/utils/pytorch.py,sha256=ClXBs05UdCdHGbiBKmArmRtha5rs6p36xOV_ZWw5yrU,5488
148
+ doctr/models/utils/tensorflow.py,sha256=QKWDaQVg2uNKkx2Z2A3nht908KunRBqcMZ14S2oxa0c,6357
149
+ doctr/transforms/__init__.py,sha256=0VHbvUZ7llFd1e_7_JdWTaxYMCvaR6KbUJaYJequmQI,23
150
+ doctr/transforms/functional/__init__.py,sha256=JEQlG6MD2UQJ9I5Mtqa8ffKbySiE7A7jigFjJ705URA,172
151
+ doctr/transforms/functional/base.py,sha256=K3taCHB1WGIWJ1FE47VPONZFPr2iAU1wgy6S8fRuODA,6842
152
+ doctr/transforms/functional/pytorch.py,sha256=1oqGHESipDsX0_7VlHymH9-dr75t7ynfndSleCJaXQ4,5110
153
+ doctr/transforms/functional/tensorflow.py,sha256=IEVdcZFDmIZUINNI1S6RT8VI6IsZlOP1rvl7McsXihM,9823
154
+ doctr/transforms/modules/__init__.py,sha256=o2az0yTmrnpI3z5ifzywkRRb79aN1-6sFpR3GVj8QdA,221
155
+ doctr/transforms/modules/base.py,sha256=PZeSQ_RLFu1IevlaJPFSJ_4-wKTl_-79Jvdgcc27e9Q,9874
156
+ doctr/transforms/modules/pytorch.py,sha256=E_L3DhrMKEUlxhB3KTty6fxs0pGtlRXO0EtWeFDDwXI,11862
157
+ doctr/transforms/modules/tensorflow.py,sha256=btfrSJSu6CC7zpbceITU8T2Nsi3Zdpva5MtRx9Dl02I,20252
158
+ doctr/utils/__init__.py,sha256=uQY9ibZ24V896fmihIsK23QOIZdKtk0HyKoCVJ_lLuM,95
159
+ doctr/utils/common_types.py,sha256=ebBlz_61A2gUp_CD2R_8o1pJ5CpCSJimJWzAyGas1uQ,534
160
+ doctr/utils/data.py,sha256=7vY2yr7XvXvlzNMfI2ERqobRJZsXABhLmlQbQQUW1L0,4188
161
+ doctr/utils/fonts.py,sha256=1vEakG5wfOe-XmsXC63Pi_nox6XZaoHcDsJPjafGo-Q,1265
162
+ doctr/utils/geometry.py,sha256=5rTRHKprLuGnKeWdn2b9lzcu6AoVtGw0JINqHRH1vEU,19175
163
+ doctr/utils/metrics.py,sha256=u2d9nz5NY3gHKpmj5DlcK9OjMlB6zwoZrr-vxVntvp4,20249
164
+ doctr/utils/multithreading.py,sha256=FinxQSGRpMs6WL8paixeOGd5CNAkG2VYqC-5d1izYBE,1972
165
+ doctr/utils/reconstitution.py,sha256=mWEGVPhtxnVDKbUDr8E1tad4HaGISGC3DenJvm8I0uM,7256
166
+ doctr/utils/repr.py,sha256=zw4vLHFIqpTGqsEFosGLkM7aLAjy6RLu_yUz5SyI_YA,2087
167
+ doctr/utils/visualization.py,sha256=ywnhKjZ3RsIKHrxGiAu-O8FYn2xPLNNU3CxOe8psz0A,13092
168
+ python_doctr-0.11.0.dist-info/LICENSE,sha256=75RTSsXOsAYhGpxsHc9U41ep6GS7vrUPufeekgoeOXM,11336
169
+ python_doctr-0.11.0.dist-info/METADATA,sha256=gb5X7HqbCN5mbUmJHN8gyz859xLAjRgZwUGEAZ2g4kY,33941
170
+ python_doctr-0.11.0.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
171
+ python_doctr-0.11.0.dist-info/top_level.txt,sha256=lCgp4pmjPI3HYph62XhfzA3jRwM715kGtJPmqIUJ9t8,6
172
+ python_doctr-0.11.0.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
173
+ python_doctr-0.11.0.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (75.2.0)
2
+ Generator: setuptools (75.8.0)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5