python-doctr 0.7.0__py3-none-any.whl → 0.8.1__py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (137)
  1. doctr/datasets/__init__.py +2 -0
  2. doctr/datasets/cord.py +6 -4
  3. doctr/datasets/datasets/base.py +3 -2
  4. doctr/datasets/datasets/pytorch.py +4 -2
  5. doctr/datasets/datasets/tensorflow.py +4 -2
  6. doctr/datasets/detection.py +6 -3
  7. doctr/datasets/doc_artefacts.py +2 -1
  8. doctr/datasets/funsd.py +7 -8
  9. doctr/datasets/generator/base.py +3 -2
  10. doctr/datasets/generator/pytorch.py +3 -1
  11. doctr/datasets/generator/tensorflow.py +3 -1
  12. doctr/datasets/ic03.py +3 -2
  13. doctr/datasets/ic13.py +2 -1
  14. doctr/datasets/iiit5k.py +6 -4
  15. doctr/datasets/iiithws.py +2 -1
  16. doctr/datasets/imgur5k.py +3 -2
  17. doctr/datasets/loader.py +4 -2
  18. doctr/datasets/mjsynth.py +2 -1
  19. doctr/datasets/ocr.py +2 -1
  20. doctr/datasets/orientation.py +40 -0
  21. doctr/datasets/recognition.py +3 -2
  22. doctr/datasets/sroie.py +2 -1
  23. doctr/datasets/svhn.py +2 -1
  24. doctr/datasets/svt.py +3 -2
  25. doctr/datasets/synthtext.py +2 -1
  26. doctr/datasets/utils.py +27 -11
  27. doctr/datasets/vocabs.py +26 -1
  28. doctr/datasets/wildreceipt.py +111 -0
  29. doctr/file_utils.py +3 -1
  30. doctr/io/elements.py +52 -35
  31. doctr/io/html.py +5 -3
  32. doctr/io/image/base.py +5 -4
  33. doctr/io/image/pytorch.py +12 -7
  34. doctr/io/image/tensorflow.py +11 -6
  35. doctr/io/pdf.py +5 -4
  36. doctr/io/reader.py +13 -5
  37. doctr/models/_utils.py +30 -53
  38. doctr/models/artefacts/barcode.py +4 -3
  39. doctr/models/artefacts/face.py +4 -2
  40. doctr/models/builder.py +58 -43
  41. doctr/models/classification/__init__.py +1 -0
  42. doctr/models/classification/magc_resnet/pytorch.py +5 -2
  43. doctr/models/classification/magc_resnet/tensorflow.py +5 -2
  44. doctr/models/classification/mobilenet/pytorch.py +16 -4
  45. doctr/models/classification/mobilenet/tensorflow.py +29 -20
  46. doctr/models/classification/predictor/pytorch.py +3 -2
  47. doctr/models/classification/predictor/tensorflow.py +2 -1
  48. doctr/models/classification/resnet/pytorch.py +23 -13
  49. doctr/models/classification/resnet/tensorflow.py +33 -26
  50. doctr/models/classification/textnet/__init__.py +6 -0
  51. doctr/models/classification/textnet/pytorch.py +275 -0
  52. doctr/models/classification/textnet/tensorflow.py +267 -0
  53. doctr/models/classification/vgg/pytorch.py +4 -2
  54. doctr/models/classification/vgg/tensorflow.py +5 -2
  55. doctr/models/classification/vit/pytorch.py +9 -3
  56. doctr/models/classification/vit/tensorflow.py +9 -3
  57. doctr/models/classification/zoo.py +7 -2
  58. doctr/models/core.py +1 -1
  59. doctr/models/detection/__init__.py +1 -0
  60. doctr/models/detection/_utils/pytorch.py +7 -1
  61. doctr/models/detection/_utils/tensorflow.py +7 -3
  62. doctr/models/detection/core.py +9 -3
  63. doctr/models/detection/differentiable_binarization/base.py +37 -25
  64. doctr/models/detection/differentiable_binarization/pytorch.py +80 -104
  65. doctr/models/detection/differentiable_binarization/tensorflow.py +74 -55
  66. doctr/models/detection/fast/__init__.py +6 -0
  67. doctr/models/detection/fast/base.py +256 -0
  68. doctr/models/detection/fast/pytorch.py +442 -0
  69. doctr/models/detection/fast/tensorflow.py +428 -0
  70. doctr/models/detection/linknet/base.py +12 -5
  71. doctr/models/detection/linknet/pytorch.py +28 -15
  72. doctr/models/detection/linknet/tensorflow.py +68 -88
  73. doctr/models/detection/predictor/pytorch.py +16 -6
  74. doctr/models/detection/predictor/tensorflow.py +13 -5
  75. doctr/models/detection/zoo.py +19 -16
  76. doctr/models/factory/hub.py +20 -10
  77. doctr/models/kie_predictor/base.py +2 -1
  78. doctr/models/kie_predictor/pytorch.py +28 -36
  79. doctr/models/kie_predictor/tensorflow.py +27 -27
  80. doctr/models/modules/__init__.py +1 -0
  81. doctr/models/modules/layers/__init__.py +6 -0
  82. doctr/models/modules/layers/pytorch.py +166 -0
  83. doctr/models/modules/layers/tensorflow.py +175 -0
  84. doctr/models/modules/transformer/pytorch.py +24 -22
  85. doctr/models/modules/transformer/tensorflow.py +6 -4
  86. doctr/models/modules/vision_transformer/pytorch.py +2 -4
  87. doctr/models/modules/vision_transformer/tensorflow.py +2 -4
  88. doctr/models/obj_detection/faster_rcnn/pytorch.py +4 -2
  89. doctr/models/predictor/base.py +14 -3
  90. doctr/models/predictor/pytorch.py +26 -29
  91. doctr/models/predictor/tensorflow.py +25 -22
  92. doctr/models/preprocessor/pytorch.py +14 -9
  93. doctr/models/preprocessor/tensorflow.py +10 -5
  94. doctr/models/recognition/core.py +4 -1
  95. doctr/models/recognition/crnn/pytorch.py +23 -16
  96. doctr/models/recognition/crnn/tensorflow.py +25 -17
  97. doctr/models/recognition/master/base.py +4 -1
  98. doctr/models/recognition/master/pytorch.py +20 -9
  99. doctr/models/recognition/master/tensorflow.py +20 -8
  100. doctr/models/recognition/parseq/base.py +4 -1
  101. doctr/models/recognition/parseq/pytorch.py +28 -22
  102. doctr/models/recognition/parseq/tensorflow.py +22 -11
  103. doctr/models/recognition/predictor/_utils.py +3 -2
  104. doctr/models/recognition/predictor/pytorch.py +3 -2
  105. doctr/models/recognition/predictor/tensorflow.py +2 -1
  106. doctr/models/recognition/sar/pytorch.py +14 -7
  107. doctr/models/recognition/sar/tensorflow.py +23 -14
  108. doctr/models/recognition/utils.py +5 -1
  109. doctr/models/recognition/vitstr/base.py +4 -1
  110. doctr/models/recognition/vitstr/pytorch.py +22 -13
  111. doctr/models/recognition/vitstr/tensorflow.py +21 -10
  112. doctr/models/recognition/zoo.py +4 -2
  113. doctr/models/utils/pytorch.py +24 -6
  114. doctr/models/utils/tensorflow.py +22 -3
  115. doctr/models/zoo.py +21 -3
  116. doctr/transforms/functional/base.py +8 -3
  117. doctr/transforms/functional/pytorch.py +23 -6
  118. doctr/transforms/functional/tensorflow.py +25 -5
  119. doctr/transforms/modules/base.py +12 -5
  120. doctr/transforms/modules/pytorch.py +10 -12
  121. doctr/transforms/modules/tensorflow.py +17 -9
  122. doctr/utils/common_types.py +1 -1
  123. doctr/utils/data.py +4 -2
  124. doctr/utils/fonts.py +3 -2
  125. doctr/utils/geometry.py +95 -26
  126. doctr/utils/metrics.py +36 -22
  127. doctr/utils/multithreading.py +5 -3
  128. doctr/utils/repr.py +3 -1
  129. doctr/utils/visualization.py +31 -8
  130. doctr/version.py +1 -1
  131. {python_doctr-0.7.0.dist-info → python_doctr-0.8.1.dist-info}/METADATA +67 -31
  132. python_doctr-0.8.1.dist-info/RECORD +173 -0
  133. {python_doctr-0.7.0.dist-info → python_doctr-0.8.1.dist-info}/WHEEL +1 -1
  134. python_doctr-0.7.0.dist-info/RECORD +0 -161
  135. {python_doctr-0.7.0.dist-info → python_doctr-0.8.1.dist-info}/LICENSE +0 -0
  136. {python_doctr-0.7.0.dist-info → python_doctr-0.8.1.dist-info}/top_level.txt +0 -0
  137. {python_doctr-0.7.0.dist-info → python_doctr-0.8.1.dist-info}/zip-safe +0 -0
doctr/utils/visualization.py CHANGED
@@ -1,4 +1,4 @@
- # Copyright (C) 2021-2023, Mindee.
+ # Copyright (C) 2021-2024, Mindee.
  
  # This program is licensed under the Apache License 2.0.
  # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
@@ -34,6 +34,7 @@ def rect_patch(
      """Create a matplotlib rectangular patch for the element
  
      Args:
+     ----
          geometry: bounding box of the element
          page_dimensions: dimensions of the Page in format (height, width)
          label: label to display when hovered
@@ -44,9 +45,9 @@ def rect_patch(
          preserve_aspect_ratio: pass True if you passed True to the predictor
  
      Returns:
+     -------
          a rectangular Patch
      """
- 
      if len(geometry) != 2 or any(not isinstance(elt, tuple) or len(elt) != 2 for elt in geometry):
          raise ValueError("invalid geometry format")
  
@@ -84,6 +85,7 @@ def polygon_patch(
      """Create a matplotlib polygon patch for the element
  
      Args:
+     ----
          geometry: bounding box of the element
          page_dimensions: dimensions of the Page in format (height, width)
          label: label to display when hovered
@@ -94,9 +96,9 @@ def polygon_patch(
          preserve_aspect_ratio: pass True if you passed True to the predictor
  
      Returns:
+     -------
          a polygon Patch
      """
- 
      if not geometry.shape == (4, 2):
          raise ValueError("invalid geometry format")
  
@@ -123,15 +125,18 @@ def create_obj_patch(
      """Create a matplotlib patch for the element
  
      Args:
+     ----
          geometry: bounding box (straight or rotated) of the element
          page_dimensions: dimensions of the page in format (height, width)
+         **kwargs: keyword arguments for the patch
  
      Returns:
+     -------
          a matplotlib Patch
      """
      if isinstance(geometry, tuple):
          if len(geometry) == 2:  # straight word BB (2 pts)
-             return rect_patch(geometry, page_dimensions, **kwargs)  # type: ignore[arg-type]
+             return rect_patch(geometry, page_dimensions, **kwargs)
          elif len(geometry) == 4:  # rotated word BB (4 pts)
              return polygon_patch(np.asarray(geometry), page_dimensions, **kwargs)
      elif isinstance(geometry, np.ndarray) and geometry.shape == (4, 2):  # rotated line
@@ -143,9 +148,11 @@ def get_colors(num_colors: int) -> List[Tuple[float, float, float]]:
      """Generate num_colors color for matplotlib
  
      Args:
+     ----
          num_colors: number of colors to generate
  
      Returns:
+     -------
          colors: list of generated colors
      """
      colors = []
@@ -180,6 +187,7 @@ def visualize_page(
      >>> plt.show()
  
      Args:
+     ----
          page: the exported Page of a Document
          image: np array of the page, needs to have the same shape than page['dimensions']
          words_only: whether only words should be displayed
@@ -187,6 +195,11 @@ def visualize_page(
          scale: figsize of the largest windows side
          interactive: whether the plot should be interactive
          add_labels: for static plot, adds text labels on top of bounding box
+         **kwargs: keyword arguments for the polygon patch
+ 
+     Returns:
+     -------
+         the matplotlib figure
      """
      # Get proper scale and aspect ratio
      h, w = image.shape[:2]
@@ -283,15 +296,16 @@ def synthesize_page(
      """Draw a the content of the element page (OCR response) on a blank page.
  
      Args:
+     ----
          page: exported Page object to represent
          draw_proba: if True, draw words in colors to represent confidence. Blue: p=1, red: p=0
          font_size: size of the font, default font = 13
          font_family: family of the font
  
-     Return:
+     Returns:
+     -------
          the synthesized page
      """
- 
      # Draw template
      h, w = page["dimensions"]
      response = 255 * np.ones((h, w, 3), dtype=np.int32)
@@ -354,6 +368,7 @@ def visualize_kie_page(
      >>> plt.show()
  
      Args:
+     ----
          page: the exported Page of a Document
          image: np array of the page, needs to have the same shape than page['dimensions']
          words_only: whether only words should be displayed
@@ -361,6 +376,11 @@ def visualize_kie_page(
          scale: figsize of the largest windows side
          interactive: whether the plot should be interactive
          add_labels: for static plot, adds text labels on top of bounding box
+         **kwargs: keyword arguments for the polygon patch
+ 
+     Returns:
+     -------
+         the matplotlib figure
      """
      # Get proper scale and aspect ratio
      h, w = image.shape[:2]
@@ -408,15 +428,16 @@ def synthesize_kie_page(
      """Draw a the content of the element page (OCR response) on a blank page.
  
      Args:
+     ----
          page: exported Page object to represent
          draw_proba: if True, draw words in colors to represent confidence. Blue: p=1, red: p=0
          font_size: size of the font, default font = 13
          font_family: family of the font
  
-     Return:
+     Returns:
+     -------
          the synthesized page
      """
- 
      # Draw template
      h, w = page["dimensions"]
      response = 255 * np.ones((h, w, 3), dtype=np.int32)
@@ -459,9 +480,11 @@ def draw_boxes(boxes: np.ndarray, image: np.ndarray, color: Optional[Tuple[int,
      """Draw an array of relative straight boxes on an image
  
      Args:
+     ----
          boxes: array of relative boxes, of shape (*, 4)
          image: np array, float32 or uint8
          color: color to use for bounding box edges
+         **kwargs: keyword arguments from `matplotlib.pyplot.plot`
      """
      h, w = image.shape[:2]
      # Convert boxes to absolute coords
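The visualization hunks above mostly migrate docstrings to numpydoc-style `Args:`/`Returns:` sections, and `visualize_page` / `visualize_kie_page` now document that they return the matplotlib figure. A minimal sketch of capturing that figure (the input file and output path are illustrative assumptions, not taken from this diff):

```python
import matplotlib.pyplot as plt

from doctr.io import DocumentFile
from doctr.models import ocr_predictor
from doctr.utils.visualization import visualize_page

doc = DocumentFile.from_images("page.png")  # hypothetical local image
result = ocr_predictor(pretrained=True)(doc)

# Because the figure is returned, it can be saved instead of only shown
fig = visualize_page(result.pages[0].export(), doc[0], interactive=False)
fig.savefig("page_overlay.png")
plt.close(fig)
```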
doctr/version.py CHANGED
@@ -1 +1 @@
- __version__ = 'v0.7.0'
+ __version__ = 'v0.8.1'
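A quick sanity check after upgrading (a sketch, assuming the package imports cleanly and re-exports the version string, as the `doctr/__init__.py` entry in the RECORD below suggests):

```python
import doctr

# should print 'v0.8.1' for the new wheel
print(doctr.__version__)
```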
{python_doctr-0.7.0.dist-info → python_doctr-0.8.1.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: python-doctr
- Version: 0.7.0
+ Version: 0.8.1
  Summary: Document Text Recognition (docTR): deep Learning for high-performance OCR on documents.
  Author-email: Mindee <contact@mindee.com>
  Maintainer: François-Guillaume Fernandez, Charles Gaillard, Olivier Dulcy, Felix Dittrich
@@ -209,7 +209,7 @@ License: Apache License
  Project-URL: documentation, https://mindee.github.io/doctr
  Project-URL: repository, https://github.com/mindee/doctr
  Project-URL: tracker, https://github.com/mindee/doctr/issues
- Project-URL: changelog, https://github.com/mindee/doctr/latest/changelog.html
+ Project-URL: changelog, https://mindee.github.io/doctr/changelog.html
  Keywords: OCR,deep learning,computer vision,tensorflow,pytorch,text detection,text recognition
  Classifier: Development Status :: 4 - Beta
  Classifier: Intended Audience :: Developers
@@ -236,17 +236,17 @@ Requires-Dist: pyclipper <2.0.0,>=1.2.0
  Requires-Dist: shapely <3.0.0,>=1.6.0
  Requires-Dist: langdetect <2.0.0,>=1.0.9
  Requires-Dist: rapidfuzz <4.0.0,>=3.0.0
+ Requires-Dist: huggingface-hub <1.0.0,>=0.20.0
  Requires-Dist: matplotlib >=3.1.0
  Requires-Dist: weasyprint >=55.0
- Requires-Dist: Pillow >=10.0.0
+ Requires-Dist: Pillow >=9.2.0
  Requires-Dist: defusedxml >=0.7.0
  Requires-Dist: mplcursors >=0.3
  Requires-Dist: unidecode >=1.0.0
  Requires-Dist: tqdm >=4.30.0
- Requires-Dist: huggingface-hub >=0.5.0
  Provides-Extra: dev
- Requires-Dist: tensorflow <3.0.0,>=2.11.0 ; extra == 'dev'
- Requires-Dist: tf2onnx <2.0.0,>=1.15.1 ; extra == 'dev'
+ Requires-Dist: tensorflow <2.16.0,>=2.11.0 ; extra == 'dev'
+ Requires-Dist: tf2onnx <2.0.0,>=1.16.0 ; extra == 'dev'
  Requires-Dist: torch <3.0.0,>=1.12.0 ; extra == 'dev'
  Requires-Dist: torchvision >=0.13.0 ; extra == 'dev'
  Requires-Dist: onnx <3.0.0,>=1.12.0 ; extra == 'dev'
@@ -256,11 +256,8 @@ Requires-Dist: hdf5storage >=0.1.18 ; extra == 'dev'
  Requires-Dist: onnxruntime >=1.11.0 ; extra == 'dev'
  Requires-Dist: requests >=2.20.0 ; extra == 'dev'
  Requires-Dist: psutil >=5.9.5 ; extra == 'dev'
- Requires-Dist: ruff >=0.0.260 ; extra == 'dev'
- Requires-Dist: isort >=5.7.0 ; extra == 'dev'
- Requires-Dist: black >=22.1 ; extra == 'dev'
+ Requires-Dist: ruff >=0.1.5 ; extra == 'dev'
  Requires-Dist: mypy >=0.812 ; extra == 'dev'
- Requires-Dist: pydocstyle[toml] >=6.1.1 ; extra == 'dev'
  Requires-Dist: pre-commit >=2.17.0 ; extra == 'dev'
  Requires-Dist: sphinx !=3.5.0,>=3.0.0 ; extra == 'dev'
  Requires-Dist: sphinxemoji >=0.1.8 ; extra == 'dev'
@@ -280,11 +277,8 @@ Requires-Dist: sphinx-markdown-tables >=0.0.15 ; extra == 'docs'
  Requires-Dist: sphinx-tabs >=3.3.0 ; extra == 'docs'
  Requires-Dist: furo >=2022.3.4 ; extra == 'docs'
  Provides-Extra: quality
- Requires-Dist: ruff >=0.0.260 ; extra == 'quality'
- Requires-Dist: isort >=5.7.0 ; extra == 'quality'
- Requires-Dist: black >=22.1 ; extra == 'quality'
+ Requires-Dist: ruff >=0.1.5 ; extra == 'quality'
  Requires-Dist: mypy >=0.812 ; extra == 'quality'
- Requires-Dist: pydocstyle[toml] >=6.1.1 ; extra == 'quality'
  Requires-Dist: pre-commit >=2.17.0 ; extra == 'quality'
  Provides-Extra: testing
  Requires-Dist: pytest >=5.3.2 ; extra == 'testing'
@@ -294,18 +288,19 @@ Requires-Dist: onnxruntime >=1.11.0 ; extra == 'testing'
  Requires-Dist: requests >=2.20.0 ; extra == 'testing'
  Requires-Dist: psutil >=5.9.5 ; extra == 'testing'
  Provides-Extra: tf
- Requires-Dist: tensorflow <3.0.0,>=2.11.0 ; extra == 'tf'
- Requires-Dist: tf2onnx <2.0.0,>=1.15.1 ; extra == 'tf'
+ Requires-Dist: tensorflow <2.16.0,>=2.11.0 ; extra == 'tf'
+ Requires-Dist: tf2onnx <2.0.0,>=1.16.0 ; extra == 'tf'
  Provides-Extra: torch
  Requires-Dist: torch <3.0.0,>=1.12.0 ; extra == 'torch'
  Requires-Dist: torchvision >=0.13.0 ; extra == 'torch'
  Requires-Dist: onnx <3.0.0,>=1.12.0 ; extra == 'torch'
  
  <p align="center">
-   <img src="https://github.com/mindee/doctr/releases/download/v0.3.1/Logo_doctr.gif?raw=True" width="40%">
+   <img src="https://github.com/mindee/doctr/raw/main/docs/images/Logo_doctr.gif" width="40%">
  </p>
  
- [![Slack Icon](https://img.shields.io/badge/Slack-Community-4A154B?style=flat-square&logo=slack&logoColor=white)](https://slack.mindee.com) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) ![Build Status](https://github.com/mindee/doctr/workflows/builds/badge.svg) [![codecov](https://codecov.io/gh/mindee/doctr/branch/main/graph/badge.svg?token=577MO567NM)](https://codecov.io/gh/mindee/doctr) [![CodeFactor](https://www.codefactor.io/repository/github/mindee/doctr/badge?s=bae07db86bb079ce9d6542315b8c6e70fa708a7e)](https://www.codefactor.io/repository/github/mindee/doctr) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/340a76749b634586a498e1c0ab998f08)](https://app.codacy.com/gh/mindee/doctr?utm_source=github.com&utm_medium=referral&utm_content=mindee/doctr&utm_campaign=Badge_Grade) [![Doc Status](https://github.com/mindee/doctr/workflows/doc-status/badge.svg)](https://mindee.github.io/doctr) [![Pypi](https://img.shields.io/badge/pypi-v0.6.0-blue.svg)](https://pypi.org/project/python-doctr/) [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/mindee/doctr) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mindee/notebooks/blob/main/doctr/quicktour.ipynb)
+ [![Slack Icon](https://img.shields.io/badge/Slack-Community-4A154B?style=flat-square&logo=slack&logoColor=white)](https://slack.mindee.com) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) ![Build Status](https://github.com/mindee/doctr/workflows/builds/badge.svg) [![Docker Images](https://img.shields.io/badge/Docker-4287f5?style=flat&logo=docker&logoColor=white)](https://github.com/mindee/doctr/pkgs/container/doctr) [![codecov](https://codecov.io/gh/mindee/doctr/branch/main/graph/badge.svg?token=577MO567NM)](https://codecov.io/gh/mindee/doctr) [![CodeFactor](https://www.codefactor.io/repository/github/mindee/doctr/badge?s=bae07db86bb079ce9d6542315b8c6e70fa708a7e)](https://www.codefactor.io/repository/github/mindee/doctr) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/340a76749b634586a498e1c0ab998f08)](https://app.codacy.com/gh/mindee/doctr?utm_source=github.com&utm_medium=referral&utm_content=mindee/doctr&utm_campaign=Badge_Grade) [![Doc Status](https://github.com/mindee/doctr/workflows/doc-status/badge.svg)](https://mindee.github.io/doctr) [![Pypi](https://img.shields.io/badge/pypi-v0.8.0-blue.svg)](https://pypi.org/project/python-doctr/) [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/mindee/doctr) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mindee/notebooks/blob/main/doctr/quicktour.ipynb)
+ 
  
  **Optical Character Recognition made seamless & accessible to anyone, powered by TensorFlow 2 & PyTorch**
  
@@ -314,7 +309,7 @@ What you can expect from this repository:
  - efficient ways to parse textual information (localize and identify each word) from your documents
  - guidance on how to integrate this in your current architecture
  
- ![OCR_example](https://github.com/mindee/doctr/releases/download/v0.2.0/ocr.png?raw=True)
+ ![OCR_example](https://github.com/mindee/doctr/raw/main/docs/images/ocr.png)
  
  ## Quick Tour
  
@@ -377,10 +372,10 @@ If both options are set to False, the predictor will always fit and return rotat
  To interpret your model's predictions, you can visualize them interactively as follows:
  
  ```python
- result.show(doc)
+ result.show()
  ```
  
- ![Visualization sample](https://github.com/mindee/doctr/releases/download/v0.1.1/doctr_example_script.gif?raw=True)
+ ![Visualization sample](https://github.com/mindee/doctr/raw/main/docs/images/doctr_example_script.gif)
  
  Or even rebuild the original document from its predictions:
  
@@ -391,7 +386,7 @@ synthetic_pages = result.synthesize()
  plt.imshow(synthetic_pages[0]); plt.axis('off'); plt.show()
  ```
  
- ![Synthesis sample](https://github.com/mindee/doctr/releases/download/v0.3.1/synthesized_sample.png?raw=True)
+ ![Synthesis sample](https://github.com/mindee/doctr/raw/main/docs/images/synthesized_sample.png)
  
  The `ocr_predictor` returns a `Document` object with a nested structure (with `Page`, `Block`, `Line`, `Word`, `Artefact`).
  To get a better understanding of our document model, check our [documentation](https://mindee.github.io/doctr/modules/io.html#document-structure):
@@ -404,7 +399,7 @@ json_output = result.export()
  
  ### Use the KIE predictor
  
- The KIE predictor is a more flexible predictor compared to OCR as your detection model can detect multiple classes in a document. For example, you can have a detection model to detect just dates and adresses in a document.
+ The KIE predictor is a more flexible predictor compared to OCR as your detection model can detect multiple classes in a document. For example, you can have a detection model to detect just dates and addresses in a document.
  
  The KIE predictor makes it possible to use detector with multiple classes with a recognition model and to have the whole pipeline already setup for you.
  
@@ -430,7 +425,7 @@ The KIE predictor results per page are in a dictionary format with each key repr
  
  ### If you are looking for support from the Mindee team
  
- [![Bad OCR test detection image asking the developer if they need help](https://github.com/mindee/doctr/releases/download/v0.5.1/doctr-need-help.png?raw=True)](https://mindee.com/product/doctr)
+ [![Bad OCR test detection image asking the developer if they need help](https://github.com/mindee/doctr/raw/main/docs/images/doctr-need-help.png)](https://mindee.com/product/doctr)
  
  ## Installation
  
@@ -438,7 +433,7 @@ The KIE predictor results per page are in a dictionary format with each key repr
  
  Python 3.8 (or higher) and [pip](https://pip.pypa.io/en/stable/) are required to install docTR.
  
- Since we use [weasyprint](https://weasyprint.readthedocs.io/), you will need extra dependencies if you are not running Linux.
+ Since we use [weasyprint](https://weasyprint.org/), you will need extra dependencies if you are not running Linux.
  
  For MacOS users, you can install them as follows:
  
@@ -499,6 +494,7 @@ Credits where it's due: this repository is implementing, among others, architect
  
  - DBNet: [Real-time Scene Text Detection with Differentiable Binarization](https://arxiv.org/pdf/1911.08947.pdf).
  - LinkNet: [LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation](https://arxiv.org/pdf/1707.03718.pdf)
+ - FAST: [FAST: Faster Arbitrarily-Shaped Text Detector with Minimalist Kernel Representation](https://arxiv.org/pdf/2111.02394.pdf)
  
  ### Text Recognition
  
@@ -518,7 +514,7 @@ The full package documentation is available [here](https://mindee.github.io/doct
  
  A minimal demo app is provided for you to play with our end-to-end OCR models!
  
- ![Demo app](https://github.com/mindee/doctr/releases/download/v0.3.0/demo_update.png?raw=True)
+ ![Demo app](https://github.com/mindee/doctr/raw/main/docs/images/demo_update.png)
  
  #### Live demo
  
@@ -558,14 +554,54 @@ USE_TORCH=1 streamlit run demo/app.py
  Instead of having your demo actually running Python, you would prefer to run everything in your web browser?
  Check out our [TensorFlow.js demo](https://github.com/mindee/doctr-tfjs-demo) to get started!
  
- ![TFJS demo](https://github.com/mindee/doctr-tfjs-demo/releases/download/v0.1-models/demo_illustration_mini.png?raw=True)
+ ![TFJS demo](https://github.com/mindee/doctr/raw/main/docs/images/demo_illustration_mini.png)
  
  ### Docker container
  
- If you wish to deploy containerized environments, you can use the provided Dockerfile to build a docker image:
+ [We offer Docker container support for easy testing and deployment](https://github.com/mindee/doctr/pkgs/container/doctr).
+ 
+ #### Using GPU with docTR Docker Images
+ 
+ The docTR Docker images are GPU-ready and based on CUDA `11.8`.
+ However, to use GPU support with these Docker images, please ensure that Docker is configured to use your GPU.
+ 
+ To verify and configure GPU support for Docker, please follow the instructions provided in the [NVIDIA Container Toolkit Installation Guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html).
+ 
+ Once Docker is configured to use GPUs, you can run docTR Docker containers with GPU support:
+ 
+ ```shell
+ docker run -it --gpus all ghcr.io/mindee/doctr:tf-py3.8.18-gpu-2023-09 bash
+ ```
+ 
+ #### Available Tags
+ 
+ The Docker images for docTR follow a specific tag nomenclature: `<framework>-py<python_version>-<system>-<doctr_version|YYYY-MM>`. Here's a breakdown of the tag structure:
+ 
+ - `<framework>`: `tf` (TensorFlow) or `torch` (PyTorch).
+ - `<python_version>`: `3.8.18`, `3.9.18`, or `3.10.13`.
+ - `<system>`: `cpu` or `gpu`
+ - `<doctr_version>`: a tag >= `v0.7.1`
+ - `<YYYY-MM>`: e.g. `2023-09`
+ 
+ Here are examples of different image tags:
+ 
+ | Tag | Description |
+ |----------------------------|---------------------------------------------------|
+ | `tf-py3.8.18-cpu-v0.7.1` | TensorFlow version `3.8.18` with docTR `v0.7.1`. |
+ | `torch-py3.9.18-gpu-2023-09`| PyTorch version `3.9.18` with GPU support and a monthly build from `2023-09`. |
+ 
+ #### Building Docker Images Locally
+ 
+ You can also build docTR Docker images locally on your computer.
+ 
+ ```shell
+ docker build -t doctr .
+ ```
+ 
+ You can specify custom Python versions and docTR versions using build arguments. For example, to build a docTR image with TensorFlow, Python version `3.9.10`, and docTR version `v0.7.0`, run the following command:
  
  ```shell
- docker build . -t <YOUR_IMAGE_TAG>
+ docker build -t doctr --build-arg FRAMEWORK=tf --build-arg PYTHON_VERSION=3.9.10 --build-arg DOCTR_VERSION=v0.7.0 .
  ```
  
  ### Example script
@@ -638,8 +674,8 @@ If you wish to cite this project, feel free to use this [BibTeX](http://www.bibt
  
  If you scrolled down to this section, you most likely appreciate open source. Do you feel like extending the range of our supported characters? Or perhaps submitting a paper implementation? Or contributing in any other way?
  
- You're in luck, we compiled a short guide (cf. [`CONTRIBUTING`](CONTRIBUTING.md)) for you to easily do so!
+ You're in luck, we compiled a short guide (cf. [`CONTRIBUTING`](https://mindee.github.io/doctr/contributing/contributing.html)) for you to easily do so!
  
  ## License
  
- Distributed under the Apache 2.0 License. See [`LICENSE`](LICENSE) for more information.
+ Distributed under the Apache 2.0 License. See [`LICENSE`](https://github.com/mindee/doctr?tab=Apache-2.0-1-ov-file#readme) for more information.
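The dependency changes above tighten `tensorflow` to `<2.16.0`, relax `Pillow` back to `>=9.2.0`, and raise the `huggingface-hub` floor to `0.20.0` (now with a `<1.0.0` cap). A hedged install example reflecting the new pins, using the extras declared in this metadata:

```shell
# install the new wheel with the TensorFlow extra (pulls tensorflow <2.16.0)
pip install "python-doctr[tf]==0.8.1"
# or with the PyTorch extra
pip install "python-doctr[torch]==0.8.1"
```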
python_doctr-0.8.1.dist-info/RECORD ADDED
@@ -0,0 +1,173 @@
+ doctr/__init__.py,sha256=m6lezpfDosfTVFssFVrN7aH0tDzM4h9OgCCi0Nevq8g,161
+ doctr/file_utils.py,sha256=P6Ld5_rFSMwv1m91yhARdJgF7KIXWzgUJXUXaUiUNgc,3156
+ doctr/version.py,sha256=M-PQbswhc8YrXcs4GoOEbQ4Ze9_y4MlJlfbrXibvsTs,23
+ doctr/datasets/__init__.py,sha256=umI2ABbgWIKuhswl8RGaF6CefFiI8DdEGVb0Kbd8aZA,574
+ doctr/datasets/cord.py,sha256=p9ObLgqV3uB7TYoS5Puag0q-JtFTPrXUztkxL36U69U,4746
+ doctr/datasets/detection.py,sha256=H6inFO6rjdvU_Asm9UTod5r5bjjpmJJWGityv0RTJ8M,3607
+ doctr/datasets/doc_artefacts.py,sha256=KKOlwE1oUG-sbC43an-TTh2m9PopuryXtUWM471TgO4,3258
+ doctr/datasets/funsd.py,sha256=RtWztUkgPmzjmNbZi55OU8mKzK8fvLSJXpHb3K9ccNg,4174
+ doctr/datasets/ic03.py,sha256=2HEHvW9tLYFiSEaPeNM4vrqL3ICjth6LUUXPcHjrHjQ,5066
+ doctr/datasets/ic13.py,sha256=5qjGMmotEOo_8N2gp0XUdZPW5t2gvVe-cTestlfD6Mc,4010
+ doctr/datasets/iiit5k.py,sha256=7y4pv4WG-FdXCn7aXLsUodXnk63gRBR8325HfqqlQ3k,3936
+ doctr/datasets/iiithws.py,sha256=MFWgIW5bNJSvxWU-USZvbYVHNlkBsnzzMaSGrbut-zQ,2778
+ doctr/datasets/imgur5k.py,sha256=UrDisvRDFJpuD2utLwUDgqVQEZCdesbVIR6upoG1tu4,6705
+ doctr/datasets/loader.py,sha256=px4IeA8ttqf61b6sRcUtQiXS_UBDhPmeiv6DZ7zuZTk,3044
+ doctr/datasets/mjsynth.py,sha256=Sybpaxiib8jDOc33OQgl2gGQ4XX8kKsnZaNokKmt08o,4063
+ doctr/datasets/ocr.py,sha256=wSAU62NUdFgt52vxo65bXPsuKeVWArlAkD5kxWKypiM,2550
+ doctr/datasets/orientation.py,sha256=PZfSQGfBSqzwRlg84L7BA7Lb2jseBvxkKqzh36TtFXk,1113
+ doctr/datasets/recognition.py,sha256=bXNkHqJUgPx10lhPfMBK7B0wmpLd20-MFbuGJXAGW2w,1881
+ doctr/datasets/sroie.py,sha256=bAkPLmw9aVSu_MyEix_FKFW0pbYye1w16vIkumnQ4E8,3939
+ doctr/datasets/svhn.py,sha256=Q4M84eRGWLWQ5Bsw0zvouhHTUQl46B9-pS06ZYKT5j8,5251
+ doctr/datasets/svt.py,sha256=eos2IUqeM8AW98zJ4PjHQ-hM0hUiJ-cumFhctQrpZp4,4551
+ doctr/datasets/synthtext.py,sha256=Q0WKA_UJtgjdBaHHQ888n6ltT-NBuf5kTYQv5SB40IQ,5387
+ doctr/datasets/utils.py,sha256=_5gV_Ti3OfkOjIRsS3hud-V7RcNNVKfgx2AndyEVu6g,7551
+ doctr/datasets/vocabs.py,sha256=uJ-y5qm76o5Wd-JZ023zmFRXTpGzb26Sn-gJt2FVOb0,3121
+ doctr/datasets/wildreceipt.py,sha256=HvnAaxo9lLwC8UMUYYKKJo6HkG8xm2yIHopBsN5G1LA,4566
+ doctr/datasets/datasets/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
+ doctr/datasets/datasets/base.py,sha256=TUK8GMosZnkTBsJm8zOc7AIy3FUMIV2vOTu3YbTjnSQ,4874
+ doctr/datasets/datasets/pytorch.py,sha256=ZMSJcYS3v_Mdzqd4OxW2AIZEf4K2T3nuEp7MbQuy2bo,1981
+ doctr/datasets/datasets/tensorflow.py,sha256=Ivx_T6o2ttHXjyUy5wi0LpsmKZYOVb7xL1fHKvRlE80,1975
+ doctr/datasets/generator/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
+ doctr/datasets/generator/base.py,sha256=TLSPPTUKIOdkXp5SooYDZbX0hyOZMBMY11Di5jTOLnc,5659
+ doctr/datasets/generator/pytorch.py,sha256=HUmdHUm7rU84gXv18BeXdYTDBCHabtw21Xdpm-p02ik,2134
+ doctr/datasets/generator/tensorflow.py,sha256=Yj9vgEjdNnOwrM4Ew2w5TfkEwNXgy6ACZuEnExZcUMs,2229
+ doctr/io/__init__.py,sha256=kS7tKGFvzxOCWBOun-Y8n9CsziwRKNynjwpZEUUI03M,106
+ doctr/io/elements.py,sha256=IGKU2TGQFeJYsv6mHt-mJAQRIwnmi5jJ6tj5ou-QRAY,23241
+ doctr/io/html.py,sha256=PY3bJBn7M2V4fDaSS5RV_9cr-EC4SCDWOnm3VqiBM8U,715
+ doctr/io/pdf.py,sha256=vCZFNB0jabldMG2yLu5_fa1OQY7-LGVyCgS2nBP3Hjk,1320
+ doctr/io/reader.py,sha256=9PEGgvarm7IzxbQgpzev91CYODVrBSA9MNAP1di96gM,2540
+ doctr/io/image/__init__.py,sha256=SqJtZIvr9dIDPLW39kHIX_MxufCKbI54aX28VrjeauE,193
+ doctr/io/image/base.py,sha256=g6kdcoIEQVN0dhFsVUA-gwGedhRhyKjFVrPu9QbR_UQ,1740
+ doctr/io/image/pytorch.py,sha256=dVGXIU2ZgeGDLkOgJ55RneVf5wyJUv9CZbIDh9SVjqA,3254
+ doctr/io/image/tensorflow.py,sha256=4bdeGDo13EFDImNHkxInTSGpzBU4WaXtxPg-hh4trU4,3207
+ doctr/models/__init__.py,sha256=SNHAyfMOn_18tjh8nmdl4vv7XLW2JDXX4fdbiDKcZdA,148
+ doctr/models/_utils.py,sha256=4whOjayC7ZZFd0rp84sAmnM8F4rLXYrlNFeGIQsuKys,5759
+ doctr/models/builder.py,sha256=QcHEuTycC5oH2QAUqZJi1gO5AmFsNkMpAzrGgth4DtM,17890
+ doctr/models/core.py,sha256=SMXYuX1o_Q2zrjcF-vzfqj7IkLKlDyzEOc-4HeiEZ8g,501
+ doctr/models/zoo.py,sha256=6VeOSI_1y8ecvpmOSSLJpEc9Ut1LKqPAsgPQyOCqL_w,9322
+ doctr/models/artefacts/__init__.py,sha256=ZPEGVgF1rk1JudUb_9EWijngdQRGsAthWdth28Hjb1U,43
+ doctr/models/artefacts/barcode.py,sha256=46QPq7J0i9PG4qtYIsRbGhoJMiNzTht2TCOKDCJiPsU,2721
+ doctr/models/artefacts/face.py,sha256=oN2tD6QO7bp79dib9IXK2Y4NB67AIMuoO92n6E1oKqI,1893
+ doctr/models/classification/__init__.py,sha256=HeErE29Bs1-91FtS9HqNghHe89XZGzI_11MO_E6GJ7s,154
+ doctr/models/classification/zoo.py,sha256=ZlQATwhEMj3dwRyJUisPC0XByq6z2I5GdfeidDsm8DQ,2489
+ doctr/models/classification/magc_resnet/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
+ doctr/models/classification/magc_resnet/pytorch.py,sha256=UY65c3_Ux2o4hOm_USEQYP7O69bj76qbUB-PCb_0Lng,5603
+ doctr/models/classification/magc_resnet/tensorflow.py,sha256=4UDrEcom_2wcyE1QjwmT-u6rtpzQ5ViyB1U6HxpT_XI,6423
+ doctr/models/classification/mobilenet/__init__.py,sha256=FBZ2YT2Cq3mj6vpDC3ff5TcMpagNWFhwxQ_brdsgBqo,172
+ doctr/models/classification/mobilenet/pytorch.py,sha256=yio6IMHP658AnRVW_gtUjiT9EWr3Byf96YIG3Mp3nrw,8009
+ doctr/models/classification/mobilenet/tensorflow.py,sha256=VauT18woqAl1UqI8mxt1xklvEV5MVK-fdLkAdRinJWA,14364
+ doctr/models/classification/predictor/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
+ doctr/models/classification/predictor/pytorch.py,sha256=Jah8NsP7eCJFw8-y2tJAxkhjmoqxx7WpW5-uUZ1I0sU,1883
+ doctr/models/classification/predictor/tensorflow.py,sha256=PNOJGkyajRbuRrw2qrbZE0AbzFgXujVQF8gIRuEhhnk,1698
+ doctr/models/classification/resnet/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
+ doctr/models/classification/resnet/pytorch.py,sha256=VVkNit3HEezRfOPw8wfuiEEAUCEnYSauCvWaCFF3cwo,12442
+ doctr/models/classification/resnet/tensorflow.py,sha256=jBGiL6Mucnq7JGkyIa4Y9A6BQz2ol88cm-eBxJjsTPo,13185
+ doctr/models/classification/textnet/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
+ doctr/models/classification/textnet/pytorch.py,sha256=69vbyqRktq-k-JLiKTjkPZCtkP2trIj4VFfnfLxvf6M,10163
+ doctr/models/classification/textnet/tensorflow.py,sha256=XB1O6vw7Swf0zPgYaVzQd3mWcVMVZiYsWbh8I-WZSqo,9789
+ doctr/models/classification/vgg/__init__.py,sha256=FBZ2YT2Cq3mj6vpDC3ff5TcMpagNWFhwxQ_brdsgBqo,172
+ doctr/models/classification/vgg/pytorch.py,sha256=b_q9oWmtlazD4uk9DFYezWgsgAwwN-3ewEz15E2cJR4,3136
+ doctr/models/classification/vgg/tensorflow.py,sha256=mVuyIXtX7iu622K0GwXkALOM7gzFtlGX9IABLP2NR2Y,4090
+ doctr/models/classification/vit/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
+ doctr/models/classification/vit/pytorch.py,sha256=770ZrCPX7LlVUjE9XNFrzcb2i_0lHStJ8Q4vXEhXEHs,6096
+ doctr/models/classification/vit/tensorflow.py,sha256=TtglXtKAE6y_gfzk8DOhUwoQNIMhK86tmhCB1SbT-k0,5869
+ doctr/models/detection/__init__.py,sha256=RqSz5beehLiqhW0PwFLFmCfTyMjofO-0umcQJLDMHjY,105
+ doctr/models/detection/core.py,sha256=K2uQTIu3ttgxj7YF7i1a-X6djIGCSFjZnQQ57JQBDv0,3566
+ doctr/models/detection/zoo.py,sha256=c0wm0g6ihkCkSR1G7iaY-IyAnfbEKpa-jbY6kVv4zrY,3032
+ doctr/models/detection/_utils/__init__.py,sha256=jDHErtF1nkN-uICx8prmdvmGTSoN6U27ZVmHLoqtcNo,131
+ doctr/models/detection/_utils/pytorch.py,sha256=UZ-PK5Uw0dVN978JGj5MVtF7kLXTL4EtugCoq_VVkVk,1063
+ doctr/models/detection/_utils/tensorflow.py,sha256=9D2ita4ZqJus2byLe7bkSIhyYExAiOLAGBbC7-oRZDU,979
+ doctr/models/detection/differentiable_binarization/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
+ doctr/models/detection/differentiable_binarization/base.py,sha256=ucjNmclcN0SfTBZxgucaGC1qWqI3UvBLHfMGzR6LsZI,16323
+ doctr/models/detection/differentiable_binarization/pytorch.py,sha256=-bByMRDipo_0WIXuFPf9DUPfWduVgLd0UIn48GP3f94,15983
+ doctr/models/detection/differentiable_binarization/tensorflow.py,sha256=l4QltrgDMLK_eY0dxEaCDzrB8rlhVpwUmOAPNIzd_70,14506
+ doctr/models/detection/fast/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
+ doctr/models/detection/fast/base.py,sha256=AL3OZ0_OOxOgeE8EJO8EO7RR1c-TVUKepuXlHpG2a74,10818
+ doctr/models/detection/fast/pytorch.py,sha256=u6yjf3zYNrF-qdPoDlUXlWx9cbrw2CH6oX0tfzs3zRI,15920
+ doctr/models/detection/fast/tensorflow.py,sha256=yz7Eb9siNSVba4USDHJIOE9BLXFNDEvA_ZZsLcOqmvQ,15571
+ doctr/models/detection/linknet/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
+ doctr/models/detection/linknet/base.py,sha256=51teowVwuJ_PKJnL7die_KWLNpdnZTIYVh1TXstfIOs,10508
+ doctr/models/detection/linknet/pytorch.py,sha256=sodWXaCDv1taRl3g6lgwxitvhU-ZszfN-OIofsorkp8,13810
+ doctr/models/detection/linknet/tensorflow.py,sha256=PK3adzBG6wz_SA5lMrh0KBKpbDu-e3FaKwTZ8-ZaN-s,12914
+ doctr/models/detection/predictor/__init__.py,sha256=lwmH917kRdbUUBsE02fELIuXQNRNePpIj3iK43ey6Bg,159
+ doctr/models/detection/predictor/pytorch.py,sha256=bKSOe5Gfo5ctvqGAle3CyCKMP-zZpdIH-h-j0D7bBbA,2083
+ doctr/models/detection/predictor/tensorflow.py,sha256=ZVpRrxsje91InWJrOSOfxdtTdvZg-0IXwEBVJBktBRA,1868
+ doctr/models/factory/__init__.py,sha256=cKPoH2V2157lLMTR2zsljG3_IQHziodqR-XK_LG0D_I,19
+ doctr/models/factory/hub.py,sha256=iyktX-LE1wQmtvoFKmRHS2AofkloGBvni6TH0aF_xRI,7918
+ doctr/models/kie_predictor/__init__.py,sha256=lwmH917kRdbUUBsE02fELIuXQNRNePpIj3iK43ey6Bg,159
+ doctr/models/kie_predictor/base.py,sha256=XHnTOzaFiqJiwb6nA7osjFwEHfeTVi4FwfjRDIFJNzU,1784
+ doctr/models/kie_predictor/pytorch.py,sha256=OW0BAVT11R7PKCminD8VbyZ6En12TyaE103zMrSeG4s,6940
+ doctr/models/kie_predictor/tensorflow.py,sha256=JAbHyhEE-OEx1r3NIqWYTlhAb9ECY7ZfW5Jc4d-LwVw,6697
+ doctr/models/modules/__init__.py,sha256=pouP7obVTu4p6aHkyaqa1yHKbynpvT0Hgo-LO_1U2R4,83
+ doctr/models/modules/layers/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
+ doctr/models/modules/layers/pytorch.py,sha256=wKdfoUDCn_jYOgZiLVc4_4K0DlkYY9If4-NhJwUBYOY,6938
+ doctr/models/modules/layers/tensorflow.py,sha256=yslZpOtR2fP5Do7fOxR-GD08DPm6mCLo2fDMhp23QTI,7278
+ doctr/models/modules/transformer/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
+ doctr/models/modules/transformer/pytorch.py,sha256=Bn6KPvhBdtS2MlRQmQT7c_d63maRfwfMias3P8eJ9fA,7725
+ doctr/models/modules/transformer/tensorflow.py,sha256=NTF-Q6ClUIMdSWDqus6kPZjOlKC3XcJ3HqUeyZTqtnU,9113
+ doctr/models/modules/vision_transformer/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
+ doctr/models/modules/vision_transformer/pytorch.py,sha256=D6BbqNPV_7OFRogM0iaiWbn_6uLe3Thbo6GKRSYpfTQ,3970
+ doctr/models/modules/vision_transformer/tensorflow.py,sha256=PaDbTtCc5YGqZNd_NFMdxeq6oNxs0WtVGYwhLCjJWFY,4199
+ doctr/models/obj_detection/__init__.py,sha256=7TJnvLcLYaQtnrXaiBS38qvELgSC-hW6jIhsIfNXob4,27
+ doctr/models/obj_detection/faster_rcnn/__init__.py,sha256=LOFUrXC37tQ8hDYF_xTxiD11YEgnLsW2wY0_MJDKszk,144
+ doctr/models/obj_detection/faster_rcnn/pytorch.py,sha256=xT1U-Wo0tJLcRXe7QOwoaDDYeJKRqHAM5-TsmSGDJG0,2855
+ doctr/models/predictor/__init__.py,sha256=lwmH917kRdbUUBsE02fELIuXQNRNePpIj3iK43ey6Bg,159
+ doctr/models/predictor/base.py,sha256=IL2WNF0kTkFKERAMAwur29ptDtvp7aYbc6WStTLvt9A,6688
+ doctr/models/predictor/pytorch.py,sha256=Dwf23IXE_q6RL3rrsbvK9U1yeoeP27M1mwntdoR4lQs,5954
+ doctr/models/predictor/tensorflow.py,sha256=pYaTv3y_ELgMfgmMCzL2lPzFu2VvNwsoTzaElshuBj0,5800
+ doctr/models/preprocessor/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
+ doctr/models/preprocessor/pytorch.py,sha256=C6s07Xaky0NqCEpjfZB3-mgegkT16dwBXVRaFCfvAN0,4993
+ doctr/models/preprocessor/tensorflow.py,sha256=i_Crf-ZGRzC2cMLQDg9P5aHEfmK_xtF1_HXzu-Ul-4M,4483
+ doctr/models/recognition/__init__.py,sha256=902nfVyvjOuUGHDKSGZgoS0fKC52J3jcUJQJhIpvOIY,124
+ doctr/models/recognition/core.py,sha256=dbg8SebgfK8CPHXR-7rzmCI9XMLXmWW0jLd1yLLv_34,1593
+ doctr/models/recognition/utils.py,sha256=GhNehWmCjl3GJ1ZFneA3cBRrZZk36856uU5i727FaQg,3550
+ doctr/models/recognition/zoo.py,sha256=MakzszAsbiAgAJS4AhA02F6dWG47qTc1DklNXoey8JQ,2505
+ doctr/models/recognition/crnn/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
+ doctr/models/recognition/crnn/pytorch.py,sha256=AE8Ey-Z5VZNGUldL-crbMdyKI__OUMBmn8nYC2790Pc,11802
+ doctr/models/recognition/crnn/tensorflow.py,sha256=dcT1X_zLmEqPiWG628lQTe9WMmfEWubXgCWFYs1BhJo,11666
+ doctr/models/recognition/master/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
+ doctr/models/recognition/master/base.py,sha256=5yQ0mUaS_ZWmUUzTAobgAlNS3Vp90PFvrzAcQXUF758,1540
+ doctr/models/recognition/master/pytorch.py,sha256=Endn_S7svrN27IGdXDgAXXlZ_p0_IpasjvPPiJuxSiI,12318
+ doctr/models/recognition/master/tensorflow.py,sha256=rbrPMz49ySW8Wpd72dBNOH8dvcoAl3NwBi2ID7qVkxA,12140
+ doctr/models/recognition/parseq/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
+ doctr/models/recognition/parseq/base.py,sha256=8MMqibB8zZLw2qU-iyx79Zpr4MdEtbnF3f3ikfLrBjU,1534
+ doctr/models/recognition/parseq/pytorch.py,sha256=lXpjXfgRRLzyHdCJCoc_0xsNN8_67ywoBb6tgoYCnj0,19868
+ doctr/models/recognition/parseq/tensorflow.py,sha256=YNvJoddq8jYtxsW-wEsw1-p0a8gnkxOCEgklATQ2M-0,21558
+ doctr/models/recognition/predictor/__init__.py,sha256=lwmH917kRdbUUBsE02fELIuXQNRNePpIj3iK43ey6Bg,159
+ doctr/models/recognition/predictor/_utils.py,sha256=y6hDoGS8reluLmx8JmTxM2f1uhlYnjOouh0BOr6wNTA,3389
+ doctr/models/recognition/predictor/pytorch.py,sha256=snMHU0GopDEJ9HDdzpVxuvfJxVL-91Le-rc_dSqKCA0,2785
+ doctr/models/recognition/predictor/tensorflow.py,sha256=o4Mhbxf9BUofqTV863U7-Zi0H77imX3LfhqzYLc2m4k,2549
+ doctr/models/recognition/sar/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
+ doctr/models/recognition/sar/pytorch.py,sha256=IuZ2KQO-2Du6FKRoJQud90fwNEhTFQy7e8t7pZaCuQE,15102
+ doctr/models/recognition/sar/tensorflow.py,sha256=wkOlGdqK8NA_PYLQhcrgiv3Rqmeoj_HAi0Ku29QD5ds,15249
+ doctr/models/recognition/vitstr/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
+ doctr/models/recognition/vitstr/base.py,sha256=Xt7hq45tq999boF0XgW62x_cX5wJXx7VLxWA9H06U_o,1488
+ doctr/models/recognition/vitstr/pytorch.py,sha256=8IxKWHt2uy6yXCsT_JTiccFoPToYKENH0H3tP-yTmHI,9596
+ doctr/models/recognition/vitstr/tensorflow.py,sha256=_8k6Jxd715uH8lsBqUCn4C_3tlgE75h_BXt4AlfYrk8,9671
+ doctr/models/utils/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
+ doctr/models/utils/pytorch.py,sha256=bO8a432TzpHTuqAsFuAi2ld6NOT12E2rlghQlW4nAjg,5494
+ doctr/models/utils/tensorflow.py,sha256=VccfK6cyPoWuWGWAWbl17MkLa1srtHU-HJCbb_MXePw,6322
+ doctr/transforms/__init__.py,sha256=0VHbvUZ7llFd1e_7_JdWTaxYMCvaR6KbUJaYJequmQI,23
+ doctr/transforms/functional/__init__.py,sha256=FBZ2YT2Cq3mj6vpDC3ff5TcMpagNWFhwxQ_brdsgBqo,172
+ doctr/transforms/functional/base.py,sha256=mFb2XGEX6g-36k_FSMhjj1MvW4ZXaqxv1GROCSc1cUg,6969
+ doctr/transforms/functional/pytorch.py,sha256=aonwJAzPECgYeEWiigAQfbJHP8my_60Ul0x8LGdD-TI,5015
+ doctr/transforms/functional/tensorflow.py,sha256=35dYnCtA9A9SvjndEvckxD8rK_uZ1_4BTgBZ7WiBtGI,9959
+ doctr/transforms/modules/__init__.py,sha256=a4GXc5YZWt26eeBKo2HqLmbDn1_qo-uko6GoPNrniC0,221
+ doctr/transforms/modules/base.py,sha256=_WboS3OoaM3yVBEApGG36RE61v8rzgSKZOSSsdVXgOU,9126
+ doctr/transforms/modules/pytorch.py,sha256=sPJDRoAgfd9XSjI7DKV-3uZrcD-t6TH4jvL4Mi5yBP4,8606
+ doctr/transforms/modules/tensorflow.py,sha256=5rQ_NcerIlsUWdFicApbDOdvmKZDD9bbojvXuWBOTTE,17959
+ doctr/utils/__init__.py,sha256=uQY9ibZ24V896fmihIsK23QOIZdKtk0HyKoCVJ_lLuM,95
+ doctr/utils/common_types.py,sha256=KXG-4mvL1MPmkrjuhCs8vAfiaBmdGRmt2yQcNlgALM8,584
+ doctr/utils/data.py,sha256=26iN_Ra1OJD_LHIEbefADMxU2yVtCpu3gYdhCW5K9B4,4280
+ doctr/utils/fonts.py,sha256=Ugjac4WPEJLsAf4U8j0f6DIoOpER_w13jHZ_GyvD0Xs,1224
+ doctr/utils/geometry.py,sha256=Cfdw0kdH_K3qFMoioGlKdDgrRhgD2DhxXjy_lhIbpVQ,15685
+ doctr/utils/metrics.py,sha256=hYRRlIW-e8onLPsYvnJL9HzBtwZT3x-p_yu52INz4uw,25935
+ doctr/utils/multithreading.py,sha256=iEM6o_qjutH-CxFTz7K1VQseYpVaHH3Hpw_yNDoQBSw,1989
+ doctr/utils/repr.py,sha256=3GdMquo1NtwNkQPoB-nmDm_AFmU3sLc4T3VfGck9uoQ,2111
+ doctr/utils/visualization.py,sha256=iIO6mEqqVKvkxGpDQJomJmGeplCxAuwuS8Vur0vEtYg,17758
+ python_doctr-0.8.1.dist-info/LICENSE,sha256=75RTSsXOsAYhGpxsHc9U41ep6GS7vrUPufeekgoeOXM,11336
+ python_doctr-0.8.1.dist-info/METADATA,sha256=E2AtXeYk0nHqDzCDhfnGceCC4rR2HFU9lZvLDKYVIa4,33155
+ python_doctr-0.8.1.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+ python_doctr-0.8.1.dist-info/top_level.txt,sha256=lCgp4pmjPI3HYph62XhfzA3jRwM715kGtJPmqIUJ9t8,6
+ python_doctr-0.8.1.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+ python_doctr-0.8.1.dist-info/RECORD,,
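Each RECORD row has the form `path,sha256=<digest>,size`, where the digest is the unpadded urlsafe base64 encoding of the file's SHA-256 hash (per the wheel spec). A small sketch of recomputing one of these digests, assuming the wheel has been unpacked locally (the path is illustrative):

```python
import base64
import hashlib


def record_hash(path: str) -> str:
    """Return the RECORD-style digest: unpadded urlsafe-b64 sha256."""
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b"=").decode()


# e.g. should match the doctr/version.py entry above (23 bytes)
print(record_hash("doctr/version.py"))
```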
{python_doctr-0.7.0.dist-info → python_doctr-0.8.1.dist-info}/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: bdist_wheel (0.41.2)
+ Generator: bdist_wheel (0.42.0)
  Root-Is-Purelib: true
  Tag: py3-none-any
  