python-doctr 0.9.0__py3-none-any.whl → 0.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (162)
  1. doctr/contrib/__init__.py +1 -0
  2. doctr/contrib/artefacts.py +7 -9
  3. doctr/contrib/base.py +8 -17
  4. doctr/datasets/cord.py +17 -7
  5. doctr/datasets/datasets/__init__.py +4 -4
  6. doctr/datasets/datasets/base.py +16 -16
  7. doctr/datasets/datasets/pytorch.py +12 -12
  8. doctr/datasets/datasets/tensorflow.py +10 -10
  9. doctr/datasets/detection.py +6 -9
  10. doctr/datasets/doc_artefacts.py +3 -4
  11. doctr/datasets/funsd.py +17 -6
  12. doctr/datasets/generator/__init__.py +4 -4
  13. doctr/datasets/generator/base.py +16 -17
  14. doctr/datasets/generator/pytorch.py +1 -3
  15. doctr/datasets/generator/tensorflow.py +1 -3
  16. doctr/datasets/ic03.py +14 -5
  17. doctr/datasets/ic13.py +13 -5
  18. doctr/datasets/iiit5k.py +31 -20
  19. doctr/datasets/iiithws.py +4 -5
  20. doctr/datasets/imgur5k.py +15 -5
  21. doctr/datasets/loader.py +4 -7
  22. doctr/datasets/mjsynth.py +6 -5
  23. doctr/datasets/ocr.py +3 -4
  24. doctr/datasets/orientation.py +3 -4
  25. doctr/datasets/recognition.py +3 -4
  26. doctr/datasets/sroie.py +16 -5
  27. doctr/datasets/svhn.py +16 -5
  28. doctr/datasets/svt.py +14 -5
  29. doctr/datasets/synthtext.py +14 -5
  30. doctr/datasets/utils.py +37 -27
  31. doctr/datasets/vocabs.py +21 -7
  32. doctr/datasets/wildreceipt.py +25 -10
  33. doctr/file_utils.py +18 -4
  34. doctr/io/elements.py +69 -81
  35. doctr/io/html.py +1 -3
  36. doctr/io/image/__init__.py +3 -3
  37. doctr/io/image/base.py +2 -5
  38. doctr/io/image/pytorch.py +3 -12
  39. doctr/io/image/tensorflow.py +2 -11
  40. doctr/io/pdf.py +5 -7
  41. doctr/io/reader.py +5 -11
  42. doctr/models/_utils.py +14 -22
  43. doctr/models/builder.py +32 -50
  44. doctr/models/classification/magc_resnet/__init__.py +3 -3
  45. doctr/models/classification/magc_resnet/pytorch.py +10 -13
  46. doctr/models/classification/magc_resnet/tensorflow.py +21 -17
  47. doctr/models/classification/mobilenet/__init__.py +3 -3
  48. doctr/models/classification/mobilenet/pytorch.py +7 -17
  49. doctr/models/classification/mobilenet/tensorflow.py +22 -29
  50. doctr/models/classification/predictor/__init__.py +4 -4
  51. doctr/models/classification/predictor/pytorch.py +13 -11
  52. doctr/models/classification/predictor/tensorflow.py +13 -11
  53. doctr/models/classification/resnet/__init__.py +4 -4
  54. doctr/models/classification/resnet/pytorch.py +21 -31
  55. doctr/models/classification/resnet/tensorflow.py +41 -39
  56. doctr/models/classification/textnet/__init__.py +3 -3
  57. doctr/models/classification/textnet/pytorch.py +10 -17
  58. doctr/models/classification/textnet/tensorflow.py +19 -20
  59. doctr/models/classification/vgg/__init__.py +3 -3
  60. doctr/models/classification/vgg/pytorch.py +5 -7
  61. doctr/models/classification/vgg/tensorflow.py +18 -15
  62. doctr/models/classification/vit/__init__.py +3 -3
  63. doctr/models/classification/vit/pytorch.py +8 -14
  64. doctr/models/classification/vit/tensorflow.py +16 -16
  65. doctr/models/classification/zoo.py +36 -19
  66. doctr/models/core.py +3 -3
  67. doctr/models/detection/_utils/__init__.py +4 -4
  68. doctr/models/detection/_utils/base.py +4 -7
  69. doctr/models/detection/_utils/pytorch.py +1 -5
  70. doctr/models/detection/_utils/tensorflow.py +1 -5
  71. doctr/models/detection/core.py +2 -8
  72. doctr/models/detection/differentiable_binarization/__init__.py +4 -4
  73. doctr/models/detection/differentiable_binarization/base.py +7 -17
  74. doctr/models/detection/differentiable_binarization/pytorch.py +27 -30
  75. doctr/models/detection/differentiable_binarization/tensorflow.py +49 -37
  76. doctr/models/detection/fast/__init__.py +4 -4
  77. doctr/models/detection/fast/base.py +6 -14
  78. doctr/models/detection/fast/pytorch.py +24 -31
  79. doctr/models/detection/fast/tensorflow.py +28 -37
  80. doctr/models/detection/linknet/__init__.py +4 -4
  81. doctr/models/detection/linknet/base.py +6 -15
  82. doctr/models/detection/linknet/pytorch.py +24 -27
  83. doctr/models/detection/linknet/tensorflow.py +36 -33
  84. doctr/models/detection/predictor/__init__.py +5 -5
  85. doctr/models/detection/predictor/pytorch.py +6 -7
  86. doctr/models/detection/predictor/tensorflow.py +7 -8
  87. doctr/models/detection/zoo.py +27 -7
  88. doctr/models/factory/hub.py +8 -13
  89. doctr/models/kie_predictor/__init__.py +5 -5
  90. doctr/models/kie_predictor/base.py +8 -5
  91. doctr/models/kie_predictor/pytorch.py +22 -19
  92. doctr/models/kie_predictor/tensorflow.py +21 -15
  93. doctr/models/modules/layers/__init__.py +3 -3
  94. doctr/models/modules/layers/pytorch.py +6 -9
  95. doctr/models/modules/layers/tensorflow.py +5 -7
  96. doctr/models/modules/transformer/__init__.py +3 -3
  97. doctr/models/modules/transformer/pytorch.py +12 -13
  98. doctr/models/modules/transformer/tensorflow.py +9 -12
  99. doctr/models/modules/vision_transformer/__init__.py +3 -3
  100. doctr/models/modules/vision_transformer/pytorch.py +3 -4
  101. doctr/models/modules/vision_transformer/tensorflow.py +4 -4
  102. doctr/models/predictor/__init__.py +5 -5
  103. doctr/models/predictor/base.py +52 -41
  104. doctr/models/predictor/pytorch.py +16 -13
  105. doctr/models/predictor/tensorflow.py +16 -10
  106. doctr/models/preprocessor/__init__.py +4 -4
  107. doctr/models/preprocessor/pytorch.py +13 -17
  108. doctr/models/preprocessor/tensorflow.py +11 -15
  109. doctr/models/recognition/core.py +3 -7
  110. doctr/models/recognition/crnn/__init__.py +4 -4
  111. doctr/models/recognition/crnn/pytorch.py +20 -28
  112. doctr/models/recognition/crnn/tensorflow.py +19 -29
  113. doctr/models/recognition/master/__init__.py +3 -3
  114. doctr/models/recognition/master/base.py +3 -7
  115. doctr/models/recognition/master/pytorch.py +22 -24
  116. doctr/models/recognition/master/tensorflow.py +21 -26
  117. doctr/models/recognition/parseq/__init__.py +3 -3
  118. doctr/models/recognition/parseq/base.py +3 -7
  119. doctr/models/recognition/parseq/pytorch.py +26 -26
  120. doctr/models/recognition/parseq/tensorflow.py +26 -30
  121. doctr/models/recognition/predictor/__init__.py +5 -5
  122. doctr/models/recognition/predictor/_utils.py +7 -10
  123. doctr/models/recognition/predictor/pytorch.py +6 -6
  124. doctr/models/recognition/predictor/tensorflow.py +5 -6
  125. doctr/models/recognition/sar/__init__.py +4 -4
  126. doctr/models/recognition/sar/pytorch.py +20 -21
  127. doctr/models/recognition/sar/tensorflow.py +19 -24
  128. doctr/models/recognition/utils.py +5 -10
  129. doctr/models/recognition/vitstr/__init__.py +4 -4
  130. doctr/models/recognition/vitstr/base.py +3 -7
  131. doctr/models/recognition/vitstr/pytorch.py +18 -20
  132. doctr/models/recognition/vitstr/tensorflow.py +21 -24
  133. doctr/models/recognition/zoo.py +22 -11
  134. doctr/models/utils/__init__.py +4 -4
  135. doctr/models/utils/pytorch.py +13 -16
  136. doctr/models/utils/tensorflow.py +31 -30
  137. doctr/models/zoo.py +1 -5
  138. doctr/transforms/functional/__init__.py +3 -3
  139. doctr/transforms/functional/base.py +4 -11
  140. doctr/transforms/functional/pytorch.py +21 -29
  141. doctr/transforms/functional/tensorflow.py +10 -22
  142. doctr/transforms/modules/__init__.py +4 -4
  143. doctr/transforms/modules/base.py +48 -55
  144. doctr/transforms/modules/pytorch.py +65 -28
  145. doctr/transforms/modules/tensorflow.py +33 -44
  146. doctr/utils/common_types.py +8 -9
  147. doctr/utils/data.py +8 -12
  148. doctr/utils/fonts.py +2 -7
  149. doctr/utils/geometry.py +120 -64
  150. doctr/utils/metrics.py +18 -38
  151. doctr/utils/multithreading.py +4 -6
  152. doctr/utils/reconstitution.py +157 -75
  153. doctr/utils/repr.py +2 -3
  154. doctr/utils/visualization.py +16 -29
  155. doctr/version.py +1 -1
  156. {python_doctr-0.9.0.dist-info → python_doctr-0.11.0.dist-info}/METADATA +59 -57
  157. python_doctr-0.11.0.dist-info/RECORD +173 -0
  158. {python_doctr-0.9.0.dist-info → python_doctr-0.11.0.dist-info}/WHEEL +1 -1
  159. python_doctr-0.9.0.dist-info/RECORD +0 -173
  160. {python_doctr-0.9.0.dist-info → python_doctr-0.11.0.dist-info}/LICENSE +0 -0
  161. {python_doctr-0.9.0.dist-info → python_doctr-0.11.0.dist-info}/top_level.txt +0 -0
  162. {python_doctr-0.9.0.dist-info → python_doctr-0.11.0.dist-info}/zip-safe +0 -0
doctr/datasets/sroie.py CHANGED
@@ -1,4 +1,4 @@
-# Copyright (C) 2021-2024, Mindee.
+# Copyright (C) 2021-2025, Mindee.
 
 # This program is licensed under the Apache License 2.0.
 # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
@@ -6,7 +6,7 @@
 import csv
 import os
 from pathlib import Path
-from typing import Any, Dict, List, Tuple, Union
+from typing import Any
 
 import numpy as np
 from tqdm import tqdm
@@ -29,10 +29,10 @@ class SROIE(VisionDataset):
     >>> img, target = train_set[0]
 
     Args:
-    ----
         train: whether the subset should be the training one
         use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
         recognition_task: whether the dataset should be used for recognition task
+        detection_task: whether the dataset should be used for detection task
         **kwargs: keyword arguments from `VisionDataset`.
     """
 
@@ -52,6 +52,7 @@ class SROIE(VisionDataset):
         train: bool = True,
         use_polygons: bool = False,
         recognition_task: bool = False,
+        detection_task: bool = False,
         **kwargs: Any,
     ) -> None:
         url, sha256, name = self.TRAIN if train else self.TEST
@@ -63,13 +64,21 @@
             pre_transforms=convert_target_to_relative if not recognition_task else None,
             **kwargs,
         )
+        if recognition_task and detection_task:
+            raise ValueError(
+                "`recognition_task` and `detection_task` cannot be set to True simultaneously. "
+                + "To get the whole dataset with boxes and labels leave both parameters to False."
+            )
+
         self.train = train
 
         tmp_root = os.path.join(self.root, "images")
-        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any]]]] = []
+        self.data: list[tuple[str | np.ndarray, str | dict[str, Any] | np.ndarray]] = []
         np_dtype = np.float32
 
-        for img_path in tqdm(iterable=os.listdir(tmp_root), desc="Unpacking SROIE", total=len(os.listdir(tmp_root))):
+        for img_path in tqdm(
+            iterable=os.listdir(tmp_root), desc="Preparing and Loading SROIE", total=len(os.listdir(tmp_root))
+        ):
             # File existence check
             if not os.path.exists(os.path.join(tmp_root, img_path)):
                 raise FileNotFoundError(f"unable to locate {os.path.join(tmp_root, img_path)}")
@@ -94,6 +103,8 @@ class SROIE(VisionDataset):
                 for crop, label in zip(crops, labels):
                     if crop.shape[0] > 0 and crop.shape[1] > 0 and len(label) > 0:
                         self.data.append((crop, label))
+            elif detection_task:
+                self.data.append((img_path, coords))
             else:
                 self.data.append((img_path, dict(boxes=coords, labels=labels)))
 
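The same `detection_task` flag, guarded by the same mutual-exclusivity check, is threaded through SVHN, SVT, SynthText and WILDRECEIPT below. A minimal usage sketch of the three resulting modes (`download=True` is a `VisionDataset` keyword; the target layouts follow the constructor logic above):

from doctr.datasets import SROIE

# Default mode: full target with boxes and labels
train_set = SROIE(train=True, download=True)
img, target = train_set[0]  # target: {"boxes": np.ndarray, "labels": list of strings}

# Detection mode: the target collapses to the box array,
# shape (N, 4), or (N, 4, 2) when use_polygons=True
det_set = SROIE(train=True, download=True, detection_task=True)
img, boxes = det_set[0]

# Recognition mode: pre-cropped word images paired with their transcriptions
reco_set = SROIE(train=True, download=True, recognition_task=True)
crop, word = reco_set[0]

# Setting both flags raises the ValueError added in the constructor above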
doctr/datasets/svhn.py CHANGED
@@ -1,10 +1,10 @@
-# Copyright (C) 2021-2024, Mindee.
+# Copyright (C) 2021-2025, Mindee.
 
 # This program is licensed under the Apache License 2.0.
 # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
 
 import os
-from typing import Any, Dict, List, Tuple, Union
+from typing import Any
 
 import h5py
 import numpy as np
@@ -28,10 +28,10 @@ class SVHN(VisionDataset):
     >>> img, target = train_set[0]
 
     Args:
-    ----
         train: whether the subset should be the training one
         use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
         recognition_task: whether the dataset should be used for recognition task
+        detection_task: whether the dataset should be used for detection task
         **kwargs: keyword arguments from `VisionDataset`.
     """
 
@@ -52,6 +52,7 @@ class SVHN(VisionDataset):
         train: bool = True,
         use_polygons: bool = False,
         recognition_task: bool = False,
+        detection_task: bool = False,
         **kwargs: Any,
     ) -> None:
         url, sha256, name = self.TRAIN if train else self.TEST
@@ -63,8 +64,14 @@
             pre_transforms=convert_target_to_relative if not recognition_task else None,
             **kwargs,
         )
+        if recognition_task and detection_task:
+            raise ValueError(
+                "`recognition_task` and `detection_task` cannot be set to True simultaneously. "
+                + "To get the whole dataset with boxes and labels leave both parameters to False."
+            )
+
         self.train = train
-        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any]]]] = []
+        self.data: list[tuple[str | np.ndarray, str | dict[str, Any] | np.ndarray]] = []
         np_dtype = np.float32
 
         tmp_root = os.path.join(self.root, "train" if train else "test")
@@ -73,7 +80,9 @@
         with h5py.File(os.path.join(tmp_root, "digitStruct.mat"), "r") as f:
             img_refs = f["digitStruct/name"]
             box_refs = f["digitStruct/bbox"]
-            for img_ref, box_ref in tqdm(iterable=zip(img_refs, box_refs), desc="Unpacking SVHN", total=len(img_refs)):
+            for img_ref, box_ref in tqdm(
+                iterable=zip(img_refs, box_refs), desc="Preparing and Loading SVHN", total=len(img_refs)
+            ):
                 # convert ascii matrix to string
                 img_name = "".join(map(chr, f[img_ref[0]][()].flatten()))
 
@@ -122,6 +131,8 @@
                     for crop, label in zip(crops, label_targets):
                         if crop.shape[0] > 0 and crop.shape[1] > 0 and len(label) > 0:
                             self.data.append((crop, label))
+                elif detection_task:
+                    self.data.append((img_name, box_targets))
                 else:
                     self.data.append((img_name, dict(boxes=box_targets, labels=label_targets)))
doctr/datasets/svt.py CHANGED
@@ -1,10 +1,10 @@
-# Copyright (C) 2021-2024, Mindee.
+# Copyright (C) 2021-2025, Mindee.
 
 # This program is licensed under the Apache License 2.0.
 # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
 
 import os
-from typing import Any, Dict, List, Tuple, Union
+from typing import Any
 
 import defusedxml.ElementTree as ET
 import numpy as np
@@ -28,10 +28,10 @@ class SVT(VisionDataset):
     >>> img, target = train_set[0]
 
     Args:
-    ----
         train: whether the subset should be the training one
         use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
         recognition_task: whether the dataset should be used for recognition task
+        detection_task: whether the dataset should be used for detection task
         **kwargs: keyword arguments from `VisionDataset`.
     """
 
@@ -43,6 +43,7 @@ class SVT(VisionDataset):
         train: bool = True,
         use_polygons: bool = False,
         recognition_task: bool = False,
+        detection_task: bool = False,
         **kwargs: Any,
     ) -> None:
         super().__init__(
@@ -53,8 +54,14 @@ class SVT(VisionDataset):
             pre_transforms=convert_target_to_relative if not recognition_task else None,
             **kwargs,
         )
+        if recognition_task and detection_task:
+            raise ValueError(
+                "`recognition_task` and `detection_task` cannot be set to True simultaneously. "
+                + "To get the whole dataset with boxes and labels leave both parameters to False."
+            )
+
         self.train = train
-        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any]]]] = []
+        self.data: list[tuple[str | np.ndarray, str | dict[str, Any] | np.ndarray]] = []
         np_dtype = np.float32
 
         # Load xml data
@@ -66,7 +73,7 @@ class SVT(VisionDataset):
         )
         xml_root = xml_tree.getroot()
 
-        for image in tqdm(iterable=xml_root, desc="Unpacking SVT", total=len(xml_root)):
+        for image in tqdm(iterable=xml_root, desc="Preparing and Loading SVT", total=len(xml_root)):
             name, _, _, _resolution, rectangles = image
 
             # File existence check
@@ -108,6 +115,8 @@ class SVT(VisionDataset):
                 for crop, label in zip(crops, labels):
                     if crop.shape[0] > 0 and crop.shape[1] > 0 and len(label) > 0:
                         self.data.append((crop, label))
+            elif detection_task:
+                self.data.append((name.text, boxes))
             else:
                 self.data.append((name.text, dict(boxes=boxes, labels=labels)))
 
doctr/datasets/synthtext.py CHANGED
@@ -1,11 +1,11 @@
-# Copyright (C) 2021-2024, Mindee.
+# Copyright (C) 2021-2025, Mindee.
 
 # This program is licensed under the Apache License 2.0.
 # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
 
 import glob
 import os
-from typing import Any, Dict, List, Tuple, Union
+from typing import Any
 
 import numpy as np
 from PIL import Image
@@ -31,10 +31,10 @@ class SynthText(VisionDataset):
     >>> img, target = train_set[0]
 
     Args:
-    ----
         train: whether the subset should be the training one
         use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
         recognition_task: whether the dataset should be used for recognition task
+        detection_task: whether the dataset should be used for detection task
         **kwargs: keyword arguments from `VisionDataset`.
     """
 
@@ -46,6 +46,7 @@ class SynthText(VisionDataset):
         train: bool = True,
         use_polygons: bool = False,
         recognition_task: bool = False,
+        detection_task: bool = False,
         **kwargs: Any,
     ) -> None:
         super().__init__(
@@ -56,8 +57,14 @@ class SynthText(VisionDataset):
             pre_transforms=convert_target_to_relative if not recognition_task else None,
             **kwargs,
         )
+        if recognition_task and detection_task:
+            raise ValueError(
+                "`recognition_task` and `detection_task` cannot be set to True simultaneously. "
+                + "To get the whole dataset with boxes and labels leave both parameters to False."
+            )
+
         self.train = train
-        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any]]]] = []
+        self.data: list[tuple[str | np.ndarray, str | dict[str, Any] | np.ndarray]] = []
         np_dtype = np.float32
 
         # Load mat data
@@ -83,7 +90,7 @@ class SynthText(VisionDataset):
         del mat_data
 
         for img_path, word_boxes, txt in tqdm(
-            iterable=zip(paths, boxes, labels), desc="Unpacking SynthText", total=len(paths)
+            iterable=zip(paths, boxes, labels), desc="Preparing and Loading SynthText", total=len(paths)
         ):
             # File existence check
             if not os.path.exists(os.path.join(tmp_root, img_path[0])):
@@ -111,6 +118,8 @@ class SynthText(VisionDataset):
                         tmp_img = Image.fromarray(crop)
                         tmp_img.save(os.path.join(reco_folder_path, f"{reco_images_counter}.png"))
                         reco_images_counter += 1
+            elif detection_task:
+                self.data.append((img_path[0], np.asarray(word_boxes, dtype=np_dtype)))
             else:
                 self.data.append((img_path[0], dict(boxes=np.asarray(word_boxes, dtype=np_dtype), labels=labels)))
 
doctr/datasets/utils.py CHANGED
@@ -1,4 +1,4 @@
-# Copyright (C) 2021-2024, Mindee.
+# Copyright (C) 2021-2025, Mindee.
 
 # This program is licensed under the Apache License 2.0.
 # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
@@ -6,10 +6,10 @@
 import string
 import unicodedata
 from collections.abc import Sequence
+from collections.abc import Sequence as SequenceType
 from functools import partial
 from pathlib import Path
-from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union
-from typing import Sequence as SequenceType
+from typing import Any, TypeVar
 
 import numpy as np
 from PIL import Image
@@ -19,7 +19,15 @@ from doctr.utils.geometry import convert_to_relative_coords, extract_crops, extr
 
 from .vocabs import VOCABS
 
-__all__ = ["translate", "encode_string", "decode_sequence", "encode_sequences", "pre_transform_multiclass"]
+__all__ = [
+    "translate",
+    "encode_string",
+    "decode_sequence",
+    "encode_sequences",
+    "pre_transform_multiclass",
+    "crop_bboxes_from_image",
+    "convert_target_to_relative",
+]
 
 ImageTensor = TypeVar("ImageTensor")
 
@@ -32,13 +40,11 @@ def translate(
     """Translate a string input in a given vocabulary
 
     Args:
-    ----
         input_string: input string to translate
         vocab_name: vocabulary to use (french, latin, ...)
         unknown_char: unknown character for non-translatable characters
 
     Returns:
-    -------
         A string translated in a given vocab
     """
     if VOCABS.get(vocab_name) is None:
@@ -63,16 +69,14 @@ def translate(
 def encode_string(
     input_string: str,
     vocab: str,
-) -> List[int]:
+) -> list[int]:
     """Given a predefined mapping, encode the string to a sequence of numbers
 
     Args:
-    ----
         input_string: string to encode
         vocab: vocabulary (string), the encoding is given by the indexing of the character sequence
 
     Returns:
-    -------
         A list encoding the input_string
     """
     try:
@@ -85,18 +89,16 @@ def encode_string(
 
 
 def decode_sequence(
-    input_seq: Union[np.ndarray, SequenceType[int]],
+    input_seq: np.ndarray | SequenceType[int],
     mapping: str,
 ) -> str:
     """Given a predefined mapping, decode the sequence of numbers to a string
 
     Args:
-    ----
         input_seq: array to decode
         mapping: vocabulary (string), the encoding is given by the indexing of the character sequence
 
     Returns:
-    -------
         A string, decoded from input_seq
     """
     if not isinstance(input_seq, (Sequence, np.ndarray)):
@@ -108,18 +110,17 @@
 
 
 def encode_sequences(
-    sequences: List[str],
+    sequences: list[str],
     vocab: str,
-    target_size: Optional[int] = None,
+    target_size: int | None = None,
     eos: int = -1,
-    sos: Optional[int] = None,
-    pad: Optional[int] = None,
+    sos: int | None = None,
+    pad: int | None = None,
     dynamic_seq_length: bool = False,
 ) -> np.ndarray:
     """Encode character sequences using a given vocab as mapping
 
     Args:
-    ----
         sequences: the list of character sequences of size N
         vocab: the ordered vocab to use for encoding
         target_size: maximum length of the encoded data
@@ -129,7 +130,6 @@ def encode_sequences(
         dynamic_seq_length: if `target_size` is specified, uses it as upper bound and enables dynamic sequence size
 
     Returns:
-    -------
         the padded encoded data as a tensor
     """
     if 0 <= eos < len(vocab):
@@ -169,21 +169,33 @@ def encode_sequences(
     return encoded_data
 
 
-def convert_target_to_relative(img: ImageTensor, target: Dict[str, Any]) -> Tuple[ImageTensor, Dict[str, Any]]:
-    target["boxes"] = convert_to_relative_coords(target["boxes"], get_img_shape(img))
+def convert_target_to_relative(
+    img: ImageTensor, target: np.ndarray | dict[str, Any]
+) -> tuple[ImageTensor, dict[str, Any] | np.ndarray]:
+    """Converts target to relative coordinates
+
+    Args:
+        img: tf.Tensor or torch.Tensor representing the image
+        target: target to convert to relative coordinates (boxes (N, 4) or polygons (N, 4, 2))
+
+    Returns:
+        The image and the target in relative coordinates
+    """
+    if isinstance(target, np.ndarray):
+        target = convert_to_relative_coords(target, get_img_shape(img))  # type: ignore[arg-type]
+    else:
+        target["boxes"] = convert_to_relative_coords(target["boxes"], get_img_shape(img))  # type: ignore[arg-type]
     return img, target
 
 
-def crop_bboxes_from_image(img_path: Union[str, Path], geoms: np.ndarray) -> List[np.ndarray]:
+def crop_bboxes_from_image(img_path: str | Path, geoms: np.ndarray) -> list[np.ndarray]:
     """Crop a set of bounding boxes from an image
 
     Args:
-    ----
         img_path: path to the image
         geoms: a array of polygons of shape (N, 4, 2) or of straight boxes of shape (N, 4)
 
     Returns:
-    -------
         a list of cropped images
     """
     with Image.open(img_path) as pil_img:
@@ -196,21 +208,19 @@ def crop_bboxes_from_image(img_path: Union[str, Path], geoms: np.ndarray) -> Lis
         raise ValueError("Invalid geometry format")
 
 
-def pre_transform_multiclass(img, target: Tuple[np.ndarray, List]) -> Tuple[np.ndarray, Dict[str, List]]:
+def pre_transform_multiclass(img, target: tuple[np.ndarray, list]) -> tuple[np.ndarray, dict[str, list]]:
    """Converts multiclass target to relative coordinates.
 
     Args:
-    ----
         img: Image
         target: tuple of target polygons and their classes names
 
     Returns:
-    -------
         Image and dictionary of boxes, with class names as keys
     """
     boxes = convert_to_relative_coords(target[0], get_img_shape(img))
     boxes_classes = target[1]
-    boxes_dict: Dict = {k: [] for k in sorted(set(boxes_classes))}
+    boxes_dict: dict = {k: [] for k in sorted(set(boxes_classes))}
     for k, poly in zip(boxes_classes, boxes):
         boxes_dict[k].append(poly)
     boxes_dict = {k: np.stack(v, axis=0) for k, v in boxes_dict.items()}
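`encode_sequences` is what turns transcriptions into fixed-size index arrays for the recognition models; a minimal sketch of its padding contract (the fill-with-`eos` layout is inferred from the documented signature above, so treat the exact output values as an assumption):

from doctr.datasets.utils import encode_sequences
from doctr.datasets.vocabs import VOCABS

vocab = VOCABS["digits"]  # "0123456789"
# eos must lie outside [0, len(vocab)), so len(vocab) is a safe choice
encoded = encode_sequences(["123", "42"], vocab, target_size=5, eos=len(vocab))
# Each row holds the vocab indices of one word, right-padded with the eos index:
# [[1, 2, 3, 10, 10],
#  [4, 2, 10, 10, 10]]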
doctr/datasets/vocabs.py CHANGED
@@ -1,15 +1,14 @@
-# Copyright (C) 2021-2024, Mindee.
+# Copyright (C) 2021-2025, Mindee.
 
 # This program is licensed under the Apache License 2.0.
 # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
 
 import string
-from typing import Dict
 
 __all__ = ["VOCABS"]
 
 
-VOCABS: Dict[str, str] = {
+VOCABS: dict[str, str] = {
     "digits": string.digits,
     "ascii_letters": string.ascii_letters,
     "punctuation": string.punctuation,
@@ -20,11 +19,16 @@ VOCABS: Dict[str, str] = {
     "arabic_digits": "٠١٢٣٤٥٦٧٨٩",
     "arabic_diacritics": "ًٌٍَُِّْ",
     "arabic_punctuation": "؟؛«»—",
-    "hindi_letters": "अआइईउऊऋॠऌॡएऐओऔअंअःकखगघङचछजझञटठडढणतथदधनपफबभमयरलवशषसह",
+    "hindi_letters": "अआइईउऊऋॠऌॡएऐओऔंःकखगघङचछजझञटठडढणतथदधनपफबभमयरलवशषसह",
     "hindi_digits": "०१२३४५६७८९",
-    "hindi_punctuation": "।,?!:्ॐ॰॥॰",
+    "hindi_punctuation": "।,?!:्ॐ॰॥",
+    "gujarati_vowels": "અઆઇઈઉઊઋએઐઓ",
+    "gujarati_consonants": "ખગઘચછજઝઞટઠડઢણતથદધનપફબભમયરલવશસહળક્ષ",
+    "gujarati_digits": "૦૧૨૩૪૫૬૭૮૯",
+    "gujarati_punctuation": "૰ઽ◌ંઃ॥ૐ઼ઁ" + "૱",
     "bangla_letters": "অআইঈউঊঋএঐওঔকখগঘঙচছজঝঞটঠডঢণতথদধনপফবভমযরলশষসহ়ঽািীুূৃেৈোৌ্ৎংঃঁ",
     "bangla_digits": "০১২৩৪৫৬৭৮৯",
+    "generic_cyrillic_letters": "абвгдежзийклмнопрстуфхцчшщьюяАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЬЮЯ",
 }
 
 VOCABS["latin"] = VOCABS["digits"] + VOCABS["ascii_letters"] + VOCABS["punctuation"]
@@ -53,12 +57,22 @@ VOCABS["finnish"] = VOCABS["english"] + "äöÄÖ"
 VOCABS["swedish"] = VOCABS["english"] + "åäöÅÄÖ"
 VOCABS["vietnamese"] = (
     VOCABS["english"]
-    + "áàảạãăắằẳẵặâấầẩẫậéèẻẽẹêếềểễệóòỏõọôốồổộỗơớờởợỡúùủũụưứừửữựiíìỉĩịýỳỷỹỵ"
-    + "ÁÀẢẠÃĂẮẰẲẴẶÂẤẦẨẪẬÉÈẺẼẸÊẾỀỂỄỆÓÒỎÕỌÔỐỒỔỘỖƠỚỜỞỢỠÚÙỦŨỤƯỨỪỬỮỰIÍÌỈĨỊÝỲỶỸỴ"
+    + "áàảạãăắằẳẵặâấầẩẫậđéèẻẽẹêếềểễệóòỏõọôốồổộỗơớờởợỡúùủũụưứừửữựíìỉĩịýỳỷỹỵ"
+    + "ÁÀẢẠÃĂẮẰẲẴẶÂẤẦẨẪẬĐÉÈẺẼẸÊẾỀỂỄỆÓÒỎÕỌÔỐỒỔỘỖƠỚỜỞỢỠÚÙỦŨỤƯỨỪỬỮỰÍÌỈĨỊÝỲỶỸỴ"
 )
 VOCABS["hebrew"] = VOCABS["english"] + "אבגדהוזחטיכלמנסעפצקרשת" + "₪"
 VOCABS["hindi"] = VOCABS["hindi_letters"] + VOCABS["hindi_digits"] + VOCABS["hindi_punctuation"]
+VOCABS["gujarati"] = (
+    VOCABS["gujarati_vowels"]
+    + VOCABS["gujarati_consonants"]
+    + VOCABS["gujarati_digits"]
+    + VOCABS["gujarati_punctuation"]
+    + VOCABS["punctuation"]
+)
 VOCABS["bangla"] = VOCABS["bangla_letters"] + VOCABS["bangla_digits"]
+VOCABS["ukrainian"] = (
+    VOCABS["generic_cyrillic_letters"] + VOCABS["digits"] + VOCABS["punctuation"] + VOCABS["currency"] + "ґіїєҐІЇЄ₴"
+)
 VOCABS["multilingual"] = "".join(
     dict.fromkeys(
         VOCABS["french"]
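The derived entries are plain string concatenations, and `multilingual` deduplicates through `dict.fromkeys`, which keeps first-seen order; a small sketch of both mechanics:

from doctr.datasets.vocabs import VOCABS

# Composite vocabs concatenate their building blocks, as with the new entries above
assert VOCABS["bangla"] == VOCABS["bangla_letters"] + VOCABS["bangla_digits"]

# "".join(dict.fromkeys(...)) drops duplicate characters while preserving order,
# so overlapping alphabets can be merged without repeated symbols
merged = "".join(dict.fromkeys("abca" + "cde"))
print(merged)  # "abcde"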
doctr/datasets/wildreceipt.py CHANGED
@@ -1,4 +1,4 @@
-# Copyright (C) 2021-2024, Mindee.
+# Copyright (C) 2021-2025, Mindee.
 
 # This program is licensed under the Apache License 2.0.
 # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
@@ -6,9 +6,10 @@
 import json
 import os
 from pathlib import Path
-from typing import Any, Dict, List, Tuple, Union
+from typing import Any
 
 import numpy as np
+from tqdm import tqdm
 
 from .datasets import AbstractDataset
 from .utils import convert_target_to_relative, crop_bboxes_from_image
@@ -17,9 +18,10 @@ __all__ = ["WILDRECEIPT"]
 
 
 class WILDRECEIPT(AbstractDataset):
-    """WildReceipt dataset from `"Spatial Dual-Modality Graph Reasoning for Key Information Extraction"
-    <https://arxiv.org/abs/2103.14470v1>`_ |
-    `repository <https://download.openmmlab.com/mmocr/data/wildreceipt.tar>`_.
+    """
+    WildReceipt dataset from `"Spatial Dual-Modality Graph Reasoning for Key Information Extraction"
+    <https://arxiv.org/abs/2103.14470v1>`_ |
+    `"repository" <https://download.openmmlab.com/mmocr/data/wildreceipt.tar>`_.
 
     .. image:: https://doctr-static.mindee.com/models?id=v0.7.0/wildreceipt-dataset.jpg&src=0
         :align: center
@@ -34,12 +36,12 @@ class WILDRECEIPT(AbstractDataset):
     >>> img, target = test_set[0]
 
     Args:
-    ----
         img_folder: folder with all the images of the dataset
         label_path: path to the annotations file of the dataset
         train: whether the subset should be the training one
         use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
         recognition_task: whether the dataset should be used for recognition task
+        detection_task: whether the dataset should be used for detection task
         **kwargs: keyword arguments from `AbstractDataset`.
     """
 
@@ -50,11 +52,19 @@ class WILDRECEIPT(AbstractDataset):
         train: bool = True,
         use_polygons: bool = False,
         recognition_task: bool = False,
+        detection_task: bool = False,
         **kwargs: Any,
     ) -> None:
         super().__init__(
             img_folder, pre_transforms=convert_target_to_relative if not recognition_task else None, **kwargs
         )
+        # Task check
+        if recognition_task and detection_task:
+            raise ValueError(
+                "`recognition_task` and `detection_task` cannot be set to True simultaneously. "
+                + "To get the whole dataset with boxes and labels leave both parameters to False."
+            )
+
         # File existence check
         if not os.path.exists(label_path) or not os.path.exists(img_folder):
             raise FileNotFoundError(f"unable to locate {label_path if not os.path.exists(label_path) else img_folder}")
@@ -62,15 +72,18 @@ class WILDRECEIPT(AbstractDataset):
         tmp_root = img_folder
         self.train = train
         np_dtype = np.float32
-        self.data: List[Tuple[Union[str, Path, np.ndarray], Union[str, Dict[str, Any]]]] = []
+        self.data: list[tuple[str | Path | np.ndarray, str | dict[str, Any] | np.ndarray]] = []
 
         with open(label_path, "r") as file:
             data = file.read()
         # Split the text file into separate JSON strings
         json_strings = data.strip().split("\n")
-        box: Union[List[float], np.ndarray]
-        _targets = []
-        for json_string in json_strings:
+        box: list[float] | np.ndarray
+
+        for json_string in tqdm(
+            iterable=json_strings, desc="Preparing and Loading WILDRECEIPT", total=len(json_strings)
+        ):
+            _targets = []
             json_data = json.loads(json_string)
             img_path = json_data["file_name"]
             annotations = json_data["annotations"]
@@ -100,6 +113,8 @@
                 for crop, label in zip(crops, list(text_targets)):
                     if label and " " not in label:
                         self.data.append((crop, label))
+            elif detection_task:
+                self.data.append((img_path, np.asarray(box_targets, dtype=int).clip(min=0)))
             else:
                 self.data.append((
                     img_path,
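Besides the progress bar, note that `_targets = []` moved from before the loop into it, so every JSON record now starts from a fresh annotation list. A toy illustration of why the placement matters (hypothetical data, not from the dataset):

records = [["word_a"], ["word_b"]]

# Accumulator created once: later records drag along earlier entries
acc = []
per_image = []
for rec in records:
    acc.extend(rec)
    per_image.append(list(acc))
print(per_image)  # [['word_a'], ['word_a', 'word_b']]

# Accumulator reset per iteration, as in the new loop: each record stays isolated
per_image = []
for rec in records:
    acc = []
    acc.extend(rec)
    per_image.append(list(acc))
print(per_image)  # [['word_a'], ['word_b']]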
doctr/file_utils.py CHANGED
@@ -1,4 +1,4 @@
-# Copyright (C) 2021-2024, Mindee.
+# Copyright (C) 2021-2025, Mindee.
 
 # This program is licensed under the Apache License 2.0.
 # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
@@ -9,7 +9,6 @@ import importlib.metadata
 import importlib.util
 import logging
 import os
-from typing import Optional
 
 CLASS_NAME: str = "words"
 
@@ -35,6 +34,20 @@ else:  # pragma: no cover
     logging.info("Disabling PyTorch because USE_TF is set")
     _torch_available = False
 
+# Compatibility fix to make sure tensorflow.keras stays at Keras 2
+if "TF_USE_LEGACY_KERAS" not in os.environ:
+    os.environ["TF_USE_LEGACY_KERAS"] = "1"
+
+elif os.environ["TF_USE_LEGACY_KERAS"] != "1":
+    raise ValueError(
+        "docTR is only compatible with Keras 2, but you have explicitly set `TF_USE_LEGACY_KERAS` to `0`. "
+    )
+
+
+def ensure_keras_v2() -> None:  # pragma: no cover
+    if not os.environ.get("TF_USE_LEGACY_KERAS") == "1":
+        os.environ["TF_USE_LEGACY_KERAS"] = "1"
+
 
 if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
     _tf_available = importlib.util.find_spec("tensorflow") is not None
@@ -65,6 +78,8 @@ if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VA
         _tf_available = False
     else:
         logging.info(f"TensorFlow version {_tf_version} available.")
+        ensure_keras_v2()
+
 else:  # pragma: no cover
     logging.info("Disabling Tensorflow because USE_TORCH is set")
     _tf_available = False
@@ -77,12 +92,11 @@ if not _torch_available and not _tf_available:  # pragma: no cover
     )
 
 
-def requires_package(name: str, extra_message: Optional[str] = None) -> None:  # pragma: no cover
+def requires_package(name: str, extra_message: str | None = None) -> None:  # pragma: no cover
     """
     package requirement helper
 
     Args:
-    ----
         name: name of the package
         extra_message: additional message to display if the package is not found
     """