python-doctr 0.10.0__py3-none-any.whl → 0.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (162)
  1. doctr/contrib/__init__.py +1 -0
  2. doctr/contrib/artefacts.py +7 -9
  3. doctr/contrib/base.py +8 -17
  4. doctr/datasets/cord.py +8 -7
  5. doctr/datasets/datasets/__init__.py +4 -4
  6. doctr/datasets/datasets/base.py +16 -16
  7. doctr/datasets/datasets/pytorch.py +12 -12
  8. doctr/datasets/datasets/tensorflow.py +10 -10
  9. doctr/datasets/detection.py +6 -9
  10. doctr/datasets/doc_artefacts.py +3 -4
  11. doctr/datasets/funsd.py +7 -6
  12. doctr/datasets/generator/__init__.py +4 -4
  13. doctr/datasets/generator/base.py +16 -17
  14. doctr/datasets/generator/pytorch.py +1 -3
  15. doctr/datasets/generator/tensorflow.py +1 -3
  16. doctr/datasets/ic03.py +4 -5
  17. doctr/datasets/ic13.py +4 -5
  18. doctr/datasets/iiit5k.py +6 -5
  19. doctr/datasets/iiithws.py +4 -5
  20. doctr/datasets/imgur5k.py +6 -5
  21. doctr/datasets/loader.py +4 -7
  22. doctr/datasets/mjsynth.py +6 -5
  23. doctr/datasets/ocr.py +3 -4
  24. doctr/datasets/orientation.py +3 -4
  25. doctr/datasets/recognition.py +3 -4
  26. doctr/datasets/sroie.py +6 -5
  27. doctr/datasets/svhn.py +6 -5
  28. doctr/datasets/svt.py +4 -5
  29. doctr/datasets/synthtext.py +4 -5
  30. doctr/datasets/utils.py +34 -29
  31. doctr/datasets/vocabs.py +17 -7
  32. doctr/datasets/wildreceipt.py +14 -10
  33. doctr/file_utils.py +2 -7
  34. doctr/io/elements.py +59 -79
  35. doctr/io/html.py +1 -3
  36. doctr/io/image/__init__.py +3 -3
  37. doctr/io/image/base.py +2 -5
  38. doctr/io/image/pytorch.py +3 -12
  39. doctr/io/image/tensorflow.py +2 -11
  40. doctr/io/pdf.py +5 -7
  41. doctr/io/reader.py +5 -11
  42. doctr/models/_utils.py +14 -22
  43. doctr/models/builder.py +30 -48
  44. doctr/models/classification/magc_resnet/__init__.py +3 -3
  45. doctr/models/classification/magc_resnet/pytorch.py +10 -13
  46. doctr/models/classification/magc_resnet/tensorflow.py +8 -11
  47. doctr/models/classification/mobilenet/__init__.py +3 -3
  48. doctr/models/classification/mobilenet/pytorch.py +5 -17
  49. doctr/models/classification/mobilenet/tensorflow.py +8 -21
  50. doctr/models/classification/predictor/__init__.py +4 -4
  51. doctr/models/classification/predictor/pytorch.py +6 -8
  52. doctr/models/classification/predictor/tensorflow.py +6 -8
  53. doctr/models/classification/resnet/__init__.py +4 -4
  54. doctr/models/classification/resnet/pytorch.py +21 -31
  55. doctr/models/classification/resnet/tensorflow.py +20 -31
  56. doctr/models/classification/textnet/__init__.py +3 -3
  57. doctr/models/classification/textnet/pytorch.py +10 -17
  58. doctr/models/classification/textnet/tensorflow.py +8 -15
  59. doctr/models/classification/vgg/__init__.py +3 -3
  60. doctr/models/classification/vgg/pytorch.py +5 -7
  61. doctr/models/classification/vgg/tensorflow.py +9 -12
  62. doctr/models/classification/vit/__init__.py +3 -3
  63. doctr/models/classification/vit/pytorch.py +8 -14
  64. doctr/models/classification/vit/tensorflow.py +6 -12
  65. doctr/models/classification/zoo.py +19 -14
  66. doctr/models/core.py +3 -3
  67. doctr/models/detection/_utils/__init__.py +4 -4
  68. doctr/models/detection/_utils/base.py +4 -7
  69. doctr/models/detection/_utils/pytorch.py +1 -5
  70. doctr/models/detection/_utils/tensorflow.py +1 -5
  71. doctr/models/detection/core.py +2 -8
  72. doctr/models/detection/differentiable_binarization/__init__.py +4 -4
  73. doctr/models/detection/differentiable_binarization/base.py +7 -17
  74. doctr/models/detection/differentiable_binarization/pytorch.py +27 -30
  75. doctr/models/detection/differentiable_binarization/tensorflow.py +15 -25
  76. doctr/models/detection/fast/__init__.py +4 -4
  77. doctr/models/detection/fast/base.py +6 -14
  78. doctr/models/detection/fast/pytorch.py +24 -31
  79. doctr/models/detection/fast/tensorflow.py +14 -26
  80. doctr/models/detection/linknet/__init__.py +4 -4
  81. doctr/models/detection/linknet/base.py +6 -15
  82. doctr/models/detection/linknet/pytorch.py +24 -27
  83. doctr/models/detection/linknet/tensorflow.py +14 -23
  84. doctr/models/detection/predictor/__init__.py +5 -5
  85. doctr/models/detection/predictor/pytorch.py +6 -7
  86. doctr/models/detection/predictor/tensorflow.py +5 -6
  87. doctr/models/detection/zoo.py +27 -7
  88. doctr/models/factory/hub.py +3 -7
  89. doctr/models/kie_predictor/__init__.py +5 -5
  90. doctr/models/kie_predictor/base.py +4 -5
  91. doctr/models/kie_predictor/pytorch.py +18 -19
  92. doctr/models/kie_predictor/tensorflow.py +13 -14
  93. doctr/models/modules/layers/__init__.py +3 -3
  94. doctr/models/modules/layers/pytorch.py +6 -9
  95. doctr/models/modules/layers/tensorflow.py +5 -7
  96. doctr/models/modules/transformer/__init__.py +3 -3
  97. doctr/models/modules/transformer/pytorch.py +12 -13
  98. doctr/models/modules/transformer/tensorflow.py +9 -10
  99. doctr/models/modules/vision_transformer/__init__.py +3 -3
  100. doctr/models/modules/vision_transformer/pytorch.py +2 -3
  101. doctr/models/modules/vision_transformer/tensorflow.py +3 -3
  102. doctr/models/predictor/__init__.py +5 -5
  103. doctr/models/predictor/base.py +28 -29
  104. doctr/models/predictor/pytorch.py +12 -13
  105. doctr/models/predictor/tensorflow.py +8 -9
  106. doctr/models/preprocessor/__init__.py +4 -4
  107. doctr/models/preprocessor/pytorch.py +13 -17
  108. doctr/models/preprocessor/tensorflow.py +10 -14
  109. doctr/models/recognition/core.py +3 -7
  110. doctr/models/recognition/crnn/__init__.py +4 -4
  111. doctr/models/recognition/crnn/pytorch.py +20 -28
  112. doctr/models/recognition/crnn/tensorflow.py +11 -23
  113. doctr/models/recognition/master/__init__.py +3 -3
  114. doctr/models/recognition/master/base.py +3 -7
  115. doctr/models/recognition/master/pytorch.py +22 -24
  116. doctr/models/recognition/master/tensorflow.py +12 -22
  117. doctr/models/recognition/parseq/__init__.py +3 -3
  118. doctr/models/recognition/parseq/base.py +3 -7
  119. doctr/models/recognition/parseq/pytorch.py +26 -26
  120. doctr/models/recognition/parseq/tensorflow.py +16 -22
  121. doctr/models/recognition/predictor/__init__.py +5 -5
  122. doctr/models/recognition/predictor/_utils.py +7 -10
  123. doctr/models/recognition/predictor/pytorch.py +6 -6
  124. doctr/models/recognition/predictor/tensorflow.py +5 -6
  125. doctr/models/recognition/sar/__init__.py +4 -4
  126. doctr/models/recognition/sar/pytorch.py +20 -21
  127. doctr/models/recognition/sar/tensorflow.py +12 -21
  128. doctr/models/recognition/utils.py +5 -10
  129. doctr/models/recognition/vitstr/__init__.py +4 -4
  130. doctr/models/recognition/vitstr/base.py +3 -7
  131. doctr/models/recognition/vitstr/pytorch.py +18 -20
  132. doctr/models/recognition/vitstr/tensorflow.py +12 -20
  133. doctr/models/recognition/zoo.py +22 -11
  134. doctr/models/utils/__init__.py +4 -4
  135. doctr/models/utils/pytorch.py +14 -17
  136. doctr/models/utils/tensorflow.py +17 -16
  137. doctr/models/zoo.py +1 -5
  138. doctr/transforms/functional/__init__.py +3 -3
  139. doctr/transforms/functional/base.py +4 -11
  140. doctr/transforms/functional/pytorch.py +20 -28
  141. doctr/transforms/functional/tensorflow.py +10 -22
  142. doctr/transforms/modules/__init__.py +4 -4
  143. doctr/transforms/modules/base.py +48 -55
  144. doctr/transforms/modules/pytorch.py +58 -22
  145. doctr/transforms/modules/tensorflow.py +18 -32
  146. doctr/utils/common_types.py +8 -9
  147. doctr/utils/data.py +8 -12
  148. doctr/utils/fonts.py +2 -7
  149. doctr/utils/geometry.py +16 -47
  150. doctr/utils/metrics.py +17 -37
  151. doctr/utils/multithreading.py +4 -6
  152. doctr/utils/reconstitution.py +9 -13
  153. doctr/utils/repr.py +2 -3
  154. doctr/utils/visualization.py +16 -29
  155. doctr/version.py +1 -1
  156. {python_doctr-0.10.0.dist-info → python_doctr-0.11.0.dist-info}/METADATA +54 -52
  157. python_doctr-0.11.0.dist-info/RECORD +173 -0
  158. {python_doctr-0.10.0.dist-info → python_doctr-0.11.0.dist-info}/WHEEL +1 -1
  159. python_doctr-0.10.0.dist-info/RECORD +0 -173
  160. {python_doctr-0.10.0.dist-info → python_doctr-0.11.0.dist-info}/LICENSE +0 -0
  161. {python_doctr-0.10.0.dist-info → python_doctr-0.11.0.dist-info}/top_level.txt +0 -0
  162. {python_doctr-0.10.0.dist-info → python_doctr-0.11.0.dist-info}/zip-safe +0 -0
doctr/datasets/utils.py CHANGED
@@ -1,4 +1,4 @@
1
- # Copyright (C) 2021-2024, Mindee.
1
+ # Copyright (C) 2021-2025, Mindee.
2
2
 
3
3
  # This program is licensed under the Apache License 2.0.
4
4
  # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
@@ -6,10 +6,10 @@
6
6
  import string
7
7
  import unicodedata
8
8
  from collections.abc import Sequence
9
+ from collections.abc import Sequence as SequenceType
9
10
  from functools import partial
10
11
  from pathlib import Path
11
- from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union
12
- from typing import Sequence as SequenceType
12
+ from typing import Any, TypeVar
13
13
 
14
14
  import numpy as np
15
15
  from PIL import Image
@@ -19,7 +19,15 @@ from doctr.utils.geometry import convert_to_relative_coords, extract_crops, extr
19
19
 
20
20
  from .vocabs import VOCABS
21
21
 
22
- __all__ = ["translate", "encode_string", "decode_sequence", "encode_sequences", "pre_transform_multiclass"]
22
+ __all__ = [
23
+ "translate",
24
+ "encode_string",
25
+ "decode_sequence",
26
+ "encode_sequences",
27
+ "pre_transform_multiclass",
28
+ "crop_bboxes_from_image",
29
+ "convert_target_to_relative",
30
+ ]
23
31
 
24
32
  ImageTensor = TypeVar("ImageTensor")
25
33
 
@@ -32,13 +40,11 @@ def translate(
32
40
  """Translate a string input in a given vocabulary
33
41
 
34
42
  Args:
35
- ----
36
43
  input_string: input string to translate
37
44
  vocab_name: vocabulary to use (french, latin, ...)
38
45
  unknown_char: unknown character for non-translatable characters
39
46
 
40
47
  Returns:
41
- -------
42
48
  A string translated in a given vocab
43
49
  """
44
50
  if VOCABS.get(vocab_name) is None:
@@ -63,16 +69,14 @@ def translate(
63
69
  def encode_string(
64
70
  input_string: str,
65
71
  vocab: str,
66
- ) -> List[int]:
72
+ ) -> list[int]:
67
73
  """Given a predefined mapping, encode the string to a sequence of numbers
68
74
 
69
75
  Args:
70
- ----
71
76
  input_string: string to encode
72
77
  vocab: vocabulary (string), the encoding is given by the indexing of the character sequence
73
78
 
74
79
  Returns:
75
- -------
76
80
  A list encoding the input_string
77
81
  """
78
82
  try:
@@ -85,18 +89,16 @@ def encode_string(
85
89
 
86
90
 
87
91
  def decode_sequence(
88
- input_seq: Union[np.ndarray, SequenceType[int]],
92
+ input_seq: np.ndarray | SequenceType[int],
89
93
  mapping: str,
90
94
  ) -> str:
91
95
  """Given a predefined mapping, decode the sequence of numbers to a string
92
96
 
93
97
  Args:
94
- ----
95
98
  input_seq: array to decode
96
99
  mapping: vocabulary (string), the encoding is given by the indexing of the character sequence
97
100
 
98
101
  Returns:
99
- -------
100
102
  A string, decoded from input_seq
101
103
  """
102
104
  if not isinstance(input_seq, (Sequence, np.ndarray)):
@@ -108,18 +110,17 @@ def decode_sequence(
108
110
 
109
111
 
110
112
  def encode_sequences(
111
- sequences: List[str],
113
+ sequences: list[str],
112
114
  vocab: str,
113
- target_size: Optional[int] = None,
115
+ target_size: int | None = None,
114
116
  eos: int = -1,
115
- sos: Optional[int] = None,
116
- pad: Optional[int] = None,
117
+ sos: int | None = None,
118
+ pad: int | None = None,
117
119
  dynamic_seq_length: bool = False,
118
120
  ) -> np.ndarray:
119
121
  """Encode character sequences using a given vocab as mapping
120
122
 
121
123
  Args:
122
- ----
123
124
  sequences: the list of character sequences of size N
124
125
  vocab: the ordered vocab to use for encoding
125
126
  target_size: maximum length of the encoded data
@@ -129,7 +130,6 @@ def encode_sequences(
129
130
  dynamic_seq_length: if `target_size` is specified, uses it as upper bound and enables dynamic sequence size
130
131
 
131
132
  Returns:
132
- -------
133
133
  the padded encoded data as a tensor
134
134
  """
135
135
  if 0 <= eos < len(vocab):
@@ -170,25 +170,32 @@ def encode_sequences(
170
170
 
171
171
 
172
172
  def convert_target_to_relative(
173
- img: ImageTensor, target: Union[np.ndarray, Dict[str, Any]]
174
- ) -> Tuple[ImageTensor, Union[Dict[str, Any], np.ndarray]]:
173
+ img: ImageTensor, target: np.ndarray | dict[str, Any]
174
+ ) -> tuple[ImageTensor, dict[str, Any] | np.ndarray]:
175
+ """Converts target to relative coordinates
176
+
177
+ Args:
178
+ img: tf.Tensor or torch.Tensor representing the image
179
+ target: target to convert to relative coordinates (boxes (N, 4) or polygons (N, 4, 2))
180
+
181
+ Returns:
182
+ The image and the target in relative coordinates
183
+ """
175
184
  if isinstance(target, np.ndarray):
176
- target = convert_to_relative_coords(target, get_img_shape(img))
185
+ target = convert_to_relative_coords(target, get_img_shape(img)) # type: ignore[arg-type]
177
186
  else:
178
- target["boxes"] = convert_to_relative_coords(target["boxes"], get_img_shape(img))
187
+ target["boxes"] = convert_to_relative_coords(target["boxes"], get_img_shape(img)) # type: ignore[arg-type]
179
188
  return img, target
180
189
 
181
190
 
182
- def crop_bboxes_from_image(img_path: Union[str, Path], geoms: np.ndarray) -> List[np.ndarray]:
191
+ def crop_bboxes_from_image(img_path: str | Path, geoms: np.ndarray) -> list[np.ndarray]:
183
192
  """Crop a set of bounding boxes from an image
184
193
 
185
194
  Args:
186
- ----
187
195
  img_path: path to the image
188
196
  geoms: a array of polygons of shape (N, 4, 2) or of straight boxes of shape (N, 4)
189
197
 
190
198
  Returns:
191
- -------
192
199
  a list of cropped images
193
200
  """
194
201
  with Image.open(img_path) as pil_img:
@@ -201,21 +208,19 @@ def crop_bboxes_from_image(img_path: Union[str, Path], geoms: np.ndarray) -> Lis
201
208
  raise ValueError("Invalid geometry format")
202
209
 
203
210
 
204
- def pre_transform_multiclass(img, target: Tuple[np.ndarray, List]) -> Tuple[np.ndarray, Dict[str, List]]:
211
+ def pre_transform_multiclass(img, target: tuple[np.ndarray, list]) -> tuple[np.ndarray, dict[str, list]]:
205
212
  """Converts multiclass target to relative coordinates.
206
213
 
207
214
  Args:
208
- ----
209
215
  img: Image
210
216
  target: tuple of target polygons and their classes names
211
217
 
212
218
  Returns:
213
- -------
214
219
  Image and dictionary of boxes, with class names as keys
215
220
  """
216
221
  boxes = convert_to_relative_coords(target[0], get_img_shape(img))
217
222
  boxes_classes = target[1]
218
- boxes_dict: Dict = {k: [] for k in sorted(set(boxes_classes))}
223
+ boxes_dict: dict = {k: [] for k in sorted(set(boxes_classes))}
219
224
  for k, poly in zip(boxes_classes, boxes):
220
225
  boxes_dict[k].append(poly)
221
226
  boxes_dict = {k: np.stack(v, axis=0) for k, v in boxes_dict.items()}
doctr/datasets/vocabs.py CHANGED
@@ -1,15 +1,14 @@
1
- # Copyright (C) 2021-2024, Mindee.
1
+ # Copyright (C) 2021-2025, Mindee.
2
2
 
3
3
  # This program is licensed under the Apache License 2.0.
4
4
  # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
5
5
 
6
6
  import string
7
- from typing import Dict
8
7
 
9
8
  __all__ = ["VOCABS"]
10
9
 
11
10
 
12
- VOCABS: Dict[str, str] = {
11
+ VOCABS: dict[str, str] = {
13
12
  "digits": string.digits,
14
13
  "ascii_letters": string.ascii_letters,
15
14
  "punctuation": string.punctuation,
@@ -20,9 +19,13 @@ VOCABS: Dict[str, str] = {
20
19
  "arabic_digits": "٠١٢٣٤٥٦٧٨٩",
21
20
  "arabic_diacritics": "ًٌٍَُِّْ",
22
21
  "arabic_punctuation": "؟؛«»—",
23
- "hindi_letters": "अआइईउऊऋॠऌॡएऐओऔअंअःकखगघङचछजझञटठडढणतथदधनपफबभमयरलवशषसह",
22
+ "hindi_letters": "अआइईउऊऋॠऌॡएऐओऔंःकखगघङचछजझञटठडढणतथदधनपफबभमयरलवशषसह",
24
23
  "hindi_digits": "०१२३४५६७८९",
25
- "hindi_punctuation": "।,?!:्ॐ॰॥॰",
24
+ "hindi_punctuation": "।,?!:्ॐ॰॥",
25
+ "gujarati_vowels": "અઆઇઈઉઊઋએઐઓ",
26
+ "gujarati_consonants": "ખગઘચછજઝઞટઠડઢણતથદધનપફબભમયરલવશસહળક્ષ",
27
+ "gujarati_digits": "૦૧૨૩૪૫૬૭૮૯",
28
+ "gujarati_punctuation": "૰ઽ◌ંઃ॥ૐ઼ઁ" + "૱",
26
29
  "bangla_letters": "অআইঈউঊঋএঐওঔকখগঘঙচছজঝঞটঠডঢণতথদধনপফবভমযরলশষসহ়ঽািীুূৃেৈোৌ্ৎংঃঁ",
27
30
  "bangla_digits": "০১২৩৪৫৬৭৮৯",
28
31
  "generic_cyrillic_letters": "абвгдежзийклмнопрстуфхцчшщьюяАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЬЮЯ",
@@ -54,11 +57,18 @@ VOCABS["finnish"] = VOCABS["english"] + "äöÄÖ"
54
57
  VOCABS["swedish"] = VOCABS["english"] + "åäöÅÄÖ"
55
58
  VOCABS["vietnamese"] = (
56
59
  VOCABS["english"]
57
- + "áàảạãăắằẳẵặâấầẩẫậđéèẻẽẹêếềểễệóòỏõọôốồổộỗơớờởợỡúùủũụưứừửữựiíìỉĩịýỳỷỹỵ"
58
- + "ÁÀẢẠÃĂẮẰẲẴẶÂẤẦẨẪẬĐÉÈẺẼẸÊẾỀỂỄỆÓÒỎÕỌÔỐỒỔỘỖƠỚỜỞỢỠÚÙỦŨỤƯỨỪỬỮỰIÍÌỈĨỊÝỲỶỸỴ"
60
+ + "áàảạãăắằẳẵặâấầẩẫậđéèẻẽẹêếềểễệóòỏõọôốồổộỗơớờởợỡúùủũụưứừửữựíìỉĩịýỳỷỹỵ"
61
+ + "ÁÀẢẠÃĂẮẰẲẴẶÂẤẦẨẪẬĐÉÈẺẼẸÊẾỀỂỄỆÓÒỎÕỌÔỐỒỔỘỖƠỚỜỞỢỠÚÙỦŨỤƯỨỪỬỮỰÍÌỈĨỊÝỲỶỸỴ"
59
62
  )
60
63
  VOCABS["hebrew"] = VOCABS["english"] + "אבגדהוזחטיכלמנסעפצקרשת" + "₪"
61
64
  VOCABS["hindi"] = VOCABS["hindi_letters"] + VOCABS["hindi_digits"] + VOCABS["hindi_punctuation"]
65
+ VOCABS["gujarati"] = (
66
+ VOCABS["gujarati_vowels"]
67
+ + VOCABS["gujarati_consonants"]
68
+ + VOCABS["gujarati_digits"]
69
+ + VOCABS["gujarati_punctuation"]
70
+ + VOCABS["punctuation"]
71
+ )
62
72
  VOCABS["bangla"] = VOCABS["bangla_letters"] + VOCABS["bangla_digits"]
63
73
  VOCABS["ukrainian"] = (
64
74
  VOCABS["generic_cyrillic_letters"] + VOCABS["digits"] + VOCABS["punctuation"] + VOCABS["currency"] + "ґіїєҐІЇЄ₴"
@@ -1,4 +1,4 @@
1
- # Copyright (C) 2021-2024, Mindee.
1
+ # Copyright (C) 2021-2025, Mindee.
2
2
 
3
3
  # This program is licensed under the Apache License 2.0.
4
4
  # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
@@ -6,9 +6,10 @@
6
6
  import json
7
7
  import os
8
8
  from pathlib import Path
9
- from typing import Any, Dict, List, Tuple, Union
9
+ from typing import Any
10
10
 
11
11
  import numpy as np
12
+ from tqdm import tqdm
12
13
 
13
14
  from .datasets import AbstractDataset
14
15
  from .utils import convert_target_to_relative, crop_bboxes_from_image
@@ -17,9 +18,10 @@ __all__ = ["WILDRECEIPT"]
17
18
 
18
19
 
19
20
  class WILDRECEIPT(AbstractDataset):
20
- """WildReceipt dataset from `"Spatial Dual-Modality Graph Reasoning for Key Information Extraction"
21
- <https://arxiv.org/abs/2103.14470v1>`_ |
22
- `repository <https://download.openmmlab.com/mmocr/data/wildreceipt.tar>`_.
21
+ """
22
+ WildReceipt dataset from `"Spatial Dual-Modality Graph Reasoning for Key Information Extraction"
23
+ <https://arxiv.org/abs/2103.14470v1>`_ |
24
+ `"repository" <https://download.openmmlab.com/mmocr/data/wildreceipt.tar>`_.
23
25
 
24
26
  .. image:: https://doctr-static.mindee.com/models?id=v0.7.0/wildreceipt-dataset.jpg&src=0
25
27
  :align: center
@@ -34,7 +36,6 @@ class WILDRECEIPT(AbstractDataset):
34
36
  >>> img, target = test_set[0]
35
37
 
36
38
  Args:
37
- ----
38
39
  img_folder: folder with all the images of the dataset
39
40
  label_path: path to the annotations file of the dataset
40
41
  train: whether the subset should be the training one
@@ -71,15 +72,18 @@ class WILDRECEIPT(AbstractDataset):
71
72
  tmp_root = img_folder
72
73
  self.train = train
73
74
  np_dtype = np.float32
74
- self.data: List[Tuple[Union[str, Path, np.ndarray], Union[str, Dict[str, Any], np.ndarray]]] = []
75
+ self.data: list[tuple[str | Path | np.ndarray, str | dict[str, Any] | np.ndarray]] = []
75
76
 
76
77
  with open(label_path, "r") as file:
77
78
  data = file.read()
78
79
  # Split the text file into separate JSON strings
79
80
  json_strings = data.strip().split("\n")
80
- box: Union[List[float], np.ndarray]
81
- _targets = []
82
- for json_string in json_strings:
81
+ box: list[float] | np.ndarray
82
+
83
+ for json_string in tqdm(
84
+ iterable=json_strings, desc="Preparing and Loading WILDRECEIPT", total=len(json_strings)
85
+ ):
86
+ _targets = []
83
87
  json_data = json.loads(json_string)
84
88
  img_path = json_data["file_name"]
85
89
  annotations = json_data["annotations"]
doctr/file_utils.py CHANGED
@@ -1,4 +1,4 @@
1
- # Copyright (C) 2021-2024, Mindee.
1
+ # Copyright (C) 2021-2025, Mindee.
2
2
 
3
3
  # This program is licensed under the Apache License 2.0.
4
4
  # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
@@ -9,7 +9,6 @@ import importlib.metadata
9
9
  import importlib.util
10
10
  import logging
11
11
  import os
12
- from typing import Optional
13
12
 
14
13
  CLASS_NAME: str = "words"
15
14
 
@@ -80,10 +79,7 @@ if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VA
80
79
  else:
81
80
  logging.info(f"TensorFlow version {_tf_version} available.")
82
81
  ensure_keras_v2()
83
- import tensorflow as tf
84
82
 
85
- # Enable eager execution - this is required for some models to work properly
86
- tf.config.run_functions_eagerly(True)
87
83
  else: # pragma: no cover
88
84
  logging.info("Disabling Tensorflow because USE_TORCH is set")
89
85
  _tf_available = False
@@ -96,12 +92,11 @@ if not _torch_available and not _tf_available: # pragma: no cover
96
92
  )
97
93
 
98
94
 
99
- def requires_package(name: str, extra_message: Optional[str] = None) -> None: # pragma: no cover
95
+ def requires_package(name: str, extra_message: str | None = None) -> None: # pragma: no cover
100
96
  """
101
97
  package requirement helper
102
98
 
103
99
  Args:
104
- ----
105
100
  name: name of the package
106
101
  extra_message: additional message to display if the package is not found
107
102
  """