python-doctr 0.10.0__py3-none-any.whl → 0.11.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- doctr/contrib/__init__.py +1 -0
- doctr/contrib/artefacts.py +7 -9
- doctr/contrib/base.py +8 -17
- doctr/datasets/cord.py +8 -7
- doctr/datasets/datasets/__init__.py +4 -4
- doctr/datasets/datasets/base.py +16 -16
- doctr/datasets/datasets/pytorch.py +12 -12
- doctr/datasets/datasets/tensorflow.py +10 -10
- doctr/datasets/detection.py +6 -9
- doctr/datasets/doc_artefacts.py +3 -4
- doctr/datasets/funsd.py +7 -6
- doctr/datasets/generator/__init__.py +4 -4
- doctr/datasets/generator/base.py +16 -17
- doctr/datasets/generator/pytorch.py +1 -3
- doctr/datasets/generator/tensorflow.py +1 -3
- doctr/datasets/ic03.py +4 -5
- doctr/datasets/ic13.py +4 -5
- doctr/datasets/iiit5k.py +6 -5
- doctr/datasets/iiithws.py +4 -5
- doctr/datasets/imgur5k.py +6 -5
- doctr/datasets/loader.py +4 -7
- doctr/datasets/mjsynth.py +6 -5
- doctr/datasets/ocr.py +3 -4
- doctr/datasets/orientation.py +3 -4
- doctr/datasets/recognition.py +3 -4
- doctr/datasets/sroie.py +6 -5
- doctr/datasets/svhn.py +6 -5
- doctr/datasets/svt.py +4 -5
- doctr/datasets/synthtext.py +4 -5
- doctr/datasets/utils.py +34 -29
- doctr/datasets/vocabs.py +17 -7
- doctr/datasets/wildreceipt.py +14 -10
- doctr/file_utils.py +2 -7
- doctr/io/elements.py +59 -79
- doctr/io/html.py +1 -3
- doctr/io/image/__init__.py +3 -3
- doctr/io/image/base.py +2 -5
- doctr/io/image/pytorch.py +3 -12
- doctr/io/image/tensorflow.py +2 -11
- doctr/io/pdf.py +5 -7
- doctr/io/reader.py +5 -11
- doctr/models/_utils.py +14 -22
- doctr/models/builder.py +30 -48
- doctr/models/classification/magc_resnet/__init__.py +3 -3
- doctr/models/classification/magc_resnet/pytorch.py +10 -13
- doctr/models/classification/magc_resnet/tensorflow.py +8 -11
- doctr/models/classification/mobilenet/__init__.py +3 -3
- doctr/models/classification/mobilenet/pytorch.py +5 -17
- doctr/models/classification/mobilenet/tensorflow.py +8 -21
- doctr/models/classification/predictor/__init__.py +4 -4
- doctr/models/classification/predictor/pytorch.py +6 -8
- doctr/models/classification/predictor/tensorflow.py +6 -8
- doctr/models/classification/resnet/__init__.py +4 -4
- doctr/models/classification/resnet/pytorch.py +21 -31
- doctr/models/classification/resnet/tensorflow.py +20 -31
- doctr/models/classification/textnet/__init__.py +3 -3
- doctr/models/classification/textnet/pytorch.py +10 -17
- doctr/models/classification/textnet/tensorflow.py +8 -15
- doctr/models/classification/vgg/__init__.py +3 -3
- doctr/models/classification/vgg/pytorch.py +5 -7
- doctr/models/classification/vgg/tensorflow.py +9 -12
- doctr/models/classification/vit/__init__.py +3 -3
- doctr/models/classification/vit/pytorch.py +8 -14
- doctr/models/classification/vit/tensorflow.py +6 -12
- doctr/models/classification/zoo.py +19 -14
- doctr/models/core.py +3 -3
- doctr/models/detection/_utils/__init__.py +4 -4
- doctr/models/detection/_utils/base.py +4 -7
- doctr/models/detection/_utils/pytorch.py +1 -5
- doctr/models/detection/_utils/tensorflow.py +1 -5
- doctr/models/detection/core.py +2 -8
- doctr/models/detection/differentiable_binarization/__init__.py +4 -4
- doctr/models/detection/differentiable_binarization/base.py +7 -17
- doctr/models/detection/differentiable_binarization/pytorch.py +27 -30
- doctr/models/detection/differentiable_binarization/tensorflow.py +15 -25
- doctr/models/detection/fast/__init__.py +4 -4
- doctr/models/detection/fast/base.py +6 -14
- doctr/models/detection/fast/pytorch.py +24 -31
- doctr/models/detection/fast/tensorflow.py +14 -26
- doctr/models/detection/linknet/__init__.py +4 -4
- doctr/models/detection/linknet/base.py +6 -15
- doctr/models/detection/linknet/pytorch.py +24 -27
- doctr/models/detection/linknet/tensorflow.py +14 -23
- doctr/models/detection/predictor/__init__.py +5 -5
- doctr/models/detection/predictor/pytorch.py +6 -7
- doctr/models/detection/predictor/tensorflow.py +5 -6
- doctr/models/detection/zoo.py +27 -7
- doctr/models/factory/hub.py +3 -7
- doctr/models/kie_predictor/__init__.py +5 -5
- doctr/models/kie_predictor/base.py +4 -5
- doctr/models/kie_predictor/pytorch.py +18 -19
- doctr/models/kie_predictor/tensorflow.py +13 -14
- doctr/models/modules/layers/__init__.py +3 -3
- doctr/models/modules/layers/pytorch.py +6 -9
- doctr/models/modules/layers/tensorflow.py +5 -7
- doctr/models/modules/transformer/__init__.py +3 -3
- doctr/models/modules/transformer/pytorch.py +12 -13
- doctr/models/modules/transformer/tensorflow.py +9 -10
- doctr/models/modules/vision_transformer/__init__.py +3 -3
- doctr/models/modules/vision_transformer/pytorch.py +2 -3
- doctr/models/modules/vision_transformer/tensorflow.py +3 -3
- doctr/models/predictor/__init__.py +5 -5
- doctr/models/predictor/base.py +28 -29
- doctr/models/predictor/pytorch.py +12 -13
- doctr/models/predictor/tensorflow.py +8 -9
- doctr/models/preprocessor/__init__.py +4 -4
- doctr/models/preprocessor/pytorch.py +13 -17
- doctr/models/preprocessor/tensorflow.py +10 -14
- doctr/models/recognition/core.py +3 -7
- doctr/models/recognition/crnn/__init__.py +4 -4
- doctr/models/recognition/crnn/pytorch.py +20 -28
- doctr/models/recognition/crnn/tensorflow.py +11 -23
- doctr/models/recognition/master/__init__.py +3 -3
- doctr/models/recognition/master/base.py +3 -7
- doctr/models/recognition/master/pytorch.py +22 -24
- doctr/models/recognition/master/tensorflow.py +12 -22
- doctr/models/recognition/parseq/__init__.py +3 -3
- doctr/models/recognition/parseq/base.py +3 -7
- doctr/models/recognition/parseq/pytorch.py +26 -26
- doctr/models/recognition/parseq/tensorflow.py +16 -22
- doctr/models/recognition/predictor/__init__.py +5 -5
- doctr/models/recognition/predictor/_utils.py +7 -10
- doctr/models/recognition/predictor/pytorch.py +6 -6
- doctr/models/recognition/predictor/tensorflow.py +5 -6
- doctr/models/recognition/sar/__init__.py +4 -4
- doctr/models/recognition/sar/pytorch.py +20 -21
- doctr/models/recognition/sar/tensorflow.py +12 -21
- doctr/models/recognition/utils.py +5 -10
- doctr/models/recognition/vitstr/__init__.py +4 -4
- doctr/models/recognition/vitstr/base.py +3 -7
- doctr/models/recognition/vitstr/pytorch.py +18 -20
- doctr/models/recognition/vitstr/tensorflow.py +12 -20
- doctr/models/recognition/zoo.py +22 -11
- doctr/models/utils/__init__.py +4 -4
- doctr/models/utils/pytorch.py +14 -17
- doctr/models/utils/tensorflow.py +17 -16
- doctr/models/zoo.py +1 -5
- doctr/transforms/functional/__init__.py +3 -3
- doctr/transforms/functional/base.py +4 -11
- doctr/transforms/functional/pytorch.py +20 -28
- doctr/transforms/functional/tensorflow.py +10 -22
- doctr/transforms/modules/__init__.py +4 -4
- doctr/transforms/modules/base.py +48 -55
- doctr/transforms/modules/pytorch.py +58 -22
- doctr/transforms/modules/tensorflow.py +18 -32
- doctr/utils/common_types.py +8 -9
- doctr/utils/data.py +8 -12
- doctr/utils/fonts.py +2 -7
- doctr/utils/geometry.py +16 -47
- doctr/utils/metrics.py +17 -37
- doctr/utils/multithreading.py +4 -6
- doctr/utils/reconstitution.py +9 -13
- doctr/utils/repr.py +2 -3
- doctr/utils/visualization.py +16 -29
- doctr/version.py +1 -1
- {python_doctr-0.10.0.dist-info → python_doctr-0.11.0.dist-info}/METADATA +54 -52
- python_doctr-0.11.0.dist-info/RECORD +173 -0
- {python_doctr-0.10.0.dist-info → python_doctr-0.11.0.dist-info}/WHEEL +1 -1
- python_doctr-0.10.0.dist-info/RECORD +0 -173
- {python_doctr-0.10.0.dist-info → python_doctr-0.11.0.dist-info}/LICENSE +0 -0
- {python_doctr-0.10.0.dist-info → python_doctr-0.11.0.dist-info}/top_level.txt +0 -0
- {python_doctr-0.10.0.dist-info → python_doctr-0.11.0.dist-info}/zip-safe +0 -0
doctr/datasets/utils.py
CHANGED

@@ -1,4 +1,4 @@
-# Copyright (C) 2021-2024, Mindee.
+# Copyright (C) 2021-2025, Mindee.
 
 # This program is licensed under the Apache License 2.0.
 # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
@@ -6,10 +6,10 @@
 import string
 import unicodedata
 from collections.abc import Sequence
+from collections.abc import Sequence as SequenceType
 from functools import partial
 from pathlib import Path
-from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union
-from typing import Sequence as SequenceType
+from typing import Any, TypeVar
 
 import numpy as np
 from PIL import Image
@@ -19,7 +19,15 @@ from doctr.utils.geometry import convert_to_relative_coords, extract_crops, extract_rcrops
 
 from .vocabs import VOCABS
 
-__all__ = [
+__all__ = [
+    "translate",
+    "encode_string",
+    "decode_sequence",
+    "encode_sequences",
+    "pre_transform_multiclass",
+    "crop_bboxes_from_image",
+    "convert_target_to_relative",
+]
 
 ImageTensor = TypeVar("ImageTensor")
 
@@ -32,13 +40,11 @@ def translate(
     """Translate a string input in a given vocabulary
 
     Args:
-    ----
         input_string: input string to translate
         vocab_name: vocabulary to use (french, latin, ...)
         unknown_char: unknown character for non-translatable characters
 
     Returns:
-    -------
         A string translated in a given vocab
     """
     if VOCABS.get(vocab_name) is None:
@@ -63,16 +69,14 @@
 def encode_string(
     input_string: str,
     vocab: str,
-) -> List[int]:
+) -> list[int]:
     """Given a predefined mapping, encode the string to a sequence of numbers
 
     Args:
-    ----
         input_string: string to encode
         vocab: vocabulary (string), the encoding is given by the indexing of the character sequence
 
     Returns:
-    -------
         A list encoding the input_string
     """
     try:
@@ -85,18 +89,16 @@ def encode_string(
 
 
 def decode_sequence(
-    input_seq: Union[np.ndarray, SequenceType[int]],
+    input_seq: np.ndarray | SequenceType[int],
     mapping: str,
 ) -> str:
     """Given a predefined mapping, decode the sequence of numbers to a string
 
     Args:
-    ----
         input_seq: array to decode
         mapping: vocabulary (string), the encoding is given by the indexing of the character sequence
 
     Returns:
-    -------
         A string, decoded from input_seq
     """
     if not isinstance(input_seq, (Sequence, np.ndarray)):
@@ -108,18 +110,17 @@
 
 
 def encode_sequences(
-    sequences: List[str],
+    sequences: list[str],
     vocab: str,
-    target_size: Optional[int] = None,
+    target_size: int | None = None,
     eos: int = -1,
-    sos: Optional[int] = None,
-    pad: Optional[int] = None,
+    sos: int | None = None,
+    pad: int | None = None,
     dynamic_seq_length: bool = False,
 ) -> np.ndarray:
     """Encode character sequences using a given vocab as mapping
 
     Args:
-    ----
         sequences: the list of character sequences of size N
         vocab: the ordered vocab to use for encoding
         target_size: maximum length of the encoded data
@@ -129,7 +130,6 @@ def encode_sequences(
         dynamic_seq_length: if `target_size` is specified, uses it as upper bound and enables dynamic sequence size
 
     Returns:
-    -------
         the padded encoded data as a tensor
     """
     if 0 <= eos < len(vocab):
@@ -170,25 +170,32 @@
 
 
 def convert_target_to_relative(
-    img: ImageTensor, target: Union[np.ndarray, Dict[str, Any]]
-) -> Tuple[ImageTensor, Union[Dict[str, Any], np.ndarray]]:
+    img: ImageTensor, target: np.ndarray | dict[str, Any]
+) -> tuple[ImageTensor, dict[str, Any] | np.ndarray]:
+    """Converts target to relative coordinates
+
+    Args:
+        img: tf.Tensor or torch.Tensor representing the image
+        target: target to convert to relative coordinates (boxes (N, 4) or polygons (N, 4, 2))
+
+    Returns:
+        The image and the target in relative coordinates
+    """
     if isinstance(target, np.ndarray):
-        target = convert_to_relative_coords(target, get_img_shape(img))
+        target = convert_to_relative_coords(target, get_img_shape(img))  # type: ignore[arg-type]
     else:
-        target["boxes"] = convert_to_relative_coords(target["boxes"], get_img_shape(img))
+        target["boxes"] = convert_to_relative_coords(target["boxes"], get_img_shape(img))  # type: ignore[arg-type]
     return img, target
 
 
-def crop_bboxes_from_image(img_path: Union[str, Path], geoms: np.ndarray) -> List[np.ndarray]:
+def crop_bboxes_from_image(img_path: str | Path, geoms: np.ndarray) -> list[np.ndarray]:
     """Crop a set of bounding boxes from an image
 
     Args:
-    ----
         img_path: path to the image
         geoms: a array of polygons of shape (N, 4, 2) or of straight boxes of shape (N, 4)
 
     Returns:
-    -------
         a list of cropped images
     """
     with Image.open(img_path) as pil_img:
@@ -201,21 +208,19 @@ def crop_bboxes_from_image(img_path: Union[str, Path], geoms: np.ndarray) -> List[np.ndarray]:
         raise ValueError("Invalid geometry format")
 
 
-def pre_transform_multiclass(img, target: Tuple[np.ndarray, List]) -> Tuple[np.ndarray, Dict[str, List]]:
+def pre_transform_multiclass(img, target: tuple[np.ndarray, list]) -> tuple[np.ndarray, dict[str, list]]:
     """Converts multiclass target to relative coordinates.
 
     Args:
-    ----
         img: Image
         target: tuple of target polygons and their classes names
 
     Returns:
-    -------
         Image and dictionary of boxes, with class names as keys
     """
     boxes = convert_to_relative_coords(target[0], get_img_shape(img))
     boxes_classes = target[1]
-    boxes_dict: Dict = {k: [] for k in sorted(set(boxes_classes))}
+    boxes_dict: dict = {k: [] for k in sorted(set(boxes_classes))}
     for k, poly in zip(boxes_classes, boxes):
         boxes_dict[k].append(poly)
     boxes_dict = {k: np.stack(v, axis=0) for k, v in boxes_dict.items()}
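
The utils.py changes are almost entirely a typing modernization (PEP 585/604: `List`, `Optional` and `Union` become `list` and `X | None`), plus an explicit `__all__` and restored docstrings; there is no runtime behavior change beyond what `__all__` implies for star imports. Below is a minimal sketch of the encode/decode round-trip these signatures describe, assuming python-doctr 0.11.0 is installed; the choice of the "french" vocab and the sample strings are illustrative only:

# Hedged sketch exercising the helpers listed in __all__ above.
from doctr.datasets.utils import decode_sequence, encode_sequences, encode_string
from doctr.datasets.vocabs import VOCABS

vocab = VOCABS["french"]

# encode_string maps each character to its index in the vocab (now typed list[int])
encoded = encode_string("hello", vocab)
# decode_sequence inverts the mapping (accepts np.ndarray | Sequence[int])
assert decode_sequence(encoded, vocab) == "hello"

# encode_sequences batches and pads; target_size, sos and pad are now int | None.
# The pad index must lie outside the vocab's index range, so len(vocab) is a safe choice here.
batch = encode_sequences(
    sequences=["hello", "world"],
    vocab=vocab,
    target_size=16,
    pad=len(vocab),
)
print(batch.shape)  # (2, 16)
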
doctr/datasets/vocabs.py
CHANGED

@@ -1,15 +1,14 @@
-# Copyright (C) 2021-2024, Mindee.
+# Copyright (C) 2021-2025, Mindee.
 
 # This program is licensed under the Apache License 2.0.
 # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
 
 import string
-from typing import Dict
 
 __all__ = ["VOCABS"]
 
 
-VOCABS: Dict[str, str] = {
+VOCABS: dict[str, str] = {
     "digits": string.digits,
     "ascii_letters": string.ascii_letters,
     "punctuation": string.punctuation,
@@ -20,9 +19,13 @@ VOCABS: Dict[str, str] = {
     "arabic_digits": "٠١٢٣٤٥٦٧٨٩",
     "arabic_diacritics": "ًٌٍَُِّْ",
     "arabic_punctuation": "؟؛«»—",
-    "hindi_letters": "
+    "hindi_letters": "अआइईउऊऋॠऌॡएऐओऔंःकखगघङचछजझञटठडढणतथदधनपफबभमयरलवशषसह",
     "hindi_digits": "०१२३४५६७८९",
-    "hindi_punctuation": "
+    "hindi_punctuation": "।,?!:्ॐ॰॥",
+    "gujarati_vowels": "અઆઇઈઉઊઋએઐઓ",
+    "gujarati_consonants": "ખગઘચછજઝઞટઠડઢણતથદધનપફબભમયરલવશસહળક્ષ",
+    "gujarati_digits": "૦૧૨૩૪૫૬૭૮૯",
+    "gujarati_punctuation": "૰ઽ◌ંઃ॥ૐ઼ઁ" + "૱",
     "bangla_letters": "অআইঈউঊঋএঐওঔকখগঘঙচছজঝঞটঠডঢণতথদধনপফবভমযরলশষসহ়ঽািীুূৃেৈোৌ্ৎংঃঁ",
     "bangla_digits": "০১২৩৪৫৬৭৮৯",
     "generic_cyrillic_letters": "абвгдежзийклмнопрстуфхцчшщьюяАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЬЮЯ",
@@ -54,11 +57,18 @@ VOCABS["finnish"] = VOCABS["english"] + "äöÄÖ"
 VOCABS["swedish"] = VOCABS["english"] + "åäöÅÄÖ"
 VOCABS["vietnamese"] = (
     VOCABS["english"]
-    + "
-    + "
+    + "áàảạãăắằẳẵặâấầẩẫậđéèẻẽẹêếềểễệóòỏõọôốồổộỗơớờởợỡúùủũụưứừửữựíìỉĩịýỳỷỹỵ"
+    + "ÁÀẢẠÃĂẮẰẲẴẶÂẤẦẨẪẬĐÉÈẺẼẸÊẾỀỂỄỆÓÒỎÕỌÔỐỒỔỘỖƠỚỜỞỢỠÚÙỦŨỤƯỨỪỬỮỰÍÌỈĨỊÝỲỶỸỴ"
 )
 VOCABS["hebrew"] = VOCABS["english"] + "אבגדהוזחטיכלמנסעפצקרשת" + "₪"
 VOCABS["hindi"] = VOCABS["hindi_letters"] + VOCABS["hindi_digits"] + VOCABS["hindi_punctuation"]
+VOCABS["gujarati"] = (
+    VOCABS["gujarati_vowels"]
+    + VOCABS["gujarati_consonants"]
+    + VOCABS["gujarati_digits"]
+    + VOCABS["gujarati_punctuation"]
+    + VOCABS["punctuation"]
+)
 VOCABS["bangla"] = VOCABS["bangla_letters"] + VOCABS["bangla_digits"]
 VOCABS["ukrainian"] = (
     VOCABS["generic_cyrillic_letters"] + VOCABS["digits"] + VOCABS["punctuation"] + VOCABS["currency"] + "ґіїєҐІЇЄ₴"
doctr/datasets/wildreceipt.py
CHANGED

@@ -1,4 +1,4 @@
-# Copyright (C) 2021-2024, Mindee.
+# Copyright (C) 2021-2025, Mindee.
 
 # This program is licensed under the Apache License 2.0.
 # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
@@ -6,9 +6,10 @@
 import json
 import os
 from pathlib import Path
-from typing import Any, Dict, List, Tuple, Union
+from typing import Any
 
 import numpy as np
+from tqdm import tqdm
 
 from .datasets import AbstractDataset
 from .utils import convert_target_to_relative, crop_bboxes_from_image
@@ -17,9 +18,10 @@ __all__ = ["WILDRECEIPT"]
 
 
 class WILDRECEIPT(AbstractDataset):
-    """
-
-
+    """
+    WildReceipt dataset from `"Spatial Dual-Modality Graph Reasoning for Key Information Extraction"
+    <https://arxiv.org/abs/2103.14470v1>`_ |
+    `"repository" <https://download.openmmlab.com/mmocr/data/wildreceipt.tar>`_.
 
     .. image:: https://doctr-static.mindee.com/models?id=v0.7.0/wildreceipt-dataset.jpg&src=0
         :align: center
@@ -34,7 +36,6 @@ class WILDRECEIPT(AbstractDataset):
     >>> img, target = test_set[0]
 
     Args:
-    ----
         img_folder: folder with all the images of the dataset
         label_path: path to the annotations file of the dataset
         train: whether the subset should be the training one
@@ -71,15 +72,18 @@ class WILDRECEIPT(AbstractDataset):
         tmp_root = img_folder
         self.train = train
         np_dtype = np.float32
-        self.data: List[Tuple[Union[str, Path, np.ndarray], Union[str, Dict[str, Any], np.ndarray]]] = []
+        self.data: list[tuple[str | Path | np.ndarray, str | dict[str, Any] | np.ndarray]] = []
 
         with open(label_path, "r") as file:
             data = file.read()
         # Split the text file into separate JSON strings
         json_strings = data.strip().split("\n")
-        box: Union[List[float], np.ndarray]
-
-        for json_string in json_strings:
+        box: list[float] | np.ndarray
+
+        for json_string in tqdm(
+            iterable=json_strings, desc="Preparing and Loading WILDRECEIPT", total=len(json_strings)
+        ):
+            _targets = []
             json_data = json.loads(json_string)
             img_path = json_data["file_name"]
             annotations = json_data["annotations"]
doctr/file_utils.py
CHANGED

@@ -1,4 +1,4 @@
-# Copyright (C) 2021-2024, Mindee.
+# Copyright (C) 2021-2025, Mindee.
 
 # This program is licensed under the Apache License 2.0.
 # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
@@ -9,7 +9,6 @@ import importlib.metadata
 import importlib.util
 import logging
 import os
-from typing import Optional
 
 CLASS_NAME: str = "words"
 
@@ -80,10 +79,7 @@ if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
     else:
         logging.info(f"TensorFlow version {_tf_version} available.")
         ensure_keras_v2()
-        import tensorflow as tf
 
-        # Enable eager execution - this is required for some models to work properly
-        tf.config.run_functions_eagerly(True)
 else:  # pragma: no cover
     logging.info("Disabling Tensorflow because USE_TORCH is set")
     _tf_available = False
@@ -96,12 +92,11 @@ if not _torch_available and not _tf_available:  # pragma: no cover
     )
 
 
-def requires_package(name: str, extra_message: Optional[str] = None) -> None:  # pragma: no cover
+def requires_package(name: str, extra_message: str | None = None) -> None:  # pragma: no cover
    """
    package requirement helper

    Args:
-    ----
        name: name of the package
        extra_message: additional message to display if the package is not found
    """
|