python-doctr 0.9.0__py3-none-any.whl → 0.11.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- doctr/contrib/__init__.py +1 -0
- doctr/contrib/artefacts.py +7 -9
- doctr/contrib/base.py +8 -17
- doctr/datasets/cord.py +17 -7
- doctr/datasets/datasets/__init__.py +4 -4
- doctr/datasets/datasets/base.py +16 -16
- doctr/datasets/datasets/pytorch.py +12 -12
- doctr/datasets/datasets/tensorflow.py +10 -10
- doctr/datasets/detection.py +6 -9
- doctr/datasets/doc_artefacts.py +3 -4
- doctr/datasets/funsd.py +17 -6
- doctr/datasets/generator/__init__.py +4 -4
- doctr/datasets/generator/base.py +16 -17
- doctr/datasets/generator/pytorch.py +1 -3
- doctr/datasets/generator/tensorflow.py +1 -3
- doctr/datasets/ic03.py +14 -5
- doctr/datasets/ic13.py +13 -5
- doctr/datasets/iiit5k.py +31 -20
- doctr/datasets/iiithws.py +4 -5
- doctr/datasets/imgur5k.py +15 -5
- doctr/datasets/loader.py +4 -7
- doctr/datasets/mjsynth.py +6 -5
- doctr/datasets/ocr.py +3 -4
- doctr/datasets/orientation.py +3 -4
- doctr/datasets/recognition.py +3 -4
- doctr/datasets/sroie.py +16 -5
- doctr/datasets/svhn.py +16 -5
- doctr/datasets/svt.py +14 -5
- doctr/datasets/synthtext.py +14 -5
- doctr/datasets/utils.py +37 -27
- doctr/datasets/vocabs.py +21 -7
- doctr/datasets/wildreceipt.py +25 -10
- doctr/file_utils.py +18 -4
- doctr/io/elements.py +69 -81
- doctr/io/html.py +1 -3
- doctr/io/image/__init__.py +3 -3
- doctr/io/image/base.py +2 -5
- doctr/io/image/pytorch.py +3 -12
- doctr/io/image/tensorflow.py +2 -11
- doctr/io/pdf.py +5 -7
- doctr/io/reader.py +5 -11
- doctr/models/_utils.py +14 -22
- doctr/models/builder.py +32 -50
- doctr/models/classification/magc_resnet/__init__.py +3 -3
- doctr/models/classification/magc_resnet/pytorch.py +10 -13
- doctr/models/classification/magc_resnet/tensorflow.py +21 -17
- doctr/models/classification/mobilenet/__init__.py +3 -3
- doctr/models/classification/mobilenet/pytorch.py +7 -17
- doctr/models/classification/mobilenet/tensorflow.py +22 -29
- doctr/models/classification/predictor/__init__.py +4 -4
- doctr/models/classification/predictor/pytorch.py +13 -11
- doctr/models/classification/predictor/tensorflow.py +13 -11
- doctr/models/classification/resnet/__init__.py +4 -4
- doctr/models/classification/resnet/pytorch.py +21 -31
- doctr/models/classification/resnet/tensorflow.py +41 -39
- doctr/models/classification/textnet/__init__.py +3 -3
- doctr/models/classification/textnet/pytorch.py +10 -17
- doctr/models/classification/textnet/tensorflow.py +19 -20
- doctr/models/classification/vgg/__init__.py +3 -3
- doctr/models/classification/vgg/pytorch.py +5 -7
- doctr/models/classification/vgg/tensorflow.py +18 -15
- doctr/models/classification/vit/__init__.py +3 -3
- doctr/models/classification/vit/pytorch.py +8 -14
- doctr/models/classification/vit/tensorflow.py +16 -16
- doctr/models/classification/zoo.py +36 -19
- doctr/models/core.py +3 -3
- doctr/models/detection/_utils/__init__.py +4 -4
- doctr/models/detection/_utils/base.py +4 -7
- doctr/models/detection/_utils/pytorch.py +1 -5
- doctr/models/detection/_utils/tensorflow.py +1 -5
- doctr/models/detection/core.py +2 -8
- doctr/models/detection/differentiable_binarization/__init__.py +4 -4
- doctr/models/detection/differentiable_binarization/base.py +7 -17
- doctr/models/detection/differentiable_binarization/pytorch.py +27 -30
- doctr/models/detection/differentiable_binarization/tensorflow.py +49 -37
- doctr/models/detection/fast/__init__.py +4 -4
- doctr/models/detection/fast/base.py +6 -14
- doctr/models/detection/fast/pytorch.py +24 -31
- doctr/models/detection/fast/tensorflow.py +28 -37
- doctr/models/detection/linknet/__init__.py +4 -4
- doctr/models/detection/linknet/base.py +6 -15
- doctr/models/detection/linknet/pytorch.py +24 -27
- doctr/models/detection/linknet/tensorflow.py +36 -33
- doctr/models/detection/predictor/__init__.py +5 -5
- doctr/models/detection/predictor/pytorch.py +6 -7
- doctr/models/detection/predictor/tensorflow.py +7 -8
- doctr/models/detection/zoo.py +27 -7
- doctr/models/factory/hub.py +8 -13
- doctr/models/kie_predictor/__init__.py +5 -5
- doctr/models/kie_predictor/base.py +8 -5
- doctr/models/kie_predictor/pytorch.py +22 -19
- doctr/models/kie_predictor/tensorflow.py +21 -15
- doctr/models/modules/layers/__init__.py +3 -3
- doctr/models/modules/layers/pytorch.py +6 -9
- doctr/models/modules/layers/tensorflow.py +5 -7
- doctr/models/modules/transformer/__init__.py +3 -3
- doctr/models/modules/transformer/pytorch.py +12 -13
- doctr/models/modules/transformer/tensorflow.py +9 -12
- doctr/models/modules/vision_transformer/__init__.py +3 -3
- doctr/models/modules/vision_transformer/pytorch.py +3 -4
- doctr/models/modules/vision_transformer/tensorflow.py +4 -4
- doctr/models/predictor/__init__.py +5 -5
- doctr/models/predictor/base.py +52 -41
- doctr/models/predictor/pytorch.py +16 -13
- doctr/models/predictor/tensorflow.py +16 -10
- doctr/models/preprocessor/__init__.py +4 -4
- doctr/models/preprocessor/pytorch.py +13 -17
- doctr/models/preprocessor/tensorflow.py +11 -15
- doctr/models/recognition/core.py +3 -7
- doctr/models/recognition/crnn/__init__.py +4 -4
- doctr/models/recognition/crnn/pytorch.py +20 -28
- doctr/models/recognition/crnn/tensorflow.py +19 -29
- doctr/models/recognition/master/__init__.py +3 -3
- doctr/models/recognition/master/base.py +3 -7
- doctr/models/recognition/master/pytorch.py +22 -24
- doctr/models/recognition/master/tensorflow.py +21 -26
- doctr/models/recognition/parseq/__init__.py +3 -3
- doctr/models/recognition/parseq/base.py +3 -7
- doctr/models/recognition/parseq/pytorch.py +26 -26
- doctr/models/recognition/parseq/tensorflow.py +26 -30
- doctr/models/recognition/predictor/__init__.py +5 -5
- doctr/models/recognition/predictor/_utils.py +7 -10
- doctr/models/recognition/predictor/pytorch.py +6 -6
- doctr/models/recognition/predictor/tensorflow.py +5 -6
- doctr/models/recognition/sar/__init__.py +4 -4
- doctr/models/recognition/sar/pytorch.py +20 -21
- doctr/models/recognition/sar/tensorflow.py +19 -24
- doctr/models/recognition/utils.py +5 -10
- doctr/models/recognition/vitstr/__init__.py +4 -4
- doctr/models/recognition/vitstr/base.py +3 -7
- doctr/models/recognition/vitstr/pytorch.py +18 -20
- doctr/models/recognition/vitstr/tensorflow.py +21 -24
- doctr/models/recognition/zoo.py +22 -11
- doctr/models/utils/__init__.py +4 -4
- doctr/models/utils/pytorch.py +13 -16
- doctr/models/utils/tensorflow.py +31 -30
- doctr/models/zoo.py +1 -5
- doctr/transforms/functional/__init__.py +3 -3
- doctr/transforms/functional/base.py +4 -11
- doctr/transforms/functional/pytorch.py +21 -29
- doctr/transforms/functional/tensorflow.py +10 -22
- doctr/transforms/modules/__init__.py +4 -4
- doctr/transforms/modules/base.py +48 -55
- doctr/transforms/modules/pytorch.py +65 -28
- doctr/transforms/modules/tensorflow.py +33 -44
- doctr/utils/common_types.py +8 -9
- doctr/utils/data.py +8 -12
- doctr/utils/fonts.py +2 -7
- doctr/utils/geometry.py +120 -64
- doctr/utils/metrics.py +18 -38
- doctr/utils/multithreading.py +4 -6
- doctr/utils/reconstitution.py +157 -75
- doctr/utils/repr.py +2 -3
- doctr/utils/visualization.py +16 -29
- doctr/version.py +1 -1
- {python_doctr-0.9.0.dist-info → python_doctr-0.11.0.dist-info}/METADATA +59 -57
- python_doctr-0.11.0.dist-info/RECORD +173 -0
- {python_doctr-0.9.0.dist-info → python_doctr-0.11.0.dist-info}/WHEEL +1 -1
- python_doctr-0.9.0.dist-info/RECORD +0 -173
- {python_doctr-0.9.0.dist-info → python_doctr-0.11.0.dist-info}/LICENSE +0 -0
- {python_doctr-0.9.0.dist-info → python_doctr-0.11.0.dist-info}/top_level.txt +0 -0
- {python_doctr-0.9.0.dist-info → python_doctr-0.11.0.dist-info}/zip-safe +0 -0
doctr/datasets/sroie.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright (C) 2021-2024, Mindee.
+# Copyright (C) 2021-2025, Mindee.
 
 # This program is licensed under the Apache License 2.0.
 # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
@@ -6,7 +6,7 @@
 import csv
 import os
 from pathlib import Path
-from typing import Any, Dict, List, Tuple, Union
+from typing import Any
 
 import numpy as np
 from tqdm import tqdm
@@ -29,10 +29,10 @@ class SROIE(VisionDataset):
     >>> img, target = train_set[0]
 
     Args:
-    ----
         train: whether the subset should be the training one
         use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
         recognition_task: whether the dataset should be used for recognition task
+        detection_task: whether the dataset should be used for detection task
         **kwargs: keyword arguments from `VisionDataset`.
     """
 
@@ -52,6 +52,7 @@ class SROIE(VisionDataset):
         train: bool = True,
         use_polygons: bool = False,
         recognition_task: bool = False,
+        detection_task: bool = False,
         **kwargs: Any,
     ) -> None:
         url, sha256, name = self.TRAIN if train else self.TEST
@@ -63,13 +64,21 @@
             pre_transforms=convert_target_to_relative if not recognition_task else None,
             **kwargs,
         )
+        if recognition_task and detection_task:
+            raise ValueError(
+                "`recognition_task` and `detection_task` cannot be set to True simultaneously. "
+                + "To get the whole dataset with boxes and labels leave both parameters to False."
+            )
+
         self.train = train
 
         tmp_root = os.path.join(self.root, "images")
-        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any], np.ndarray]]] = []
+        self.data: list[tuple[str | np.ndarray, str | dict[str, Any] | np.ndarray]] = []
         np_dtype = np.float32
 
-        for img_path in tqdm(iterable=os.listdir(tmp_root), desc="Unpacking SROIE", total=len(os.listdir(tmp_root))):
+        for img_path in tqdm(
+            iterable=os.listdir(tmp_root), desc="Preparing and Loading SROIE", total=len(os.listdir(tmp_root))
+        ):
             # File existence check
             if not os.path.exists(os.path.join(tmp_root, img_path)):
                 raise FileNotFoundError(f"unable to locate {os.path.join(tmp_root, img_path)}")
@@ -94,6 +103,8 @@ class SROIE(VisionDataset):
                 for crop, label in zip(crops, labels):
                     if crop.shape[0] > 0 and crop.shape[1] > 0 and len(label) > 0:
                         self.data.append((crop, label))
+            elif detection_task:
+                self.data.append((img_path, coords))
             else:
                 self.data.append((img_path, dict(boxes=coords, labels=labels)))
 
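Taken together, these hunks add a third, mutually exclusive dataset mode. A minimal usage sketch in the doctest style of the class docstrings (`download=True` comes from `VisionDataset`; the target layout is inferred from the diff above):

>>> from doctr.datasets import SROIE
>>> train_set = SROIE(train=True, download=True, detection_task=True)
>>> img, target = train_set[0]  # target is a plain coordinates array instead of a dict
>>> # Requesting both modes at once triggers the new guard
>>> SROIE(train=True, download=True, recognition_task=True, detection_task=True)
Traceback (most recent call last):
    ...
ValueError: `recognition_task` and `detection_task` cannot be set to True simultaneously. To get the whole dataset with boxes and labels leave both parameters to False.

The same flag and guard appear verbatim in SVHN, SVT, SynthText and WILDRECEIPT below.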
doctr/datasets/svhn.py
CHANGED
@@ -1,10 +1,10 @@
-# Copyright (C) 2021-2024, Mindee.
+# Copyright (C) 2021-2025, Mindee.
 
 # This program is licensed under the Apache License 2.0.
 # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
 
 import os
-from typing import Any, Dict, List, Tuple, Union
+from typing import Any
 
 import h5py
 import numpy as np
@@ -28,10 +28,10 @@ class SVHN(VisionDataset):
     >>> img, target = train_set[0]
 
     Args:
-    ----
         train: whether the subset should be the training one
         use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
         recognition_task: whether the dataset should be used for recognition task
+        detection_task: whether the dataset should be used for detection task
         **kwargs: keyword arguments from `VisionDataset`.
     """
 
@@ -52,6 +52,7 @@ class SVHN(VisionDataset):
         train: bool = True,
         use_polygons: bool = False,
         recognition_task: bool = False,
+        detection_task: bool = False,
         **kwargs: Any,
     ) -> None:
         url, sha256, name = self.TRAIN if train else self.TEST
@@ -63,8 +64,14 @@ class SVHN(VisionDataset):
             pre_transforms=convert_target_to_relative if not recognition_task else None,
             **kwargs,
         )
+        if recognition_task and detection_task:
+            raise ValueError(
+                "`recognition_task` and `detection_task` cannot be set to True simultaneously. "
+                + "To get the whole dataset with boxes and labels leave both parameters to False."
+            )
+
         self.train = train
-        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any], np.ndarray]]] = []
+        self.data: list[tuple[str | np.ndarray, str | dict[str, Any] | np.ndarray]] = []
         np_dtype = np.float32
 
         tmp_root = os.path.join(self.root, "train" if train else "test")
@@ -73,7 +80,9 @@
         with h5py.File(os.path.join(tmp_root, "digitStruct.mat"), "r") as f:
             img_refs = f["digitStruct/name"]
             box_refs = f["digitStruct/bbox"]
-            for img_ref, box_ref in tqdm(iterable=zip(img_refs, box_refs), desc="Unpacking SVHN", total=len(img_refs)):
+            for img_ref, box_ref in tqdm(
+                iterable=zip(img_refs, box_refs), desc="Preparing and Loading SVHN", total=len(img_refs)
+            ):
                 # convert ascii matrix to string
                 img_name = "".join(map(chr, f[img_ref[0]][()].flatten()))
 
@@ -122,6 +131,8 @@
                     for crop, label in zip(crops, label_targets):
                         if crop.shape[0] > 0 and crop.shape[1] > 0 and len(label) > 0:
                             self.data.append((crop, label))
+                elif detection_task:
+                    self.data.append((img_name, box_targets))
                 else:
                     self.data.append((img_name, dict(boxes=box_targets, labels=label_targets)))
 
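As the last hunk shows, the sample layout now depends on the selected mode. An illustrative sketch (download behavior assumed from the docstring):

>>> from doctr.datasets import SVHN
>>> _, target = SVHN(train=True, download=True)[0]                           # dict(boxes=..., labels=...) (default)
>>> _, boxes = SVHN(train=True, download=True, detection_task=True)[0]       # boxes array only
>>> crop, label = SVHN(train=True, download=True, recognition_task=True)[0]  # digit crop and its string label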
doctr/datasets/svt.py
CHANGED
@@ -1,10 +1,10 @@
-# Copyright (C) 2021-2024, Mindee.
+# Copyright (C) 2021-2025, Mindee.
 
 # This program is licensed under the Apache License 2.0.
 # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
 
 import os
-from typing import Any, Dict, List, Tuple, Union
+from typing import Any
 
 import defusedxml.ElementTree as ET
 import numpy as np
@@ -28,10 +28,10 @@ class SVT(VisionDataset):
     >>> img, target = train_set[0]
 
     Args:
-    ----
         train: whether the subset should be the training one
         use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
         recognition_task: whether the dataset should be used for recognition task
+        detection_task: whether the dataset should be used for detection task
         **kwargs: keyword arguments from `VisionDataset`.
     """
 
@@ -43,6 +43,7 @@ class SVT(VisionDataset):
         train: bool = True,
         use_polygons: bool = False,
         recognition_task: bool = False,
+        detection_task: bool = False,
         **kwargs: Any,
     ) -> None:
         super().__init__(
@@ -53,8 +54,14 @@ class SVT(VisionDataset):
             pre_transforms=convert_target_to_relative if not recognition_task else None,
             **kwargs,
         )
+        if recognition_task and detection_task:
+            raise ValueError(
+                "`recognition_task` and `detection_task` cannot be set to True simultaneously. "
+                + "To get the whole dataset with boxes and labels leave both parameters to False."
+            )
+
         self.train = train
-        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any], np.ndarray]]] = []
+        self.data: list[tuple[str | np.ndarray, str | dict[str, Any] | np.ndarray]] = []
         np_dtype = np.float32
 
         # Load xml data
@@ -66,7 +73,7 @@
         )
         xml_root = xml_tree.getroot()
 
-        for image in tqdm(iterable=xml_root, desc="Unpacking SVT", total=len(xml_root)):
+        for image in tqdm(iterable=xml_root, desc="Preparing and Loading SVT", total=len(xml_root)):
             name, _, _, _resolution, rectangles = image
 
             # File existence check
@@ -108,6 +115,8 @@
                 for crop, label in zip(crops, labels):
                     if crop.shape[0] > 0 and crop.shape[1] > 0 and len(label) > 0:
                         self.data.append((crop, label))
+            elif detection_task:
+                self.data.append((name.text, boxes))
             else:
                 self.data.append((name.text, dict(boxes=boxes, labels=labels)))
 
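Unrelated to the new flag, but worth recalling when reading these docstrings: `use_polygons` switches the geometry of the returned targets. A sketch, with the shapes taken from the `(N, 4)` boxes / `(N, 4, 2)` polygons convention documented in `doctr/datasets/utils.py`:

>>> from doctr.datasets import SVT
>>> _, target = SVT(train=True, download=True)[0]                     # target["boxes"]: straight boxes, (N, 4)
>>> _, target = SVT(train=True, download=True, use_polygons=True)[0]  # target["boxes"]: rotated 4-point boxes, (N, 4, 2)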
doctr/datasets/synthtext.py
CHANGED
@@ -1,11 +1,11 @@
-# Copyright (C) 2021-2024, Mindee.
+# Copyright (C) 2021-2025, Mindee.
 
 # This program is licensed under the Apache License 2.0.
 # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
 
 import glob
 import os
-from typing import Any, Dict, List, Tuple, Union
+from typing import Any
 
 import numpy as np
 from PIL import Image
@@ -31,10 +31,10 @@ class SynthText(VisionDataset):
     >>> img, target = train_set[0]
 
     Args:
-    ----
         train: whether the subset should be the training one
         use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
         recognition_task: whether the dataset should be used for recognition task
+        detection_task: whether the dataset should be used for detection task
         **kwargs: keyword arguments from `VisionDataset`.
     """
 
@@ -46,6 +46,7 @@ class SynthText(VisionDataset):
         train: bool = True,
         use_polygons: bool = False,
         recognition_task: bool = False,
+        detection_task: bool = False,
         **kwargs: Any,
     ) -> None:
         super().__init__(
@@ -56,8 +57,14 @@ class SynthText(VisionDataset):
             pre_transforms=convert_target_to_relative if not recognition_task else None,
             **kwargs,
         )
+        if recognition_task and detection_task:
+            raise ValueError(
+                "`recognition_task` and `detection_task` cannot be set to True simultaneously. "
+                + "To get the whole dataset with boxes and labels leave both parameters to False."
+            )
+
         self.train = train
-        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any], np.ndarray]]] = []
+        self.data: list[tuple[str | np.ndarray, str | dict[str, Any] | np.ndarray]] = []
         np_dtype = np.float32
 
         # Load mat data
@@ -83,7 +90,7 @@
         del mat_data
 
         for img_path, word_boxes, txt in tqdm(
-            iterable=zip(paths, boxes, labels), desc="Unpacking SynthText", total=len(paths)
+            iterable=zip(paths, boxes, labels), desc="Preparing and Loading SynthText", total=len(paths)
         ):
             # File existence check
             if not os.path.exists(os.path.join(tmp_root, img_path[0])):
@@ -111,6 +118,8 @@
                         tmp_img = Image.fromarray(crop)
                         tmp_img.save(os.path.join(reco_folder_path, f"{reco_images_counter}.png"))
                         reco_images_counter += 1
+            elif detection_task:
+                self.data.append((img_path[0], np.asarray(word_boxes, dtype=np_dtype)))
             else:
                 self.data.append((img_path[0], dict(boxes=np.asarray(word_boxes, dtype=np_dtype), labels=labels)))
 
doctr/datasets/utils.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright (C) 2021-2024, Mindee.
+# Copyright (C) 2021-2025, Mindee.
 
 # This program is licensed under the Apache License 2.0.
 # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
@@ -6,10 +6,10 @@
 import string
 import unicodedata
 from collections.abc import Sequence
+from collections.abc import Sequence as SequenceType
 from functools import partial
 from pathlib import Path
-from typing import Any, Dict, List, Optional, Tuple, TypeVar, Union
-from typing import Sequence as SequenceType
+from typing import Any, TypeVar
 
 import numpy as np
 from PIL import Image
@@ -19,7 +19,15 @@ from doctr.utils.geometry import convert_to_relative_coords, extract_crops, extract_rcrops
 
 from .vocabs import VOCABS
 
-__all__ = ["translate", "encode_string", "decode_sequence", "encode_sequences", "pre_transform_multiclass"]
+__all__ = [
+    "translate",
+    "encode_string",
+    "decode_sequence",
+    "encode_sequences",
+    "pre_transform_multiclass",
+    "crop_bboxes_from_image",
+    "convert_target_to_relative",
+]
 
 ImageTensor = TypeVar("ImageTensor")
 
@@ -32,13 +40,11 @@ def translate(
     """Translate a string input in a given vocabulary
 
     Args:
-    ----
         input_string: input string to translate
         vocab_name: vocabulary to use (french, latin, ...)
         unknown_char: unknown character for non-translatable characters
 
     Returns:
-    -------
         A string translated in a given vocab
     """
     if VOCABS.get(vocab_name) is None:
@@ -63,16 +69,14 @@
 def encode_string(
     input_string: str,
     vocab: str,
-) -> List[int]:
+) -> list[int]:
     """Given a predefined mapping, encode the string to a sequence of numbers
 
     Args:
-    ----
         input_string: string to encode
         vocab: vocabulary (string), the encoding is given by the indexing of the character sequence
 
     Returns:
-    -------
         A list encoding the input_string
     """
     try:
@@ -85,18 +89,16 @@ def encode_string(
 
 
 def decode_sequence(
-    input_seq: Union[np.ndarray, SequenceType[int]],
+    input_seq: np.ndarray | SequenceType[int],
     mapping: str,
 ) -> str:
     """Given a predefined mapping, decode the sequence of numbers to a string
 
     Args:
-    ----
         input_seq: array to decode
         mapping: vocabulary (string), the encoding is given by the indexing of the character sequence
 
     Returns:
-    -------
         A string, decoded from input_seq
     """
     if not isinstance(input_seq, (Sequence, np.ndarray)):
@@ -108,18 +110,17 @@ def decode_sequence(
 
 
 def encode_sequences(
-    sequences: List[str],
+    sequences: list[str],
     vocab: str,
-    target_size: Optional[int] = None,
+    target_size: int | None = None,
     eos: int = -1,
-    sos: Optional[int] = None,
-    pad: Optional[int] = None,
+    sos: int | None = None,
+    pad: int | None = None,
     dynamic_seq_length: bool = False,
 ) -> np.ndarray:
     """Encode character sequences using a given vocab as mapping
 
     Args:
-    ----
         sequences: the list of character sequences of size N
         vocab: the ordered vocab to use for encoding
         target_size: maximum length of the encoded data
@@ -129,7 +130,6 @@ def encode_sequences(
         dynamic_seq_length: if `target_size` is specified, uses it as upper bound and enables dynamic sequence size
 
     Returns:
-    -------
         the padded encoded data as a tensor
     """
     if 0 <= eos < len(vocab):
@@ -169,21 +169,33 @@
     return encoded_data
 
 
-def convert_target_to_relative(img: ImageTensor, target: Dict[str, Any]) -> Tuple[ImageTensor, Dict[str, Any]]:
-    target["boxes"] = convert_to_relative_coords(target["boxes"], get_img_shape(img))
+def convert_target_to_relative(
+    img: ImageTensor, target: np.ndarray | dict[str, Any]
+) -> tuple[ImageTensor, dict[str, Any] | np.ndarray]:
+    """Converts target to relative coordinates
+
+    Args:
+        img: tf.Tensor or torch.Tensor representing the image
+        target: target to convert to relative coordinates (boxes (N, 4) or polygons (N, 4, 2))
+
+    Returns:
+        The image and the target in relative coordinates
+    """
+    if isinstance(target, np.ndarray):
+        target = convert_to_relative_coords(target, get_img_shape(img))  # type: ignore[arg-type]
+    else:
+        target["boxes"] = convert_to_relative_coords(target["boxes"], get_img_shape(img))  # type: ignore[arg-type]
     return img, target
 
 
-def crop_bboxes_from_image(img_path: Union[str, Path], geoms: np.ndarray) -> List[np.ndarray]:
+def crop_bboxes_from_image(img_path: str | Path, geoms: np.ndarray) -> list[np.ndarray]:
     """Crop a set of bounding boxes from an image
 
    Args:
-    ----
        img_path: path to the image
        geoms: a array of polygons of shape (N, 4, 2) or of straight boxes of shape (N, 4)
 
    Returns:
-    -------
        a list of cropped images
    """
    with Image.open(img_path) as pil_img:
@@ -196,21 +208,19 @@ def crop_bboxes_from_image(img_path: Union[str, Path], geoms: np.ndarray) -> List[np.ndarray]:
         raise ValueError("Invalid geometry format")
 
 
-def pre_transform_multiclass(img, target: Tuple[np.ndarray, List]) -> Tuple[np.ndarray, Dict[str, List]]:
+def pre_transform_multiclass(img, target: tuple[np.ndarray, list]) -> tuple[np.ndarray, dict[str, list]]:
     """Converts multiclass target to relative coordinates.
 
     Args:
-    ----
         img: Image
         target: tuple of target polygons and their classes names
 
     Returns:
-    -------
         Image and dictionary of boxes, with class names as keys
     """
     boxes = convert_to_relative_coords(target[0], get_img_shape(img))
     boxes_classes = target[1]
-    boxes_dict: Dict = {k: [] for k in sorted(set(boxes_classes))}
+    boxes_dict: dict = {k: [] for k in sorted(set(boxes_classes))}
     for k, poly in zip(boxes_classes, boxes):
         boxes_dict[k].append(poly)
     boxes_dict = {k: np.stack(v, axis=0) for k, v in boxes_dict.items()}
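The signatures above only changed notation (`Optional[int]` → `int | None`, `List` → `list`), so existing calls keep working. A round-trip sketch of the two central helpers (vocab choice and pad value are illustrative; `pad` simply has to lie outside the vocab index range):

>>> from doctr.datasets.vocabs import VOCABS
>>> from doctr.datasets.utils import encode_sequences, decode_sequence
>>> vocab = VOCABS["french"]
>>> # Encode two words into fixed-length index rows, padded with len(vocab)
>>> encoded = encode_sequences(["hello", "world"], vocab, target_size=8, pad=len(vocab))
>>> encoded.shape
(2, 8)
>>> decode_sequence(encoded[0][:5], vocab)  # drop eos/padding before decoding
'hello'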
doctr/datasets/vocabs.py
CHANGED
@@ -1,15 +1,14 @@
-# Copyright (C) 2021-2024, Mindee.
+# Copyright (C) 2021-2025, Mindee.
 
 # This program is licensed under the Apache License 2.0.
 # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
 
 import string
-from typing import Dict
 
 __all__ = ["VOCABS"]
 
 
-VOCABS: Dict[str, str] = {
+VOCABS: dict[str, str] = {
     "digits": string.digits,
     "ascii_letters": string.ascii_letters,
     "punctuation": string.punctuation,
@@ -20,11 +19,16 @@ VOCABS: Dict[str, str] = {
     "arabic_digits": "٠١٢٣٤٥٦٧٨٩",
     "arabic_diacritics": "ًٌٍَُِّْ",
     "arabic_punctuation": "؟؛«»—",
-    "hindi_letters": "…",
+    "hindi_letters": "अआइईउऊऋॠऌॡएऐओऔंःकखगघङचछजझञटठडढणतथदधनपफबभमयरलवशषसह",
     "hindi_digits": "०१२३४५६७८९",
-    "hindi_punctuation": "…",
+    "hindi_punctuation": "।,?!:्ॐ॰॥",
+    "gujarati_vowels": "અઆઇઈઉઊઋએઐઓ",
+    "gujarati_consonants": "ખગઘચછજઝઞટઠડઢણતથદધનપફબભમયરલવશસહળક્ષ",
+    "gujarati_digits": "૦૧૨૩૪૫૬૭૮૯",
+    "gujarati_punctuation": "૰ઽ◌ંઃ॥ૐ઼ઁ" + "૱",
     "bangla_letters": "অআইঈউঊঋএঐওঔকখগঘঙচছজঝঞটঠডঢণতথদধনপফবভমযরলশষসহ়ঽািীুূৃেৈোৌ্ৎংঃঁ",
     "bangla_digits": "০১২৩৪৫৬৭৮৯",
+    "generic_cyrillic_letters": "абвгдежзийклмнопрстуфхцчшщьюяАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЬЮЯ",
 }
 
 VOCABS["latin"] = VOCABS["digits"] + VOCABS["ascii_letters"] + VOCABS["punctuation"]
@@ -53,12 +57,22 @@ VOCABS["finnish"] = VOCABS["english"] + "äöÄÖ"
 VOCABS["swedish"] = VOCABS["english"] + "åäöÅÄÖ"
 VOCABS["vietnamese"] = (
     VOCABS["english"]
-    + "…"
-    + "…"
+    + "áàảạãăắằẳẵặâấầẩẫậđéèẻẽẹêếềểễệóòỏõọôốồổộỗơớờởợỡúùủũụưứừửữựíìỉĩịýỳỷỹỵ"
+    + "ÁÀẢẠÃĂẮẰẲẴẶÂẤẦẨẪẬĐÉÈẺẼẸÊẾỀỂỄỆÓÒỎÕỌÔỐỒỔỘỖƠỚỜỞỢỠÚÙỦŨỤƯỨỪỬỮỰÍÌỈĨỊÝỲỶỸỴ"
 )
 VOCABS["hebrew"] = VOCABS["english"] + "אבגדהוזחטיכלמנסעפצקרשת" + "₪"
 VOCABS["hindi"] = VOCABS["hindi_letters"] + VOCABS["hindi_digits"] + VOCABS["hindi_punctuation"]
+VOCABS["gujarati"] = (
+    VOCABS["gujarati_vowels"]
+    + VOCABS["gujarati_consonants"]
+    + VOCABS["gujarati_digits"]
+    + VOCABS["gujarati_punctuation"]
+    + VOCABS["punctuation"]
+)
 VOCABS["bangla"] = VOCABS["bangla_letters"] + VOCABS["bangla_digits"]
+VOCABS["ukrainian"] = (
+    VOCABS["generic_cyrillic_letters"] + VOCABS["digits"] + VOCABS["punctuation"] + VOCABS["currency"] + "ґіїєҐІЇЄ₴"
+)
 VOCABS["multilingual"] = "".join(
     dict.fromkeys(
         VOCABS["french"]
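The new entries compose exactly like the existing ones, so the additions can be checked directly against the strings in this hunk:

>>> from doctr.datasets.vocabs import VOCABS
>>> "₴" in VOCABS["ukrainian"]  # hryvnia sign appended after the generic Cyrillic letters
True
>>> set(VOCABS["gujarati_digits"]) <= set(VOCABS["gujarati"])  # sub-vocabs fold into the composite vocab
True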
doctr/datasets/wildreceipt.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright (C) 2021-2024, Mindee.
+# Copyright (C) 2021-2025, Mindee.
 
 # This program is licensed under the Apache License 2.0.
 # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
@@ -6,9 +6,10 @@
 import json
 import os
 from pathlib import Path
-from typing import Any, Dict, List, Tuple, Union
+from typing import Any
 
 import numpy as np
+from tqdm import tqdm
 
 from .datasets import AbstractDataset
 from .utils import convert_target_to_relative, crop_bboxes_from_image
@@ -17,9 +18,10 @@ __all__ = ["WILDRECEIPT"]
 
 
 class WILDRECEIPT(AbstractDataset):
-    """WILDRECEIPT dataset from `"Spatial Dual-Modality Graph Reasoning for Key Information Extraction"
-    <https://arxiv.org/abs/2103.14470v1>`_ |
-    `"repository" <https://download.openmmlab.com/mmocr/data/wildreceipt.tar>`_.
+    """
+    WildReceipt dataset from `"Spatial Dual-Modality Graph Reasoning for Key Information Extraction"
+    <https://arxiv.org/abs/2103.14470v1>`_ |
+    `"repository" <https://download.openmmlab.com/mmocr/data/wildreceipt.tar>`_.
 
     .. image:: https://doctr-static.mindee.com/models?id=v0.7.0/wildreceipt-dataset.jpg&src=0
         :align: center
@@ -34,12 +36,12 @@ class WILDRECEIPT(AbstractDataset):
     >>> img, target = test_set[0]
 
     Args:
-    ----
         img_folder: folder with all the images of the dataset
         label_path: path to the annotations file of the dataset
         train: whether the subset should be the training one
         use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
         recognition_task: whether the dataset should be used for recognition task
+        detection_task: whether the dataset should be used for detection task
         **kwargs: keyword arguments from `AbstractDataset`.
     """
 
@@ -50,11 +52,19 @@ class WILDRECEIPT(AbstractDataset):
         train: bool = True,
         use_polygons: bool = False,
         recognition_task: bool = False,
+        detection_task: bool = False,
         **kwargs: Any,
     ) -> None:
         super().__init__(
             img_folder, pre_transforms=convert_target_to_relative if not recognition_task else None, **kwargs
         )
+        # Task check
+        if recognition_task and detection_task:
+            raise ValueError(
+                "`recognition_task` and `detection_task` cannot be set to True simultaneously. "
+                + "To get the whole dataset with boxes and labels leave both parameters to False."
+            )
+
         # File existence check
         if not os.path.exists(label_path) or not os.path.exists(img_folder):
             raise FileNotFoundError(f"unable to locate {label_path if not os.path.exists(label_path) else img_folder}")
@@ -62,15 +72,18 @@ class WILDRECEIPT(AbstractDataset):
         tmp_root = img_folder
         self.train = train
         np_dtype = np.float32
-        self.data: List[Tuple[Union[str, Path, np.ndarray], Union[str, Dict[str, Any], np.ndarray]]] = []
+        self.data: list[tuple[str | Path | np.ndarray, str | dict[str, Any] | np.ndarray]] = []
 
         with open(label_path, "r") as file:
             data = file.read()
         # Split the text file into separate JSON strings
         json_strings = data.strip().split("\n")
-        box: Union[List[float], np.ndarray]
-        _targets = []
-        for json_string in json_strings:
+        box: list[float] | np.ndarray
+
+        for json_string in tqdm(
+            iterable=json_strings, desc="Preparing and Loading WILDRECEIPT", total=len(json_strings)
+        ):
+            _targets = []
             json_data = json.loads(json_string)
             img_path = json_data["file_name"]
             annotations = json_data["annotations"]
@@ -100,6 +113,8 @@ class WILDRECEIPT(AbstractDataset):
                 for crop, label in zip(crops, list(text_targets)):
                     if label and " " not in label:
                         self.data.append((crop, label))
+            elif detection_task:
+                self.data.append((img_path, np.asarray(box_targets, dtype=int).clip(min=0)))
             else:
                 self.data.append((
                     img_path,
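Unlike the `VisionDataset`-based classes above, WILDRECEIPT extends `AbstractDataset` and takes explicit paths rather than downloading the archive itself. A sketch matching the docstring example, with placeholder paths:

>>> from doctr.datasets import WILDRECEIPT
>>> train_set = WILDRECEIPT(
...     img_folder="/path/to/wildreceipt/",
...     label_path="/path/to/wildreceipt/train.txt",
...     train=True,
...     detection_task=True,  # new in 0.11.0: boxes-only targets
... )
>>> img, target = train_set[0]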
doctr/file_utils.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright (C) 2021-2024, Mindee.
+# Copyright (C) 2021-2025, Mindee.
 
 # This program is licensed under the Apache License 2.0.
 # See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
@@ -9,7 +9,6 @@ import importlib.metadata
 import importlib.util
 import logging
 import os
-from typing import Optional
 
 CLASS_NAME: str = "words"
 
@@ -35,6 +34,20 @@ else:  # pragma: no cover
     logging.info("Disabling PyTorch because USE_TF is set")
     _torch_available = False
 
+# Compatibility fix to make sure tensorflow.keras stays at Keras 2
+if "TF_USE_LEGACY_KERAS" not in os.environ:
+    os.environ["TF_USE_LEGACY_KERAS"] = "1"
+
+elif os.environ["TF_USE_LEGACY_KERAS"] != "1":
+    raise ValueError(
+        "docTR is only compatible with Keras 2, but you have explicitly set `TF_USE_LEGACY_KERAS` to `0`. "
+    )
+
+
+def ensure_keras_v2() -> None:  # pragma: no cover
+    if not os.environ.get("TF_USE_LEGACY_KERAS") == "1":
+        os.environ["TF_USE_LEGACY_KERAS"] = "1"
+
 
 if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
     _tf_available = importlib.util.find_spec("tensorflow") is not None
@@ -65,6 +78,8 @@ if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
         _tf_available = False
     else:
         logging.info(f"TensorFlow version {_tf_version} available.")
+        ensure_keras_v2()
+
 else:  # pragma: no cover
     logging.info("Disabling Tensorflow because USE_TORCH is set")
     _tf_available = False
@@ -77,12 +92,11 @@ if not _torch_available and not _tf_available:  # pragma: no cover
     )
 
 
-def requires_package(name: str, extra_message: Optional[str] = None) -> None:  # pragma: no cover
+def requires_package(name: str, extra_message: str | None = None) -> None:  # pragma: no cover
     """
     package requirement helper
 
     Args:
-    ----
         name: name of the package
        extra_message: additional message to display if the package is not found
    """