python-doctr 0.9.0__py3-none-any.whl → 0.10.0__py3-none-any.whl
This diff covers the content of publicly released package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- doctr/datasets/cord.py +10 -1
- doctr/datasets/funsd.py +11 -1
- doctr/datasets/ic03.py +11 -1
- doctr/datasets/ic13.py +10 -1
- doctr/datasets/iiit5k.py +26 -16
- doctr/datasets/imgur5k.py +10 -1
- doctr/datasets/sroie.py +11 -1
- doctr/datasets/svhn.py +11 -1
- doctr/datasets/svt.py +11 -1
- doctr/datasets/synthtext.py +11 -1
- doctr/datasets/utils.py +7 -2
- doctr/datasets/vocabs.py +6 -2
- doctr/datasets/wildreceipt.py +12 -1
- doctr/file_utils.py +19 -0
- doctr/io/elements.py +12 -4
- doctr/models/builder.py +2 -2
- doctr/models/classification/magc_resnet/tensorflow.py +13 -6
- doctr/models/classification/mobilenet/pytorch.py +2 -0
- doctr/models/classification/mobilenet/tensorflow.py +14 -8
- doctr/models/classification/predictor/pytorch.py +11 -7
- doctr/models/classification/predictor/tensorflow.py +10 -6
- doctr/models/classification/resnet/tensorflow.py +21 -8
- doctr/models/classification/textnet/tensorflow.py +11 -5
- doctr/models/classification/vgg/tensorflow.py +9 -3
- doctr/models/classification/vit/tensorflow.py +10 -4
- doctr/models/classification/zoo.py +22 -10
- doctr/models/detection/differentiable_binarization/tensorflow.py +34 -12
- doctr/models/detection/fast/tensorflow.py +14 -11
- doctr/models/detection/linknet/tensorflow.py +23 -11
- doctr/models/detection/predictor/tensorflow.py +2 -2
- doctr/models/factory/hub.py +5 -6
- doctr/models/kie_predictor/base.py +4 -0
- doctr/models/kie_predictor/pytorch.py +4 -0
- doctr/models/kie_predictor/tensorflow.py +8 -1
- doctr/models/modules/transformer/tensorflow.py +0 -2
- doctr/models/modules/vision_transformer/pytorch.py +1 -1
- doctr/models/modules/vision_transformer/tensorflow.py +1 -1
- doctr/models/predictor/base.py +24 -12
- doctr/models/predictor/pytorch.py +4 -0
- doctr/models/predictor/tensorflow.py +8 -1
- doctr/models/preprocessor/tensorflow.py +1 -1
- doctr/models/recognition/crnn/tensorflow.py +8 -6
- doctr/models/recognition/master/tensorflow.py +9 -4
- doctr/models/recognition/parseq/tensorflow.py +10 -8
- doctr/models/recognition/sar/tensorflow.py +7 -3
- doctr/models/recognition/vitstr/tensorflow.py +9 -4
- doctr/models/utils/pytorch.py +1 -1
- doctr/models/utils/tensorflow.py +15 -15
- doctr/transforms/functional/pytorch.py +1 -1
- doctr/transforms/modules/pytorch.py +7 -6
- doctr/transforms/modules/tensorflow.py +15 -12
- doctr/utils/geometry.py +106 -19
- doctr/utils/metrics.py +1 -1
- doctr/utils/reconstitution.py +151 -65
- doctr/version.py +1 -1
- {python_doctr-0.9.0.dist-info → python_doctr-0.10.0.dist-info}/METADATA +11 -11
- {python_doctr-0.9.0.dist-info → python_doctr-0.10.0.dist-info}/RECORD +61 -61
- {python_doctr-0.9.0.dist-info → python_doctr-0.10.0.dist-info}/WHEEL +1 -1
- {python_doctr-0.9.0.dist-info → python_doctr-0.10.0.dist-info}/LICENSE +0 -0
- {python_doctr-0.9.0.dist-info → python_doctr-0.10.0.dist-info}/top_level.txt +0 -0
- {python_doctr-0.9.0.dist-info → python_doctr-0.10.0.dist-info}/zip-safe +0 -0
doctr/datasets/cord.py
CHANGED
@@ -33,6 +33,7 @@ class CORD(VisionDataset):
         train: whether the subset should be the training one
         use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
         recognition_task: whether the dataset should be used for recognition task
+        detection_task: whether the dataset should be used for detection task
         **kwargs: keyword arguments from `VisionDataset`.
     """

@@ -53,6 +54,7 @@ class CORD(VisionDataset):
         train: bool = True,
         use_polygons: bool = False,
         recognition_task: bool = False,
+        detection_task: bool = False,
         **kwargs: Any,
     ) -> None:
         url, sha256, name = self.TRAIN if train else self.TEST
@@ -64,10 +66,15 @@ class CORD(VisionDataset):
             pre_transforms=convert_target_to_relative if not recognition_task else None,
             **kwargs,
         )
+        if recognition_task and detection_task:
+            raise ValueError(
+                "`recognition_task` and `detection_task` cannot be set to True simultaneously. "
+                + "To get the whole dataset with boxes and labels leave both parameters to False."
+            )

         # List images
         tmp_root = os.path.join(self.root, "image")
-        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any]]]] = []
+        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any], np.ndarray]]] = []
         self.train = train
         np_dtype = np.float32
         for img_path in tqdm(iterable=os.listdir(tmp_root), desc="Unpacking CORD", total=len(os.listdir(tmp_root))):
@@ -109,6 +116,8 @@ class CORD(VisionDataset):
                 )
                 for crop, label in zip(crops, list(text_targets)):
                     self.data.append((crop, label))
+            elif detection_task:
+                self.data.append((img_path, np.asarray(box_targets, dtype=int).clip(min=0)))
             else:
                 self.data.append((
                     img_path,
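The same `detection_task` flag lands in every dataset of this release (FUNSD, IC03, IC13, IIIT5K, IMGUR5K, SROIE, SVHN, SVT, SynthText and WILDRECEIPT below follow the identical pattern): when set, each sample becomes an `(image, boxes)` pair instead of the full boxes-and-labels dict, and combining it with `recognition_task` raises a `ValueError`. A minimal usage sketch, assuming the CORD archive can be fetched via the standard `VisionDataset` keyword arguments:

```python
from doctr.datasets import CORD

# Detection-only targets: each sample is (image, ndarray of word boxes)
train_set = CORD(train=True, detection_task=True, download=True)
img, boxes = train_set[0]

# The two task flags are mutually exclusive in 0.10.0
try:
    CORD(train=True, recognition_task=True, detection_task=True, download=True)
except ValueError as err:
    print(err)
```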
doctr/datasets/funsd.py
CHANGED
@@ -33,6 +33,7 @@ class FUNSD(VisionDataset):
         train: whether the subset should be the training one
         use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
         recognition_task: whether the dataset should be used for recognition task
+        detection_task: whether the dataset should be used for detection task
         **kwargs: keyword arguments from `VisionDataset`.
     """

@@ -45,6 +46,7 @@ class FUNSD(VisionDataset):
         train: bool = True,
         use_polygons: bool = False,
         recognition_task: bool = False,
+        detection_task: bool = False,
         **kwargs: Any,
     ) -> None:
         super().__init__(
@@ -55,6 +57,12 @@ class FUNSD(VisionDataset):
             pre_transforms=convert_target_to_relative if not recognition_task else None,
             **kwargs,
         )
+        if recognition_task and detection_task:
+            raise ValueError(
+                "`recognition_task` and `detection_task` cannot be set to True simultaneously. "
+                + "To get the whole dataset with boxes and labels leave both parameters to False."
+            )
+
         self.train = train
         np_dtype = np.float32

@@ -63,7 +71,7 @@ class FUNSD(VisionDataset):

         # # List images
         tmp_root = os.path.join(self.root, subfolder, "images")
-        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any]]]] = []
+        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any], np.ndarray]]] = []
         for img_path in tqdm(iterable=os.listdir(tmp_root), desc="Unpacking FUNSD", total=len(os.listdir(tmp_root))):
             # File existence check
             if not os.path.exists(os.path.join(tmp_root, img_path)):
@@ -100,6 +108,8 @@ class FUNSD(VisionDataset):
                     # filter labels with unknown characters
                     if not any(char in label for char in ["☑", "☐", "\uf703", "\uf702"]):
                         self.data.append((crop, label))
+            elif detection_task:
+                self.data.append((img_path, np.asarray(box_targets, dtype=np_dtype)))
             else:
                 self.data.append((
                     img_path,
doctr/datasets/ic03.py
CHANGED
@@ -32,6 +32,7 @@ class IC03(VisionDataset):
         train: whether the subset should be the training one
         use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
         recognition_task: whether the dataset should be used for recognition task
+        detection_task: whether the dataset should be used for detection task
         **kwargs: keyword arguments from `VisionDataset`.
     """

@@ -51,6 +52,7 @@ class IC03(VisionDataset):
         train: bool = True,
         use_polygons: bool = False,
         recognition_task: bool = False,
+        detection_task: bool = False,
         **kwargs: Any,
     ) -> None:
         url, sha256, file_name = self.TRAIN if train else self.TEST
@@ -62,8 +64,14 @@ class IC03(VisionDataset):
             pre_transforms=convert_target_to_relative if not recognition_task else None,
             **kwargs,
         )
+        if recognition_task and detection_task:
+            raise ValueError(
+                "`recognition_task` and `detection_task` cannot be set to True simultaneously. "
+                + "To get the whole dataset with boxes and labels leave both parameters to False."
+            )
+
         self.train = train
-        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any]]]] = []
+        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any], np.ndarray]]] = []
         np_dtype = np.float32

         # Load xml data
@@ -117,6 +125,8 @@ class IC03(VisionDataset):
                 for crop, label in zip(crops, labels):
                     if crop.shape[0] > 0 and crop.shape[1] > 0 and len(label) > 0:
                         self.data.append((crop, label))
+            elif detection_task:
+                self.data.append((name.text, boxes))
             else:
                 self.data.append((name.text, dict(boxes=boxes, labels=labels)))

doctr/datasets/ic13.py
CHANGED
@@ -38,6 +38,7 @@ class IC13(AbstractDataset):
         label_folder: folder with all annotation files for the images
         use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
         recognition_task: whether the dataset should be used for recognition task
+        detection_task: whether the dataset should be used for detection task
         **kwargs: keyword arguments from `AbstractDataset`.
     """

@@ -47,11 +48,17 @@ class IC13(AbstractDataset):
         label_folder: str,
         use_polygons: bool = False,
         recognition_task: bool = False,
+        detection_task: bool = False,
         **kwargs: Any,
     ) -> None:
         super().__init__(
             img_folder, pre_transforms=convert_target_to_relative if not recognition_task else None, **kwargs
         )
+        if recognition_task and detection_task:
+            raise ValueError(
+                "`recognition_task` and `detection_task` cannot be set to True simultaneously. "
+                + "To get the whole dataset with boxes and labels leave both parameters to False."
+            )

         # File existence check
         if not os.path.exists(label_folder) or not os.path.exists(img_folder):
@@ -59,7 +66,7 @@ class IC13(AbstractDataset):
                 f"unable to locate {label_folder if not os.path.exists(label_folder) else img_folder}"
             )

-        self.data: List[Tuple[Union[Path, np.ndarray], Union[str, Dict[str, Any]]]] = []
+        self.data: List[Tuple[Union[Path, np.ndarray], Union[str, Dict[str, Any], np.ndarray]]] = []
         np_dtype = np.float32

         img_names = os.listdir(img_folder)
@@ -95,5 +102,7 @@ class IC13(AbstractDataset):
                 crops = crop_bboxes_from_image(img_path=img_path, geoms=box_targets)
                 for crop, label in zip(crops, labels):
                     self.data.append((crop, label))
+            elif detection_task:
+                self.data.append((img_path, box_targets))
             else:
                 self.data.append((img_path, dict(boxes=box_targets, labels=labels)))
doctr/datasets/iiit5k.py
CHANGED
@@ -34,6 +34,7 @@ class IIIT5K(VisionDataset):
         train: whether the subset should be the training one
         use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
         recognition_task: whether the dataset should be used for recognition task
+        detection_task: whether the dataset should be used for detection task
         **kwargs: keyword arguments from `VisionDataset`.
     """

@@ -45,6 +46,7 @@ class IIIT5K(VisionDataset):
         train: bool = True,
         use_polygons: bool = False,
         recognition_task: bool = False,
+        detection_task: bool = False,
         **kwargs: Any,
     ) -> None:
         super().__init__(
@@ -55,6 +57,12 @@ class IIIT5K(VisionDataset):
             pre_transforms=convert_target_to_relative if not recognition_task else None,
             **kwargs,
         )
+        if recognition_task and detection_task:
+            raise ValueError(
+                "`recognition_task` and `detection_task` cannot be set to True simultaneously. "
+                + "To get the whole dataset with boxes and labels leave both parameters to False."
+            )
+
         self.train = train

         # Load mat data
@@ -62,7 +70,7 @@ class IIIT5K(VisionDataset):
         mat_file = "trainCharBound" if self.train else "testCharBound"
         mat_data = sio.loadmat(os.path.join(tmp_root, f"{mat_file}.mat"))[mat_file][0]

-        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any]]]] = []
+        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any], np.ndarray]]] = []
         np_dtype = np.float32

         for img_path, label, box_targets in tqdm(iterable=mat_data, desc="Unpacking IIIT5K", total=len(mat_data)):
@@ -73,24 +81,26 @@ class IIIT5K(VisionDataset):
             if not os.path.exists(os.path.join(tmp_root, _raw_path)):
                 raise FileNotFoundError(f"unable to locate {os.path.join(tmp_root, _raw_path)}")

+            if use_polygons:
+                # (x, y) coordinates of top left, top right, bottom right, bottom left corners
+                box_targets = [
+                    [
+                        [box[0], box[1]],
+                        [box[0] + box[2], box[1]],
+                        [box[0] + box[2], box[1] + box[3]],
+                        [box[0], box[1] + box[3]],
+                    ]
+                    for box in box_targets
+                ]
+            else:
+                # xmin, ymin, xmax, ymax
+                box_targets = [[box[0], box[1], box[0] + box[2], box[1] + box[3]] for box in box_targets]
+
             if recognition_task:
                 self.data.append((_raw_path, _raw_label))
+            elif detection_task:
+                self.data.append((_raw_path, np.asarray(box_targets, dtype=np_dtype)))
             else:
-                if use_polygons:
-                    # (x, y) coordinates of top left, top right, bottom right, bottom left corners
-                    box_targets = [
-                        [
-                            [box[0], box[1]],
-                            [box[0] + box[2], box[1]],
-                            [box[0] + box[2], box[1] + box[3]],
-                            [box[0], box[1] + box[3]],
-                        ]
-                        for box in box_targets
-                    ]
-                else:
-                    # xmin, ymin, xmax, ymax
-                    box_targets = [[box[0], box[1], box[0] + box[2], box[1] + box[3]] for box in box_targets]
-
                 # label are casted to list where each char corresponds to the character's bounding box
                 self.data.append((
                     _raw_path,
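IIIT5K additionally hoists the box conversion out of the final `else` branch so the new `detection_task` branch can reuse it: the raw `[x, y, w, h]` character boxes from the `.mat` file become 4-point polygons or `[xmin, ymin, xmax, ymax]` boxes before the task dispatch. A standalone sketch of that conversion on made-up data:

```python
import numpy as np

boxes_xywh = [[10, 20, 30, 40]]  # hypothetical [x, y, width, height] entries

# use_polygons=True: (x, y) corners in top-left, top-right, bottom-right, bottom-left order
polygons = [
    [[x, y], [x + w, y], [x + w, y + h], [x, y + h]]
    for x, y, w, h in boxes_xywh
]

# use_polygons=False: xmin, ymin, xmax, ymax
xyxy = [[x, y, x + w, y + h] for x, y, w, h in boxes_xywh]

print(np.asarray(polygons, dtype=np.float32).shape)  # (1, 4, 2)
print(xyxy[0])  # [10, 20, 40, 60]
```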
doctr/datasets/imgur5k.py
CHANGED
@@ -46,6 +46,7 @@ class IMGUR5K(AbstractDataset):
         train: whether the subset should be the training one
         use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
         recognition_task: whether the dataset should be used for recognition task
+        detection_task: whether the dataset should be used for detection task
         **kwargs: keyword arguments from `AbstractDataset`.
     """

@@ -56,17 +57,23 @@ class IMGUR5K(AbstractDataset):
         train: bool = True,
         use_polygons: bool = False,
         recognition_task: bool = False,
+        detection_task: bool = False,
         **kwargs: Any,
     ) -> None:
         super().__init__(
             img_folder, pre_transforms=convert_target_to_relative if not recognition_task else None, **kwargs
         )
+        if recognition_task and detection_task:
+            raise ValueError(
+                "`recognition_task` and `detection_task` cannot be set to True simultaneously. "
+                + "To get the whole dataset with boxes and labels leave both parameters to False."
+            )

         # File existence check
         if not os.path.exists(label_path) or not os.path.exists(img_folder):
             raise FileNotFoundError(f"unable to locate {label_path if not os.path.exists(label_path) else img_folder}")

-        self.data: List[Tuple[Union[str, Path, np.ndarray], Union[str, Dict[str, Any]]]] = []
+        self.data: List[Tuple[Union[str, Path, np.ndarray], Union[str, Dict[str, Any], np.ndarray]]] = []
         self.train = train
         np_dtype = np.float32

@@ -132,6 +139,8 @@ class IMGUR5K(AbstractDataset):
                     tmp_img = Image.fromarray(crop)
                     tmp_img.save(os.path.join(reco_folder_path, f"{reco_images_counter}.png"))
                     reco_images_counter += 1
+            elif detection_task:
+                self.data.append((img_path, np.asarray(box_targets, dtype=np_dtype)))
             else:
                 self.data.append((img_path, dict(boxes=np.asarray(box_targets, dtype=np_dtype), labels=labels)))

doctr/datasets/sroie.py
CHANGED
@@ -33,6 +33,7 @@ class SROIE(VisionDataset):
         train: whether the subset should be the training one
         use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
         recognition_task: whether the dataset should be used for recognition task
+        detection_task: whether the dataset should be used for detection task
         **kwargs: keyword arguments from `VisionDataset`.
     """

@@ -52,6 +53,7 @@ class SROIE(VisionDataset):
         train: bool = True,
         use_polygons: bool = False,
         recognition_task: bool = False,
+        detection_task: bool = False,
         **kwargs: Any,
     ) -> None:
         url, sha256, name = self.TRAIN if train else self.TEST
@@ -63,10 +65,16 @@ class SROIE(VisionDataset):
             pre_transforms=convert_target_to_relative if not recognition_task else None,
             **kwargs,
         )
+        if recognition_task and detection_task:
+            raise ValueError(
+                "`recognition_task` and `detection_task` cannot be set to True simultaneously. "
+                + "To get the whole dataset with boxes and labels leave both parameters to False."
+            )
+
         self.train = train

         tmp_root = os.path.join(self.root, "images")
-        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any]]]] = []
+        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any], np.ndarray]]] = []
         np_dtype = np.float32

         for img_path in tqdm(iterable=os.listdir(tmp_root), desc="Unpacking SROIE", total=len(os.listdir(tmp_root))):
@@ -94,6 +102,8 @@ class SROIE(VisionDataset):
                 for crop, label in zip(crops, labels):
                     if crop.shape[0] > 0 and crop.shape[1] > 0 and len(label) > 0:
                         self.data.append((crop, label))
+            elif detection_task:
+                self.data.append((img_path, coords))
             else:
                 self.data.append((img_path, dict(boxes=coords, labels=labels)))

doctr/datasets/svhn.py
CHANGED
@@ -32,6 +32,7 @@ class SVHN(VisionDataset):
         train: whether the subset should be the training one
         use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
         recognition_task: whether the dataset should be used for recognition task
+        detection_task: whether the dataset should be used for detection task
         **kwargs: keyword arguments from `VisionDataset`.
     """

@@ -52,6 +53,7 @@ class SVHN(VisionDataset):
         train: bool = True,
         use_polygons: bool = False,
         recognition_task: bool = False,
+        detection_task: bool = False,
         **kwargs: Any,
     ) -> None:
         url, sha256, name = self.TRAIN if train else self.TEST
@@ -63,8 +65,14 @@ class SVHN(VisionDataset):
             pre_transforms=convert_target_to_relative if not recognition_task else None,
             **kwargs,
         )
+        if recognition_task and detection_task:
+            raise ValueError(
+                "`recognition_task` and `detection_task` cannot be set to True simultaneously. "
+                + "To get the whole dataset with boxes and labels leave both parameters to False."
+            )
+
         self.train = train
-        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any]]]] = []
+        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any], np.ndarray]]] = []
         np_dtype = np.float32

         tmp_root = os.path.join(self.root, "train" if train else "test")
@@ -122,6 +130,8 @@ class SVHN(VisionDataset):
                 for crop, label in zip(crops, label_targets):
                     if crop.shape[0] > 0 and crop.shape[1] > 0 and len(label) > 0:
                         self.data.append((crop, label))
+            elif detection_task:
+                self.data.append((img_name, box_targets))
             else:
                 self.data.append((img_name, dict(boxes=box_targets, labels=label_targets)))

doctr/datasets/svt.py
CHANGED
@@ -32,6 +32,7 @@ class SVT(VisionDataset):
         train: whether the subset should be the training one
         use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
         recognition_task: whether the dataset should be used for recognition task
+        detection_task: whether the dataset should be used for detection task
         **kwargs: keyword arguments from `VisionDataset`.
     """

@@ -43,6 +44,7 @@ class SVT(VisionDataset):
         train: bool = True,
         use_polygons: bool = False,
         recognition_task: bool = False,
+        detection_task: bool = False,
         **kwargs: Any,
     ) -> None:
         super().__init__(
@@ -53,8 +55,14 @@ class SVT(VisionDataset):
             pre_transforms=convert_target_to_relative if not recognition_task else None,
             **kwargs,
         )
+        if recognition_task and detection_task:
+            raise ValueError(
+                "`recognition_task` and `detection_task` cannot be set to True simultaneously. "
+                + "To get the whole dataset with boxes and labels leave both parameters to False."
+            )
+
         self.train = train
-        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any]]]] = []
+        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any], np.ndarray]]] = []
         np_dtype = np.float32

         # Load xml data
@@ -108,6 +116,8 @@ class SVT(VisionDataset):
                 for crop, label in zip(crops, labels):
                     if crop.shape[0] > 0 and crop.shape[1] > 0 and len(label) > 0:
                         self.data.append((crop, label))
+            elif detection_task:
+                self.data.append((name.text, boxes))
             else:
                 self.data.append((name.text, dict(boxes=boxes, labels=labels)))

doctr/datasets/synthtext.py
CHANGED
@@ -35,6 +35,7 @@ class SynthText(VisionDataset):
         train: whether the subset should be the training one
         use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
         recognition_task: whether the dataset should be used for recognition task
+        detection_task: whether the dataset should be used for detection task
         **kwargs: keyword arguments from `VisionDataset`.
     """

@@ -46,6 +47,7 @@ class SynthText(VisionDataset):
         train: bool = True,
         use_polygons: bool = False,
         recognition_task: bool = False,
+        detection_task: bool = False,
         **kwargs: Any,
     ) -> None:
         super().__init__(
@@ -56,8 +58,14 @@ class SynthText(VisionDataset):
             pre_transforms=convert_target_to_relative if not recognition_task else None,
             **kwargs,
         )
+        if recognition_task and detection_task:
+            raise ValueError(
+                "`recognition_task` and `detection_task` cannot be set to True simultaneously. "
+                + "To get the whole dataset with boxes and labels leave both parameters to False."
+            )
+
         self.train = train
-        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any]]]] = []
+        self.data: List[Tuple[Union[str, np.ndarray], Union[str, Dict[str, Any], np.ndarray]]] = []
         np_dtype = np.float32

         # Load mat data
@@ -111,6 +119,8 @@ class SynthText(VisionDataset):
                     tmp_img = Image.fromarray(crop)
                     tmp_img.save(os.path.join(reco_folder_path, f"{reco_images_counter}.png"))
                     reco_images_counter += 1
+            elif detection_task:
+                self.data.append((img_path[0], np.asarray(word_boxes, dtype=np_dtype)))
             else:
                 self.data.append((img_path[0], dict(boxes=np.asarray(word_boxes, dtype=np_dtype), labels=labels)))

doctr/datasets/utils.py
CHANGED
@@ -169,8 +169,13 @@ def encode_sequences(
     return encoded_data


-def convert_target_to_relative(img: ImageTensor, target: Dict[str, Any]) -> Tuple[ImageTensor, Dict[str, Any]]:
-    target["boxes"] = convert_to_relative_coords(target["boxes"], get_img_shape(img))
+def convert_target_to_relative(
+    img: ImageTensor, target: Union[np.ndarray, Dict[str, Any]]
+) -> Tuple[ImageTensor, Union[Dict[str, Any], np.ndarray]]:
+    if isinstance(target, np.ndarray):
+        target = convert_to_relative_coords(target, get_img_shape(img))
+    else:
+        target["boxes"] = convert_to_relative_coords(target["boxes"], get_img_shape(img))
     return img, target

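`convert_target_to_relative` now accepts a bare `np.ndarray` (the target shape produced by the new `detection_task` branches) as well as the usual dict, normalizing coordinates in either case. A rough sketch of the dispatch, with a simplified stand-in for `convert_to_relative_coords` (the real helper lives in `doctr.utils.geometry` and also handles polygon targets):

```python
from typing import Any, Dict, Tuple, Union

import numpy as np


def to_relative(boxes: np.ndarray, shape: Tuple[int, int]) -> np.ndarray:
    # Simplified: scale absolute (xmin, ymin, xmax, ymax) by image width/height
    h, w = shape
    return boxes / np.array([w, h, w, h], dtype=np.float32)


def convert_target_to_relative_sketch(
    img: np.ndarray, target: Union[np.ndarray, Dict[str, Any]]
) -> Tuple[np.ndarray, Union[Dict[str, Any], np.ndarray]]:
    if isinstance(target, np.ndarray):  # detection_task: boxes only
        target = to_relative(target, img.shape[:2])
    else:  # full target: boxes live under the "boxes" key
        target["boxes"] = to_relative(target["boxes"], img.shape[:2])
    return img, target
```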
doctr/datasets/vocabs.py
CHANGED
@@ -25,6 +25,7 @@ VOCABS: Dict[str, str] = {
     "hindi_punctuation": "।,?!:्ॐ॰॥॰",
     "bangla_letters": "অআইঈউঊঋএঐওঔকখগঘঙচছজঝঞটঠডঢণতথদধনপফবভমযরলশষসহ়ঽািীুূৃেৈোৌ্ৎংঃঁ",
     "bangla_digits": "০১২৩৪৫৬৭৮৯",
+    "generic_cyrillic_letters": "абвгдежзийклмнопрстуфхцчшщьюяАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЬЮЯ",
 }

 VOCABS["latin"] = VOCABS["digits"] + VOCABS["ascii_letters"] + VOCABS["punctuation"]
@@ -53,12 +54,15 @@ VOCABS["finnish"] = VOCABS["english"] + "äöÄÖ"
 VOCABS["swedish"] = VOCABS["english"] + "åäöÅÄÖ"
 VOCABS["vietnamese"] = (
     VOCABS["english"]
-    + "
-    + "
+    + "áàảạãăắằẳẵặâấầẩẫậđéèẻẽẹêếềểễệóòỏõọôốồổộỗơớờởợỡúùủũụưứừửữựiíìỉĩịýỳỷỹỵ"
+    + "ÁÀẢẠÃĂẮẰẲẴẶÂẤẦẨẪẬĐÉÈẺẼẸÊẾỀỂỄỆÓÒỎÕỌÔỐỒỔỘỖƠỚỜỞỢỠÚÙỦŨỤƯỨỪỬỮỰIÍÌỈĨỊÝỲỶỸỴ"
 )
 VOCABS["hebrew"] = VOCABS["english"] + "אבגדהוזחטיכלמנסעפצקרשת" + "₪"
 VOCABS["hindi"] = VOCABS["hindi_letters"] + VOCABS["hindi_digits"] + VOCABS["hindi_punctuation"]
 VOCABS["bangla"] = VOCABS["bangla_letters"] + VOCABS["bangla_digits"]
+VOCABS["ukrainian"] = (
+    VOCABS["generic_cyrillic_letters"] + VOCABS["digits"] + VOCABS["punctuation"] + VOCABS["currency"] + "ґіїєҐІЇЄ₴"
+)
 VOCABS["multilingual"] = "".join(
     dict.fromkeys(
         VOCABS["french"]
doctr/datasets/wildreceipt.py
CHANGED
@@ -40,6 +40,7 @@ class WILDRECEIPT(AbstractDataset):
         train: whether the subset should be the training one
         use_polygons: whether polygons should be considered as rotated bounding box (instead of straight ones)
         recognition_task: whether the dataset should be used for recognition task
+        detection_task: whether the dataset should be used for detection task
         **kwargs: keyword arguments from `AbstractDataset`.
     """

@@ -50,11 +51,19 @@ class WILDRECEIPT(AbstractDataset):
         train: bool = True,
         use_polygons: bool = False,
         recognition_task: bool = False,
+        detection_task: bool = False,
         **kwargs: Any,
     ) -> None:
         super().__init__(
             img_folder, pre_transforms=convert_target_to_relative if not recognition_task else None, **kwargs
         )
+        # Task check
+        if recognition_task and detection_task:
+            raise ValueError(
+                "`recognition_task` and `detection_task` cannot be set to True simultaneously. "
+                + "To get the whole dataset with boxes and labels leave both parameters to False."
+            )
+
         # File existence check
         if not os.path.exists(label_path) or not os.path.exists(img_folder):
             raise FileNotFoundError(f"unable to locate {label_path if not os.path.exists(label_path) else img_folder}")
@@ -62,7 +71,7 @@ class WILDRECEIPT(AbstractDataset):
         tmp_root = img_folder
         self.train = train
         np_dtype = np.float32
-        self.data: List[Tuple[Union[str, Path, np.ndarray], Union[str, Dict[str, Any]]]] = []
+        self.data: List[Tuple[Union[str, Path, np.ndarray], Union[str, Dict[str, Any], np.ndarray]]] = []

         with open(label_path, "r") as file:
             data = file.read()
@@ -100,6 +109,8 @@ class WILDRECEIPT(AbstractDataset):
                 for crop, label in zip(crops, list(text_targets)):
                     if label and " " not in label:
                         self.data.append((crop, label))
+            elif detection_task:
+                self.data.append((img_path, np.asarray(box_targets, dtype=int).clip(min=0)))
             else:
                 self.data.append((
                     img_path,
doctr/file_utils.py
CHANGED
@@ -35,6 +35,20 @@ else:  # pragma: no cover
     logging.info("Disabling PyTorch because USE_TF is set")
     _torch_available = False

+# Compatibility fix to make sure tensorflow.keras stays at Keras 2
+if "TF_USE_LEGACY_KERAS" not in os.environ:
+    os.environ["TF_USE_LEGACY_KERAS"] = "1"
+
+elif os.environ["TF_USE_LEGACY_KERAS"] != "1":
+    raise ValueError(
+        "docTR is only compatible with Keras 2, but you have explicitly set `TF_USE_LEGACY_KERAS` to `0`. "
+    )
+
+
+def ensure_keras_v2() -> None:  # pragma: no cover
+    if not os.environ.get("TF_USE_LEGACY_KERAS") == "1":
+        os.environ["TF_USE_LEGACY_KERAS"] = "1"
+

 if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
     _tf_available = importlib.util.find_spec("tensorflow") is not None
@@ -65,6 +79,11 @@ if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
         _tf_available = False
     else:
         logging.info(f"TensorFlow version {_tf_version} available.")
+        ensure_keras_v2()
+        import tensorflow as tf
+
+        # Enable eager execution - this is required for some models to work properly
+        tf.config.run_functions_eagerly(True)
 else:  # pragma: no cover
     logging.info("Disabling Tensorflow because USE_TORCH is set")
     _tf_available = False
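The net effect is that the TensorFlow backend pins `tensorflow.keras` to Keras 2 (via the legacy-Keras environment switch) and turns on eager execution at import time. A behavior sketch, assuming a TensorFlow install with docTR running in TF mode:

```python
import os

# An explicit opt-out of legacy Keras now fails fast at import time
os.environ["TF_USE_LEGACY_KERAS"] = "0"
try:
    import doctr  # noqa: F401
except ValueError as err:
    print(err)  # docTR is only compatible with Keras 2 ...

# Leaving the variable unset lets doctr set TF_USE_LEGACY_KERAS=1 itself
os.environ.pop("TF_USE_LEGACY_KERAS", None)
```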
doctr/io/elements.py
CHANGED
@@ -168,7 +168,7 @@ class Line(Element):
         if geometry is None:
             # Check whether this is a rotated or straight box
             box_resolution_fn = resolve_enclosing_rbbox if len(words[0].geometry) == 4 else resolve_enclosing_bbox
-            geometry = box_resolution_fn([w.geometry for w in words])  # type: ignore[
+            geometry = box_resolution_fn([w.geometry for w in words])  # type: ignore[misc]

         super().__init__(words=words)
         self.geometry = geometry
@@ -232,7 +232,7 @@ class Block(Element):
             box_resolution_fn = (
                 resolve_enclosing_rbbox if isinstance(lines[0].geometry, np.ndarray) else resolve_enclosing_bbox
             )
-            geometry = box_resolution_fn(line_boxes + artefact_boxes)  # type: ignore
+            geometry = box_resolution_fn(line_boxes + artefact_boxes)  # type: ignore

         super().__init__(lines=lines, artefacts=artefacts)
         self.geometry = geometry
@@ -310,6 +310,10 @@
     def synthesize(self, **kwargs) -> np.ndarray:
         """Synthesize the page from the predictions

+        Args:
+        ----
+            **kwargs: keyword arguments passed to the `synthesize_page` method
+
         Returns
         -------
             synthesized page
@@ -493,7 +497,7 @@ class KIEPage(Element):

         Args:
         ----
-            **kwargs: keyword arguments passed to the
+            **kwargs: keyword arguments passed to the `synthesize_kie_page` method

         Returns:
         -------
@@ -603,11 +607,15 @@ class Document(Element):
     def synthesize(self, **kwargs) -> List[np.ndarray]:
         """Synthesize all pages from their predictions

+        Args:
+        ----
+            **kwargs: keyword arguments passed to the `Page.synthesize` method
+
         Returns
         -------
             list of synthesized pages
         """
-        return [page.synthesize() for page in self.pages]
+        return [page.synthesize(**kwargs) for page in self.pages]

     def export_as_xml(self, **kwargs) -> List[Tuple[bytes, ET.ElementTree]]:
         """Export the document as XML (hOCR-format)
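Besides the docstring fixes, the functional change here is that `Document.synthesize` finally forwards its keyword arguments to each `Page.synthesize`. A usage sketch, assuming `font_family` is among the keyword arguments `synthesize_page` accepts and `sample.jpg` is a hypothetical input image:

```python
from doctr.io import DocumentFile
from doctr.models import ocr_predictor

pages = DocumentFile.from_images(["sample.jpg"])
result = ocr_predictor(pretrained=True)(pages)

# kwargs are now passed down to synthesize_page for every page
rendered = result.synthesize(font_family="FreeMono.ttf")
```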
doctr/models/builder.py
CHANGED
@@ -266,7 +266,7 @@ class DocumentBuilder(NestedObject):
                 Line([
                     Word(
                         *word_preds[idx],
-                        tuple(
+                        tuple(tuple(pt) for pt in boxes[idx].tolist()),  # type: ignore[arg-type]
                         float(objectness_scores[idx]),
                         crop_orientations[idx],
                     )
@@ -500,7 +500,7 @@ class KIEDocumentBuilder(DocumentBuilder):
                 Prediction(
                     value=word_preds[idx][0],
                     confidence=word_preds[idx][1],
-                    geometry=tuple(
+                    geometry=tuple(tuple(pt) for pt in boxes[idx].tolist()),  # type: ignore[arg-type]
                     objectness_score=float(objectness_scores[idx]),
                     crop_orientation=crop_orientations[idx],
                 )