python-doctr 0.8.0__py3-none-any.whl → 0.8.1__py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- doctr/models/detection/__init__.py +1 -0
- doctr/models/detection/differentiable_binarization/tensorflow.py +14 -18
- doctr/models/detection/fast/__init__.py +6 -0
- doctr/models/detection/fast/base.py +256 -0
- doctr/models/detection/fast/pytorch.py +442 -0
- doctr/models/detection/fast/tensorflow.py +428 -0
- doctr/models/detection/zoo.py +14 -2
- doctr/models/modules/layers/pytorch.py +89 -9
- doctr/models/modules/layers/tensorflow.py +89 -9
- doctr/version.py +1 -1
- {python_doctr-0.8.0.dist-info → python_doctr-0.8.1.dist-info}/METADATA +14 -13
- {python_doctr-0.8.0.dist-info → python_doctr-0.8.1.dist-info}/RECORD +16 -12
- {python_doctr-0.8.0.dist-info → python_doctr-0.8.1.dist-info}/LICENSE +0 -0
- {python_doctr-0.8.0.dist-info → python_doctr-0.8.1.dist-info}/WHEEL +0 -0
- {python_doctr-0.8.0.dist-info → python_doctr-0.8.1.dist-info}/top_level.txt +0 -0
- {python_doctr-0.8.0.dist-info → python_doctr-0.8.1.dist-info}/zip-safe +0 -0
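The main functional change in 0.8.1 is the new FAST text-detection architecture (`doctr/models/detection/fast/`), wired into the detection zoo (`doctr/models/detection/zoo.py`). A minimal usage sketch, assuming the zoo exposes the new checkpoints under a name such as `fast_base` (the architecture name and the input image path below are illustrative, not taken from this diff):

```python
from doctr.io import DocumentFile
from doctr.models import detection_predictor, ocr_predictor

# Standalone text detection with the new FAST architecture (name "fast_base" assumed)
det_model = detection_predictor(arch="fast_base", pretrained=True)
page = DocumentFile.from_images("sample_page.png")  # hypothetical input image
boxes = det_model(page)

# Or as the detection stage of a full OCR pipeline
predictor = ocr_predictor(det_arch="fast_base", reco_arch="crnn_vgg16_bn", pretrained=True)
result = predictor(page)
```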
doctr/models/modules/layers/tensorflow.py
CHANGED

@@ -5,6 +5,7 @@

 from typing import Any, Tuple, Union

+import numpy as np
 import tensorflow as tf
 from tensorflow.keras import layers

@@ -28,18 +29,21 @@ class FASTConvLayer(layers.Layer, NestedObject):
     ) -> None:
         super().__init__()

-        converted_ks = (kernel_size, kernel_size) if isinstance(kernel_size, int) else kernel_size
+        self.groups = groups
+        self.in_channels = in_channels
+        self.converted_ks = (kernel_size, kernel_size) if isinstance(kernel_size, int) else kernel_size

         self.hor_conv, self.hor_bn = None, None
         self.ver_conv, self.ver_bn = None, None

-        padding = ((converted_ks[0] - 1) * dilation // 2, (converted_ks[1] - 1) * dilation // 2)
+        padding = ((self.converted_ks[0] - 1) * dilation // 2, (self.converted_ks[1] - 1) * dilation // 2)

         self.activation = layers.ReLU()
         self.conv_pad = layers.ZeroPadding2D(padding=padding)
+
         self.conv = layers.Conv2D(
             filters=out_channels,
-            kernel_size=converted_ks,
+            kernel_size=self.converted_ks,
             strides=stride,
             dilation_rate=dilation,
             groups=groups,
@@ -48,13 +52,13 @@ class FASTConvLayer(layers.Layer, NestedObject):

         self.bn = layers.BatchNormalization()

-        if converted_ks[1] != 1:
+        if self.converted_ks[1] != 1:
             self.ver_pad = layers.ZeroPadding2D(
-                padding=(int(((converted_ks[0] - 1) * dilation) / 2), 0),
+                padding=(int(((self.converted_ks[0] - 1) * dilation) / 2), 0),
             )
             self.ver_conv = layers.Conv2D(
                 filters=out_channels,
-                kernel_size=(converted_ks[0], 1),
+                kernel_size=(self.converted_ks[0], 1),
                 strides=stride,
                 dilation_rate=dilation,
                 groups=groups,
@@ -62,13 +66,13 @@ class FASTConvLayer(layers.Layer, NestedObject):
             )
             self.ver_bn = layers.BatchNormalization()

-        if converted_ks[0] != 1:
+        if self.converted_ks[0] != 1:
             self.hor_pad = layers.ZeroPadding2D(
-                padding=(0, int(((converted_ks[1] - 1) * dilation) / 2)),
+                padding=(0, int(((self.converted_ks[1] - 1) * dilation) / 2)),
             )
             self.hor_conv = layers.Conv2D(
                 filters=out_channels,
-                kernel_size=(1, converted_ks[1]),
+                kernel_size=(1, self.converted_ks[1]),
                 strides=stride,
                 dilation_rate=dilation,
                 groups=groups,
@@ -79,6 +83,9 @@ class FASTConvLayer(layers.Layer, NestedObject):
         self.rbr_identity = layers.BatchNormalization() if out_channels == in_channels and stride == 1 else None

     def call(self, x: tf.Tensor, **kwargs: Any) -> tf.Tensor:
+        if hasattr(self, "fused_conv"):
+            return self.activation(self.fused_conv(self.conv_pad(x, **kwargs), **kwargs))
+
         main_outputs = self.bn(self.conv(self.conv_pad(x, **kwargs), **kwargs), **kwargs)
         vertical_outputs = (
             self.ver_bn(self.ver_conv(self.ver_pad(x, **kwargs), **kwargs), **kwargs)
@@ -93,3 +100,76 @@ class FASTConvLayer(layers.Layer, NestedObject):
         id_out = self.rbr_identity(x, **kwargs) if self.rbr_identity is not None and self.ver_bn is not None else 0

         return self.activation(main_outputs + vertical_outputs + horizontal_outputs + id_out)
+
+    # The following logic is used to reparametrize the layer
+    # Adapted from: https://github.com/mindee/doctr/blob/main/doctr/models/modules/layers/pytorch.py
+    def _identity_to_conv(
+        self, identity: layers.BatchNormalization
+    ) -> Union[Tuple[tf.Tensor, tf.Tensor], Tuple[int, int]]:
+        if identity is None or not hasattr(identity, "moving_mean") or not hasattr(identity, "moving_variance"):
+            return 0, 0
+        if not hasattr(self, "id_tensor"):
+            input_dim = self.in_channels // self.groups
+            kernel_value = np.zeros((self.in_channels, input_dim, 1, 1), dtype=np.float32)
+            for i in range(self.in_channels):
+                kernel_value[i, i % input_dim, 0, 0] = 1
+            id_tensor = tf.constant(kernel_value, dtype=tf.float32)
+            self.id_tensor = self._pad_to_mxn_tensor(id_tensor)
+        kernel = self.id_tensor
+        std = tf.sqrt(identity.moving_variance + identity.epsilon)
+        t = tf.reshape(identity.gamma / std, (-1, 1, 1, 1))
+        return kernel * t, identity.beta - identity.moving_mean * identity.gamma / std
+
+    def _fuse_bn_tensor(self, conv: layers.Conv2D, bn: layers.BatchNormalization) -> Tuple[tf.Tensor, tf.Tensor]:
+        kernel = conv.kernel
+        kernel = self._pad_to_mxn_tensor(kernel)
+        std = tf.sqrt(bn.moving_variance + bn.epsilon)
+        t = tf.reshape(bn.gamma / std, (1, 1, 1, -1))
+        return kernel * t, bn.beta - bn.moving_mean * bn.gamma / std
+
+    def _get_equivalent_kernel_bias(self):
+        kernel_mxn, bias_mxn = self._fuse_bn_tensor(self.conv, self.bn)
+        if self.ver_conv is not None:
+            kernel_mx1, bias_mx1 = self._fuse_bn_tensor(self.ver_conv, self.ver_bn)
+        else:
+            kernel_mx1, bias_mx1 = 0, 0
+        if self.hor_conv is not None:
+            kernel_1xn, bias_1xn = self._fuse_bn_tensor(self.hor_conv, self.hor_bn)
+        else:
+            kernel_1xn, bias_1xn = 0, 0
+        kernel_id, bias_id = self._identity_to_conv(self.rbr_identity)
+        if not isinstance(kernel_id, int):
+            kernel_id = tf.transpose(kernel_id, (2, 3, 0, 1))
+        kernel_mxn = kernel_mxn + kernel_mx1 + kernel_1xn + kernel_id
+        bias_mxn = bias_mxn + bias_mx1 + bias_1xn + bias_id
+        return kernel_mxn, bias_mxn
+
+    def _pad_to_mxn_tensor(self, kernel: tf.Tensor) -> tf.Tensor:
+        kernel_height, kernel_width = self.converted_ks
+        height, width = kernel.shape[2:]
+        pad_left_right = tf.maximum(0, (kernel_width - width) // 2)
+        pad_top_down = tf.maximum(0, (kernel_height - height) // 2)
+        return tf.pad(kernel, [[0, 0], [0, 0], [pad_top_down, pad_top_down], [pad_left_right, pad_left_right]])
+
+    def reparameterize_layer(self):
+        kernel, bias = self._get_equivalent_kernel_bias()
+        self.fused_conv = layers.Conv2D(
+            filters=self.conv.filters,
+            kernel_size=self.conv.kernel_size,
+            strides=self.conv.strides,
+            padding=self.conv.padding,
+            dilation_rate=self.conv.dilation_rate,
+            groups=self.conv.groups,
+            use_bias=True,
+        )
+        # build layer to initialize weights and biases
+        self.fused_conv.build(input_shape=(None, None, None, kernel.shape[-2]))
+        self.fused_conv.set_weights([kernel.numpy(), bias.numpy()])
+        for para in self.trainable_variables:
+            para._trainable = False
+        for attr in ["conv", "bn", "ver_conv", "ver_bn", "hor_conv", "hor_bn"]:
+            if hasattr(self, attr):
+                delattr(self, attr)
+
+        if hasattr(self, "rbr_identity"):
+            delattr(self, "rbr_identity")
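The block added above gives the TensorFlow FASTConvLayer the same reparameterization path as its PyTorch counterpart: each BatchNormalization is folded into the convolution that precedes it (kernel' = kernel * gamma / sqrt(var + eps), bias' = beta - mean * gamma / sqrt(var + eps)), the branch kernels are summed into one kernel/bias pair, and `reparameterize_layer()` swaps in a single `fused_conv` used by `call()` at inference. A small standalone check of that fusion identity (not part of the package; layer sizes are arbitrary):

```python
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers

# Toy check of the conv + BN fusion identity used by _fuse_bn_tensor above.
x = tf.random.normal((1, 8, 8, 3))

conv = layers.Conv2D(4, 3, padding="same", use_bias=False)
bn = layers.BatchNormalization()
_ = bn(conv(x), training=False)  # build both layers

# Give the BN non-trivial inference statistics: [gamma, beta, moving_mean, moving_variance]
rng = np.random.default_rng(0)
bn.set_weights([
    rng.normal(1.0, 0.1, 4).astype("float32"),
    rng.normal(0.0, 0.1, 4).astype("float32"),
    rng.normal(0.0, 0.1, 4).astype("float32"),
    rng.uniform(0.5, 1.5, 4).astype("float32"),
])
y_ref = bn(conv(x), training=False)

# Fold BN into the conv kernel and bias, as in _fuse_bn_tensor
std = tf.sqrt(bn.moving_variance + bn.epsilon)
scale = tf.reshape(bn.gamma / std, (1, 1, 1, -1))  # one scale per output channel
fused_kernel = conv.kernel * scale
fused_bias = bn.beta - bn.moving_mean * bn.gamma / std

fused = layers.Conv2D(4, 3, padding="same", use_bias=True)
fused.build(x.shape)
fused.set_weights([fused_kernel.numpy(), fused_bias.numpy()])

print(np.allclose(y_ref.numpy(), fused(x).numpy(), atol=1e-5))  # expected: True
```

With the moving statistics folded in this way, the fused convolution reproduces the conv + BN output exactly in inference mode, which is why the fused path in `call()` can skip the individual branches.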
doctr/version.py
CHANGED

@@ -1 +1 @@
-__version__ = 'v0.8.0'
+__version__ = 'v0.8.1'
{python_doctr-0.8.0.dist-info → python_doctr-0.8.1.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: python-doctr
-Version: 0.8.0
+Version: 0.8.1
 Summary: Document Text Recognition (docTR): deep Learning for high-performance OCR on documents.
 Author-email: Mindee <contact@mindee.com>
 Maintainer: François-Guillaume Fernandez, Charles Gaillard, Olivier Dulcy, Felix Dittrich
@@ -209,7 +209,7 @@ License: Apache License
 Project-URL: documentation, https://mindee.github.io/doctr
 Project-URL: repository, https://github.com/mindee/doctr
 Project-URL: tracker, https://github.com/mindee/doctr/issues
-Project-URL: changelog, https://github.
+Project-URL: changelog, https://mindee.github.io/doctr/changelog.html
 Keywords: OCR,deep learning,computer vision,tensorflow,pytorch,text detection,text recognition
 Classifier: Development Status :: 4 - Beta
 Classifier: Intended Audience :: Developers
@@ -296,10 +296,10 @@ Requires-Dist: torchvision >=0.13.0 ; extra == 'torch'
 Requires-Dist: onnx <3.0.0,>=1.12.0 ; extra == 'torch'

 <p align="center">
-  <img src="docs/images/Logo_doctr.gif" width="40%">
+  <img src="https://github.com/mindee/doctr/raw/main/docs/images/Logo_doctr.gif" width="40%">
 </p>

-[](https://slack.mindee.com) [](LICENSE)  [](https://github.com/mindee/doctr/pkgs/container/doctr) [](https://codecov.io/gh/mindee/doctr) [](https://www.codefactor.io/repository/github/mindee/doctr) [](https://app.codacy.com/gh/mindee/doctr?utm_source=github.com&utm_medium=referral&utm_content=mindee/doctr&utm_campaign=Badge_Grade) [](https://mindee.github.io/doctr)
+[](https://slack.mindee.com) [](LICENSE)  [](https://github.com/mindee/doctr/pkgs/container/doctr) [](https://codecov.io/gh/mindee/doctr) [](https://www.codefactor.io/repository/github/mindee/doctr) [](https://app.codacy.com/gh/mindee/doctr?utm_source=github.com&utm_medium=referral&utm_content=mindee/doctr&utm_campaign=Badge_Grade) [](https://mindee.github.io/doctr) [](https://pypi.org/project/python-doctr/) [](https://huggingface.co/spaces/mindee/doctr) [](https://colab.research.google.com/github/mindee/notebooks/blob/main/doctr/quicktour.ipynb)


 **Optical Character Recognition made seamless & accessible to anyone, powered by TensorFlow 2 & PyTorch**
@@ -309,7 +309,7 @@ What you can expect from this repository:
 - efficient ways to parse textual information (localize and identify each word) from your documents
 - guidance on how to integrate this in your current architecture

-![…](docs/images/…)
+![…](https://github.com/mindee/doctr/raw/main/docs/images/…)

 ## Quick Tour

@@ -375,7 +375,7 @@ To interpret your model's predictions, you can visualize them interactively as f
 result.show()
 ```

-![…](docs/images/…)
+![…](https://github.com/mindee/doctr/raw/main/docs/images/…)

 Or even rebuild the original document from its predictions:

@@ -386,7 +386,7 @@ synthetic_pages = result.synthesize()
 plt.imshow(synthetic_pages[0]); plt.axis('off'); plt.show()
 ```

-![…](docs/images/…)
+![…](https://github.com/mindee/doctr/raw/main/docs/images/…)

 The `ocr_predictor` returns a `Document` object with a nested structure (with `Page`, `Block`, `Line`, `Word`, `Artefact`).
 To get a better understanding of our document model, check our [documentation](https://mindee.github.io/doctr/modules/io.html#document-structure):
@@ -425,7 +425,7 @@ The KIE predictor results per page are in a dictionary format with each key repr

 ### If you are looking for support from the Mindee team

-[![…](docs/images/…)](https://mindee.com/product/doctr)
+[![…](https://github.com/mindee/doctr/raw/main/docs/images/…)](https://mindee.com/product/doctr)

 ## Installation

@@ -494,6 +494,7 @@ Credits where it's due: this repository is implementing, among others, architect

 - DBNet: [Real-time Scene Text Detection with Differentiable Binarization](https://arxiv.org/pdf/1911.08947.pdf).
 - LinkNet: [LinkNet: Exploiting Encoder Representations for Efficient Semantic Segmentation](https://arxiv.org/pdf/1707.03718.pdf)
+- FAST: [FAST: Faster Arbitrarily-Shaped Text Detector with Minimalist Kernel Representation](https://arxiv.org/pdf/2111.02394.pdf)

 ### Text Recognition

@@ -513,7 +514,7 @@ The full package documentation is available [here](https://mindee.github.io/doct

 A minimal demo app is provided for you to play with our end-to-end OCR models!

-![…](docs/images/…)
+![…](https://github.com/mindee/doctr/raw/main/docs/images/…)

 #### Live demo

@@ -553,11 +554,11 @@ USE_TORCH=1 streamlit run demo/app.py
 Instead of having your demo actually running Python, you would prefer to run everything in your web browser?
 Check out our [TensorFlow.js demo](https://github.com/mindee/doctr-tfjs-demo) to get started!

-![…](docs/images/…)
+![…](https://github.com/mindee/doctr/raw/main/docs/images/…)

 ### Docker container

-[We
+[We offer Docker container support for easy testing and deployment](https://github.com/mindee/doctr/pkgs/container/doctr).

 #### Using GPU with docTR Docker Images

@@ -673,8 +674,8 @@ If you wish to cite this project, feel free to use this [BibTeX](http://www.bibt

 If you scrolled down to this section, you most likely appreciate open source. Do you feel like extending the range of our supported characters? Or perhaps submitting a paper implementation? Or contributing in any other way?

-You're in luck, we compiled a short guide (cf. [`CONTRIBUTING`](
+You're in luck, we compiled a short guide (cf. [`CONTRIBUTING`](https://mindee.github.io/doctr/contributing/contributing.html)) for you to easily do so!

 ## License

-Distributed under the Apache 2.0 License. See [`LICENSE`](
+Distributed under the Apache 2.0 License. See [`LICENSE`](https://github.com/mindee/doctr?tab=Apache-2.0-1-ov-file#readme) for more information.
{python_doctr-0.8.0.dist-info → python_doctr-0.8.1.dist-info}/RECORD
CHANGED

@@ -1,6 +1,6 @@
 doctr/__init__.py,sha256=m6lezpfDosfTVFssFVrN7aH0tDzM4h9OgCCi0Nevq8g,161
 doctr/file_utils.py,sha256=P6Ld5_rFSMwv1m91yhARdJgF7KIXWzgUJXUXaUiUNgc,3156
-doctr/version.py,sha256=
+doctr/version.py,sha256=M-PQbswhc8YrXcs4GoOEbQ4Ze9_y4MlJlfbrXibvsTs,23
 doctr/datasets/__init__.py,sha256=umI2ABbgWIKuhswl8RGaF6CefFiI8DdEGVb0Kbd8aZA,574
 doctr/datasets/cord.py,sha256=p9ObLgqV3uB7TYoS5Puag0q-JtFTPrXUztkxL36U69U,4746
 doctr/datasets/detection.py,sha256=H6inFO6rjdvU_Asm9UTod5r5bjjpmJJWGityv0RTJ8M,3607
@@ -71,16 +71,20 @@ doctr/models/classification/vgg/tensorflow.py,sha256=mVuyIXtX7iu622K0GwXkALOM7gz
 doctr/models/classification/vit/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
 doctr/models/classification/vit/pytorch.py,sha256=770ZrCPX7LlVUjE9XNFrzcb2i_0lHStJ8Q4vXEhXEHs,6096
 doctr/models/classification/vit/tensorflow.py,sha256=TtglXtKAE6y_gfzk8DOhUwoQNIMhK86tmhCB1SbT-k0,5869
-doctr/models/detection/__init__.py,sha256=
+doctr/models/detection/__init__.py,sha256=RqSz5beehLiqhW0PwFLFmCfTyMjofO-0umcQJLDMHjY,105
 doctr/models/detection/core.py,sha256=K2uQTIu3ttgxj7YF7i1a-X6djIGCSFjZnQQ57JQBDv0,3566
-doctr/models/detection/zoo.py,sha256=
+doctr/models/detection/zoo.py,sha256=c0wm0g6ihkCkSR1G7iaY-IyAnfbEKpa-jbY6kVv4zrY,3032
 doctr/models/detection/_utils/__init__.py,sha256=jDHErtF1nkN-uICx8prmdvmGTSoN6U27ZVmHLoqtcNo,131
 doctr/models/detection/_utils/pytorch.py,sha256=UZ-PK5Uw0dVN978JGj5MVtF7kLXTL4EtugCoq_VVkVk,1063
 doctr/models/detection/_utils/tensorflow.py,sha256=9D2ita4ZqJus2byLe7bkSIhyYExAiOLAGBbC7-oRZDU,979
 doctr/models/detection/differentiable_binarization/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
 doctr/models/detection/differentiable_binarization/base.py,sha256=ucjNmclcN0SfTBZxgucaGC1qWqI3UvBLHfMGzR6LsZI,16323
 doctr/models/detection/differentiable_binarization/pytorch.py,sha256=-bByMRDipo_0WIXuFPf9DUPfWduVgLd0UIn48GP3f94,15983
-doctr/models/detection/differentiable_binarization/tensorflow.py,sha256=
+doctr/models/detection/differentiable_binarization/tensorflow.py,sha256=l4QltrgDMLK_eY0dxEaCDzrB8rlhVpwUmOAPNIzd_70,14506
+doctr/models/detection/fast/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
+doctr/models/detection/fast/base.py,sha256=AL3OZ0_OOxOgeE8EJO8EO7RR1c-TVUKepuXlHpG2a74,10818
+doctr/models/detection/fast/pytorch.py,sha256=u6yjf3zYNrF-qdPoDlUXlWx9cbrw2CH6oX0tfzs3zRI,15920
+doctr/models/detection/fast/tensorflow.py,sha256=yz7Eb9siNSVba4USDHJIOE9BLXFNDEvA_ZZsLcOqmvQ,15571
 doctr/models/detection/linknet/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
 doctr/models/detection/linknet/base.py,sha256=51teowVwuJ_PKJnL7die_KWLNpdnZTIYVh1TXstfIOs,10508
 doctr/models/detection/linknet/pytorch.py,sha256=sodWXaCDv1taRl3g6lgwxitvhU-ZszfN-OIofsorkp8,13810
@@ -96,8 +100,8 @@ doctr/models/kie_predictor/pytorch.py,sha256=OW0BAVT11R7PKCminD8VbyZ6En12TyaE103
 doctr/models/kie_predictor/tensorflow.py,sha256=JAbHyhEE-OEx1r3NIqWYTlhAb9ECY7ZfW5Jc4d-LwVw,6697
 doctr/models/modules/__init__.py,sha256=pouP7obVTu4p6aHkyaqa1yHKbynpvT0Hgo-LO_1U2R4,83
 doctr/models/modules/layers/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
-doctr/models/modules/layers/pytorch.py,sha256=
-doctr/models/modules/layers/tensorflow.py,sha256=
+doctr/models/modules/layers/pytorch.py,sha256=wKdfoUDCn_jYOgZiLVc4_4K0DlkYY9If4-NhJwUBYOY,6938
+doctr/models/modules/layers/tensorflow.py,sha256=yslZpOtR2fP5Do7fOxR-GD08DPm6mCLo2fDMhp23QTI,7278
 doctr/models/modules/transformer/__init__.py,sha256=zwLK6mpproUGFH-1PUNiQyoR9IrAAakj7RgOiTJaBjk,200
 doctr/models/modules/transformer/pytorch.py,sha256=Bn6KPvhBdtS2MlRQmQT7c_d63maRfwfMias3P8eJ9fA,7725
 doctr/models/modules/transformer/tensorflow.py,sha256=NTF-Q6ClUIMdSWDqus6kPZjOlKC3XcJ3HqUeyZTqtnU,9113
@@ -161,9 +165,9 @@ doctr/utils/metrics.py,sha256=hYRRlIW-e8onLPsYvnJL9HzBtwZT3x-p_yu52INz4uw,25935
 doctr/utils/multithreading.py,sha256=iEM6o_qjutH-CxFTz7K1VQseYpVaHH3Hpw_yNDoQBSw,1989
 doctr/utils/repr.py,sha256=3GdMquo1NtwNkQPoB-nmDm_AFmU3sLc4T3VfGck9uoQ,2111
 doctr/utils/visualization.py,sha256=iIO6mEqqVKvkxGpDQJomJmGeplCxAuwuS8Vur0vEtYg,17758
-python_doctr-0.8.0.dist-info/LICENSE,sha256=
-python_doctr-0.8.0.dist-info/METADATA,sha256=
-python_doctr-0.8.0.dist-info/WHEEL,sha256=
-python_doctr-0.8.0.dist-info/top_level.txt,sha256=
-python_doctr-0.8.0.dist-info/zip-safe,sha256=
-python_doctr-0.8.0.dist-info/RECORD,,
+python_doctr-0.8.1.dist-info/LICENSE,sha256=75RTSsXOsAYhGpxsHc9U41ep6GS7vrUPufeekgoeOXM,11336
+python_doctr-0.8.1.dist-info/METADATA,sha256=E2AtXeYk0nHqDzCDhfnGceCC4rR2HFU9lZvLDKYVIa4,33155
+python_doctr-0.8.1.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+python_doctr-0.8.1.dist-info/top_level.txt,sha256=lCgp4pmjPI3HYph62XhfzA3jRwM715kGtJPmqIUJ9t8,6
+python_doctr-0.8.1.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+python_doctr-0.8.1.dist-info/RECORD,,
{python_doctr-0.8.0.dist-info → python_doctr-0.8.1.dist-info}/LICENSE
File without changes

{python_doctr-0.8.0.dist-info → python_doctr-0.8.1.dist-info}/WHEEL
File without changes

{python_doctr-0.8.0.dist-info → python_doctr-0.8.1.dist-info}/top_level.txt
File without changes

{python_doctr-0.8.0.dist-info → python_doctr-0.8.1.dist-info}/zip-safe
File without changes