python-doctr 0.11.0__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- doctr/__init__.py +0 -1
- doctr/datasets/__init__.py +1 -5
- doctr/datasets/coco_text.py +139 -0
- doctr/datasets/cord.py +2 -1
- doctr/datasets/datasets/__init__.py +1 -6
- doctr/datasets/datasets/pytorch.py +2 -2
- doctr/datasets/funsd.py +2 -2
- doctr/datasets/generator/__init__.py +1 -6
- doctr/datasets/ic03.py +1 -1
- doctr/datasets/ic13.py +2 -1
- doctr/datasets/iiit5k.py +4 -1
- doctr/datasets/imgur5k.py +9 -2
- doctr/datasets/ocr.py +1 -1
- doctr/datasets/recognition.py +1 -1
- doctr/datasets/svhn.py +1 -1
- doctr/datasets/svt.py +2 -2
- doctr/datasets/synthtext.py +15 -2
- doctr/datasets/utils.py +7 -6
- doctr/datasets/vocabs.py +1100 -54
- doctr/file_utils.py +2 -92
- doctr/io/elements.py +37 -3
- doctr/io/image/__init__.py +1 -7
- doctr/io/image/pytorch.py +1 -1
- doctr/models/_utils.py +4 -4
- doctr/models/classification/__init__.py +1 -0
- doctr/models/classification/magc_resnet/__init__.py +1 -6
- doctr/models/classification/magc_resnet/pytorch.py +3 -4
- doctr/models/classification/mobilenet/__init__.py +1 -6
- doctr/models/classification/mobilenet/pytorch.py +15 -1
- doctr/models/classification/predictor/__init__.py +1 -6
- doctr/models/classification/predictor/pytorch.py +2 -2
- doctr/models/classification/resnet/__init__.py +1 -6
- doctr/models/classification/resnet/pytorch.py +26 -3
- doctr/models/classification/textnet/__init__.py +1 -6
- doctr/models/classification/textnet/pytorch.py +11 -2
- doctr/models/classification/vgg/__init__.py +1 -6
- doctr/models/classification/vgg/pytorch.py +16 -1
- doctr/models/classification/vip/__init__.py +1 -0
- doctr/models/classification/vip/layers/__init__.py +1 -0
- doctr/models/classification/vip/layers/pytorch.py +615 -0
- doctr/models/classification/vip/pytorch.py +505 -0
- doctr/models/classification/vit/__init__.py +1 -6
- doctr/models/classification/vit/pytorch.py +12 -3
- doctr/models/classification/zoo.py +7 -8
- doctr/models/detection/_utils/__init__.py +1 -6
- doctr/models/detection/core.py +1 -1
- doctr/models/detection/differentiable_binarization/__init__.py +1 -6
- doctr/models/detection/differentiable_binarization/base.py +7 -16
- doctr/models/detection/differentiable_binarization/pytorch.py +13 -4
- doctr/models/detection/fast/__init__.py +1 -6
- doctr/models/detection/fast/base.py +6 -17
- doctr/models/detection/fast/pytorch.py +17 -8
- doctr/models/detection/linknet/__init__.py +1 -6
- doctr/models/detection/linknet/base.py +5 -15
- doctr/models/detection/linknet/pytorch.py +12 -3
- doctr/models/detection/predictor/__init__.py +1 -6
- doctr/models/detection/predictor/pytorch.py +1 -1
- doctr/models/detection/zoo.py +15 -32
- doctr/models/factory/hub.py +9 -22
- doctr/models/kie_predictor/__init__.py +1 -6
- doctr/models/kie_predictor/pytorch.py +3 -7
- doctr/models/modules/layers/__init__.py +1 -6
- doctr/models/modules/layers/pytorch.py +52 -4
- doctr/models/modules/transformer/__init__.py +1 -6
- doctr/models/modules/transformer/pytorch.py +2 -2
- doctr/models/modules/vision_transformer/__init__.py +1 -6
- doctr/models/predictor/__init__.py +1 -6
- doctr/models/predictor/base.py +3 -8
- doctr/models/predictor/pytorch.py +3 -6
- doctr/models/preprocessor/__init__.py +1 -6
- doctr/models/preprocessor/pytorch.py +27 -32
- doctr/models/recognition/__init__.py +1 -0
- doctr/models/recognition/crnn/__init__.py +1 -6
- doctr/models/recognition/crnn/pytorch.py +16 -7
- doctr/models/recognition/master/__init__.py +1 -6
- doctr/models/recognition/master/pytorch.py +15 -6
- doctr/models/recognition/parseq/__init__.py +1 -6
- doctr/models/recognition/parseq/pytorch.py +26 -8
- doctr/models/recognition/predictor/__init__.py +1 -6
- doctr/models/recognition/predictor/_utils.py +100 -47
- doctr/models/recognition/predictor/pytorch.py +4 -5
- doctr/models/recognition/sar/__init__.py +1 -6
- doctr/models/recognition/sar/pytorch.py +13 -4
- doctr/models/recognition/utils.py +56 -47
- doctr/models/recognition/viptr/__init__.py +1 -0
- doctr/models/recognition/viptr/pytorch.py +277 -0
- doctr/models/recognition/vitstr/__init__.py +1 -6
- doctr/models/recognition/vitstr/pytorch.py +13 -4
- doctr/models/recognition/zoo.py +13 -8
- doctr/models/utils/__init__.py +1 -6
- doctr/models/utils/pytorch.py +29 -19
- doctr/transforms/functional/__init__.py +1 -6
- doctr/transforms/functional/pytorch.py +4 -4
- doctr/transforms/modules/__init__.py +1 -7
- doctr/transforms/modules/base.py +26 -92
- doctr/transforms/modules/pytorch.py +28 -26
- doctr/utils/data.py +1 -1
- doctr/utils/geometry.py +7 -11
- doctr/utils/visualization.py +1 -1
- doctr/version.py +1 -1
- {python_doctr-0.11.0.dist-info → python_doctr-1.0.0.dist-info}/METADATA +22 -63
- python_doctr-1.0.0.dist-info/RECORD +149 -0
- {python_doctr-0.11.0.dist-info → python_doctr-1.0.0.dist-info}/WHEEL +1 -1
- doctr/datasets/datasets/tensorflow.py +0 -59
- doctr/datasets/generator/tensorflow.py +0 -58
- doctr/datasets/loader.py +0 -94
- doctr/io/image/tensorflow.py +0 -101
- doctr/models/classification/magc_resnet/tensorflow.py +0 -196
- doctr/models/classification/mobilenet/tensorflow.py +0 -433
- doctr/models/classification/predictor/tensorflow.py +0 -60
- doctr/models/classification/resnet/tensorflow.py +0 -397
- doctr/models/classification/textnet/tensorflow.py +0 -266
- doctr/models/classification/vgg/tensorflow.py +0 -116
- doctr/models/classification/vit/tensorflow.py +0 -192
- doctr/models/detection/_utils/tensorflow.py +0 -34
- doctr/models/detection/differentiable_binarization/tensorflow.py +0 -414
- doctr/models/detection/fast/tensorflow.py +0 -419
- doctr/models/detection/linknet/tensorflow.py +0 -369
- doctr/models/detection/predictor/tensorflow.py +0 -70
- doctr/models/kie_predictor/tensorflow.py +0 -187
- doctr/models/modules/layers/tensorflow.py +0 -171
- doctr/models/modules/transformer/tensorflow.py +0 -235
- doctr/models/modules/vision_transformer/tensorflow.py +0 -100
- doctr/models/predictor/tensorflow.py +0 -155
- doctr/models/preprocessor/tensorflow.py +0 -122
- doctr/models/recognition/crnn/tensorflow.py +0 -308
- doctr/models/recognition/master/tensorflow.py +0 -313
- doctr/models/recognition/parseq/tensorflow.py +0 -508
- doctr/models/recognition/predictor/tensorflow.py +0 -79
- doctr/models/recognition/sar/tensorflow.py +0 -416
- doctr/models/recognition/vitstr/tensorflow.py +0 -278
- doctr/models/utils/tensorflow.py +0 -182
- doctr/transforms/functional/tensorflow.py +0 -254
- doctr/transforms/modules/tensorflow.py +0 -562
- python_doctr-0.11.0.dist-info/RECORD +0 -173
- {python_doctr-0.11.0.dist-info → python_doctr-1.0.0.dist-info/licenses}/LICENSE +0 -0
- {python_doctr-0.11.0.dist-info → python_doctr-1.0.0.dist-info}/top_level.txt +0 -0
- {python_doctr-0.11.0.dist-info → python_doctr-1.0.0.dist-info}/zip-safe +0 -0
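Taken together, the list above describes one breaking change: 1.0.0 removes the entire TensorFlow backend (every `*/tensorflow.py` module in the second half of the list), promotes PyTorch from an optional extra to a core dependency, and adds the new VIP classification backbone and VIPTR recognizer. A hedged migration sketch, assuming the public predictor API is otherwise unchanged from 0.11.0 (which this diff does not show):

```python
# 0.11.0: pip install "python-doctr[torch]" (or [tf]) and optionally a
#         USE_TORCH=1 / USE_TF=1 environment variable to pick the backend.
# 1.0.0:  pip install python-doctr; PyTorch is the only backend.
from doctr.io import DocumentFile
from doctr.models import ocr_predictor

doc = DocumentFile.from_images(["page.jpg"])  # hypothetical input image
predictor = ocr_predictor(pretrained=True)
result = predictor(doc)
print(result.render())
```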
{python_doctr-0.11.0.dist-info → python_doctr-1.0.0.dist-info}/METADATA
@@ -1,6 +1,6 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: python-doctr
-Version: 0.11.0
+Version: 1.0.0
 Summary: Document Text Recognition (docTR): deep Learning for high-performance OCR on documents.
 Author-email: Mindee <contact@mindee.com>
 Maintainer: François-Guillaume Fernandez, Charles Gaillard, Olivier Dulcy, Felix Dittrich
@@ -210,7 +210,7 @@ Project-URL: documentation, https://mindee.github.io/doctr
 Project-URL: repository, https://github.com/mindee/doctr
 Project-URL: tracker, https://github.com/mindee/doctr/issues
 Project-URL: changelog, https://mindee.github.io/doctr/changelog.html
-Keywords: OCR,deep learning,computer vision,
+Keywords: OCR,deep learning,computer vision,pytorch,text detection,text recognition
 Classifier: Development Status :: 4 - Beta
 Classifier: Intended Audience :: Developers
 Classifier: Intended Audience :: Education
@@ -226,6 +226,9 @@ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Requires-Python: <4,>=3.10.0
 Description-Content-Type: text/markdown
 License-File: LICENSE
+Requires-Dist: torch<3.0.0,>=2.0.0
+Requires-Dist: torchvision>=0.15.0
+Requires-Dist: onnx<3.0.0,>=1.12.0
 Requires-Dist: numpy<3.0.0,>=1.16.0
 Requires-Dist: scipy<2.0.0,>=1.4.0
 Requires-Dist: h5py<4.0.0,>=3.1.0
@@ -239,16 +242,8 @@ Requires-Dist: huggingface-hub<1.0.0,>=0.20.0
 Requires-Dist: Pillow>=9.2.0
 Requires-Dist: defusedxml>=0.7.0
 Requires-Dist: anyascii>=0.3.2
+Requires-Dist: validators>=0.18.0
 Requires-Dist: tqdm>=4.30.0
-Provides-Extra: tf
-Requires-Dist: tensorflow[and-cuda]<3.0.0,>=2.15.0; sys_platform == "linux" and extra == "tf"
-Requires-Dist: tensorflow<3.0.0,>=2.15.0; sys_platform != "linux" and extra == "tf"
-Requires-Dist: tf-keras<3.0.0,>=2.15.0; extra == "tf"
-Requires-Dist: tf2onnx<2.0.0,>=1.16.0; extra == "tf"
-Provides-Extra: torch
-Requires-Dist: torch<3.0.0,>=2.0.0; extra == "torch"
-Requires-Dist: torchvision>=0.15.0; extra == "torch"
-Requires-Dist: onnx<3.0.0,>=1.12.0; extra == "torch"
 Provides-Extra: html
 Requires-Dist: weasyprint>=55.0; extra == "html"
 Provides-Extra: viz
@@ -276,10 +271,6 @@ Requires-Dist: sphinx-markdown-tables>=0.0.15; extra == "docs"
 Requires-Dist: sphinx-tabs>=3.3.0; extra == "docs"
 Requires-Dist: furo>=2022.3.4; extra == "docs"
 Provides-Extra: dev
-Requires-Dist: tensorflow[and-cuda]<3.0.0,>=2.15.0; sys_platform == "linux" and extra == "dev"
-Requires-Dist: tensorflow<3.0.0,>=2.15.0; sys_platform != "linux" and extra == "dev"
-Requires-Dist: tf-keras<3.0.0,>=2.15.0; extra == "dev"
-Requires-Dist: tf2onnx<2.0.0,>=1.16.0; extra == "dev"
 Requires-Dist: torch<3.0.0,>=2.0.0; extra == "dev"
 Requires-Dist: torchvision>=0.15.0; extra == "dev"
 Requires-Dist: onnx<3.0.0,>=1.12.0; extra == "dev"
@@ -302,15 +293,16 @@ Requires-Dist: recommonmark>=0.7.1; extra == "dev"
 Requires-Dist: sphinx-markdown-tables>=0.0.15; extra == "dev"
 Requires-Dist: sphinx-tabs>=3.3.0; extra == "dev"
 Requires-Dist: furo>=2022.3.4; extra == "dev"
+Dynamic: license-file

 <p align="center">
   <img src="https://github.com/mindee/doctr/raw/main/docs/images/Logo_doctr.gif" width="40%">
 </p>

-[](https://slack.mindee.com) [](LICENSE)  [](https://github.com/mindee/doctr/pkgs/container/doctr) [](https://codecov.io/gh/mindee/doctr) [](https://www.codefactor.io/repository/github/mindee/doctr) [](https://app.codacy.com/gh/mindee/doctr?utm_source=github.com&utm_medium=referral&utm_content=mindee/doctr&utm_campaign=Badge_Grade) [](https://mindee.github.io/doctr)
+[](https://slack.mindee.com) [](LICENSE)  [](https://github.com/mindee/doctr/pkgs/container/doctr) [](https://codecov.io/gh/mindee/doctr) [](https://www.codefactor.io/repository/github/mindee/doctr) [](https://app.codacy.com/gh/mindee/doctr?utm_source=github.com&utm_medium=referral&utm_content=mindee/doctr&utm_campaign=Badge_Grade) [](https://mindee.github.io/doctr) [](https://pypi.org/project/python-doctr/) [](https://huggingface.co/spaces/mindee/doctr) [](https://colab.research.google.com/github/mindee/notebooks/blob/main/doctr/quicktour.ipynb) [](https://gurubase.io/g/doctr)


-**Optical Character Recognition made seamless & accessible to anyone, powered by
+**Optical Character Recognition made seamless & accessible to anyone, powered by PyTorch**

 What you can expect from this repository:
@@ -450,24 +442,15 @@
 pip install python-doctr
 ```

-
-
-We try to keep framework-specific dependencies to a minimum. You can install framework-specific builds as follows:
+We try to keep extra dependencies to a minimum. You can install specific builds as follows:

 ```shell
-# for TensorFlow
-pip install "python-doctr[tf]"
-# for PyTorch
-pip install "python-doctr[torch]"
+# standard build
+pip install python-doctr
 # optional dependencies for visualization, html, and contrib modules can be installed as follows:
-pip install "python-doctr[
+pip install "python-doctr[viz,html,contrib]"
 ```

-For MacBooks with M1 chip, you will need some additional packages or specific versions:
-
-- TensorFlow 2: [metal plugin](https://developer.apple.com/metal/tensorflow-plugin/)
-- PyTorch: [version >= 2.0.0](https://pytorch.org/get-started/locally/#start-locally)
-
 ### Developer mode
@@ -478,13 +461,10 @@ git clone https://github.com/mindee/doctr.git
 pip install -e doctr/.
 ```

-Again, if you prefer to avoid the risk of missing dependencies, you can install the
+Again, if you prefer to avoid the risk of missing dependencies, you can install the build:

 ```shell
-# for TensorFlow
-pip install -e doctr/.[tf]
-# for PyTorch
-pip install -e doctr/.[torch]
+pip install -e doctr/.
 ```

 ## Models architectures
@@ -504,6 +484,7 @@ Credits where it's due: this repository is implementing, among others, architectures from
 - MASTER: [MASTER: Multi-Aspect Non-local Network for Scene Text Recognition](https://arxiv.org/pdf/1910.02562.pdf).
 - ViTSTR: [Vision Transformer for Fast and Efficient Scene Text Recognition](https://arxiv.org/pdf/2105.08582.pdf).
 - PARSeq: [Scene Text Recognition with Permuted Autoregressive Sequence Models](https://arxiv.org/pdf/2207.06966).
+- VIPTR: [A Vision Permutable Extractor for Fast and Efficient Scene Text Recognition](https://arxiv.org/abs/2401.10110).

 ## More goodies
@@ -526,20 +507,6 @@

-##### Tensorflow version
-
-```shell
-pip install -r demo/tf-requirements.txt
-```
-
-Then run your app in your default browser with:
-
-```shell
-USE_TF=1 streamlit run demo/app.py
-```
-
-##### PyTorch version
-
 ```shell
 pip install -r demo/pt-requirements.txt
 ```
@@ -547,23 +514,16 @@ pip install -r demo/pt-requirements.txt
 Then run your app in your default browser with:

 ```shell
-USE_TORCH=1 streamlit run demo/app.py
+streamlit run demo/app.py
 ```

-#### TensorFlow.js
-
-Instead of having your demo actually running Python, you would prefer to run everything in your web browser?
-Check out our [TensorFlow.js demo](https://github.com/mindee/doctr-tfjs-demo) to get started!
-
-
-
 ### Docker container

 We offer Docker container support for easy testing and deployment. [Here are the available docker tags.](https://github.com/mindee/doctr/pkgs/container/doctr).

 #### Using GPU with docTR Docker Images

-The docTR Docker images are GPU-ready and based on CUDA `12.2`. Make sure your host is **at least `12.2`**, otherwise Torch
+The docTR Docker images are GPU-ready and based on CUDA `12.2`. Make sure your host is **at least `12.2`**, otherwise Torch won't be able to initialize the GPU.
 Please ensure that Docker is configured to use your GPU.

 To verify and configure GPU support for Docker, please follow the instructions provided in the [NVIDIA Container Toolkit Installation Guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html).
@@ -578,7 +538,7 @@ docker run -it --gpus all ghcr.io/mindee/doctr:torch-py3.9.18-2024-10 bash

 The Docker images for docTR follow a specific tag nomenclature: `<deps>-py<python_version>-<doctr_version|YYYY-MM>`. Here's a breakdown of the tag structure:

-- `<deps>`: `
+- `<deps>`: `torch`, `torch-viz-html-contrib`.
 - `<python_version>`: `3.9.18`, `3.10.13` or `3.11.8`.
 - `<doctr_version>`: a tag >= `v0.11.0`
 - `<YYYY-MM>`: e.g. `2014-10`
@@ -587,7 +547,6 @@ Here are examples of different image tags:

 | Tag | Description |
 |----------------------------|---------------------------------------------------|
-| `tf-py3.10.13-v0.11.0` | TensorFlow version `3.10.13` with docTR `v0.11.0`. |
 | `torch-viz-html-contrib-py3.11.8-2024-10` | Torch with extra dependencies version `3.11.8` from latest commit on `main` in `2024-10`. |
 | `torch-py3.11.8-2024-10`| PyTorch version `3.11.8` from latest commit on `main` in `2024-10`. |

@@ -599,10 +558,10 @@ You can also build docTR Docker images locally on your computer.
 docker build -t doctr .
 ```

-You can specify custom Python versions and docTR versions using build arguments. For example, to build a docTR image with
+You can specify custom Python versions and docTR versions using build arguments. For example, to build a docTR image with PyTorch, Python version `3.9.10`, and docTR version `v0.7.0`, run the following command:

 ```shell
-docker build -t doctr --build-arg FRAMEWORK=
+docker build -t doctr --build-arg FRAMEWORK=torch --build-arg PYTHON_VERSION=3.9.10 --build-arg DOCTR_VERSION=v0.7.0 .
 ```

 ### Example script
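A practical consequence of the metadata hunks above: torch, torchvision, and onnx move from the `torch` extra into the unconditional requirements. A minimal check against an installed copy, using only the standard library (a sketch, not part of this diff):

```python
# Confirm torch is now a core requirement rather than gated behind an extra.
from importlib.metadata import requires, version

print(version("python-doctr"))  # expected: 1.0.0
for req in requires("python-doctr") or []:
    if "torch" in req and ";" not in req:
        print(req)  # e.g. torch<3.0.0,>=2.0.0 with no `extra == ...` marker
```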
python_doctr-1.0.0.dist-info/RECORD
ADDED
@@ -0,0 +1,149 @@
+doctr/__init__.py,sha256=sdqGeYFfPLRsRH54PsedllScz5FD8yWwyekcsOq3JNc,110
+doctr/file_utils.py,sha256=z2R4VoyDW2Y-P5VvrNJQhV8eOQkN9rCbm5khI8iqmHc,999
+doctr/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+doctr/version.py,sha256=kgXjUv1-JWgHAxO9DO1HCqJL8uFnPENHVFy5p7lbOc8,23
+doctr/contrib/__init__.py,sha256=EKeAGa3TOuJVWR4H_HJvuKO5VPEnJWXW305JMw3ufic,40
+doctr/contrib/artefacts.py,sha256=sIVEx21GIXKCaYG4RVRvXRmjyGkAdTI9O8GB8vIw8kc,5291
+doctr/contrib/base.py,sha256=g0VAg51NZNB5etQe3gZy-7kSm_NLzg26j7Jb4v8lLBk,3273
+doctr/datasets/__init__.py,sha256=QzWe91FU1d3Vi5h4ayPtJUE-Y_SSs_4Xp6MAkZ8XKTc,504
+doctr/datasets/coco_text.py,sha256=VXTnzgUpj3akmVuaUwzRTKc6wvtJGcUVkJeSlj5M3-M,5496
+doctr/datasets/cord.py,sha256=8xPawKP4rgTbxNJRwQT1MychXQWNY2vcZ9awbzgsRsM,5315
+doctr/datasets/detection.py,sha256=CXz_qzMw5SjjhEGtgl83YZIpN1sLXUqDKXm1rAfQNBU,3531
+doctr/datasets/doc_artefacts.py,sha256=h8NtwwZmC_xIlpJyJRi83dVuOFZPF_FhtK1v7f7aXlY,3230
+doctr/datasets/funsd.py,sha256=cbUrLa7fI2AFUzx4HRzHxIio6XOJuiV2RjwK6CQGkgM,4733
+doctr/datasets/ic03.py,sha256=7wtXRQaV_lG_I2sBrP17yCVZ3HegQUQrRaaAUUKlyxM,5564
+doctr/datasets/ic13.py,sha256=LEbSiGDBVywqBM_qJjzz6g9Qmd1hnpi2x_stHMSLz-Q,4528
+doctr/datasets/iiit5k.py,sha256=MeqqoJMlXNDabS04GvQvsdg77g_PmdR2zdzG97acBcQ,4583
+doctr/datasets/iiithws.py,sha256=cgitit-ePAtZFTO6tRZFNua7MuCJuXYzgkgHybqjnzk,2768
+doctr/datasets/imgur5k.py,sha256=o9epJAeZkrNNxs1BXIYlx4_dzrVrxMJgFGEaM_GwNvo,7525
+doctr/datasets/mjsynth.py,sha256=RuVtTjLtIe0tEOF5AibwmMx8Z_hdu3gE04jxQ1iMCz0,4075
+doctr/datasets/ocr.py,sha256=ON447DE1kvXLYwVgTtJ1vNnYo_Rf2drBxShPfhnJ7EY,2523
+doctr/datasets/orientation.py,sha256=HaF2dKyK7kDUFluY5B2mBtxw8nyO9UNOsbP1dDrD8Lk,1091
+doctr/datasets/recognition.py,sha256=RdGw1W_NOlzaVqQlOgGrAB01Z-x9UCJ7dQJdPUp0two,1858
+doctr/datasets/sroie.py,sha256=bKrsvw98zcEfOw5Re0F7-06MzJRv-E73t7BYwmSokoA,4430
+doctr/datasets/svhn.py,sha256=3c7jnAm9JVY3osO-crgQZDNNp1ArTtPHqigBulcQk48,5784
+doctr/datasets/svt.py,sha256=hOzr4QcKGSLj3qn3uG_6PYC3RdTWv9ouUX0JbmlW0qM,5046
+doctr/datasets/synthtext.py,sha256=G-xaVVuv445Kaus0OSA5cvjo0BTPsjB8QxZhFX3wfLA,6301
+doctr/datasets/utils.py,sha256=PDY9kwfqXzVUhy7QCtBF31bGs5boMrxSY-HWAoNSSrY,8106
+doctr/datasets/vocabs.py,sha256=dPhTl7E98cVdcRzA7LGqYcn5bOUN5wrnQMF-wfItfaI,99032
+doctr/datasets/wildreceipt.py,sha256=ipnLArx_CGDmmkegm6rQUxVGHzFF0EzJREcezY0ap7s,5213
+doctr/datasets/datasets/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/datasets/datasets/base.py,sha256=wxpVVYPLnuPBYFGIR-5vG_FDEzT8wx57xPaUipiM-6c,4826
+doctr/datasets/datasets/pytorch.py,sha256=wS7BkexqpFjIKJXIPMaSsSI8g-XxOexKOazofmIxJoI,2030
+doctr/datasets/generator/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/datasets/generator/base.py,sha256=xMymMfWhzQwsfpWGG6a7KxPP8T262ilUWrE0OelT90k,5706
+doctr/datasets/generator/pytorch.py,sha256=-6WD696LOS09Caq0ohHlivK22RTK2Ke3YwqAx8n8qQ8,2116
+doctr/io/__init__.py,sha256=kS7tKGFvzxOCWBOun-Y8n9CsziwRKNynjwpZEUUI03M,106
+doctr/io/elements.py,sha256=tdBiILkTeKci7nbbIYxHikf1-GQy5uBHHgc6ZbZbOP4,26474
+doctr/io/html.py,sha256=3wSKH5qD03f2RqxA7D_d8oEBRVdTs4lLVvFZCkR2jrc,698
+doctr/io/pdf.py,sha256=1fbmMKU4XAk4sY-wVES48_5EwMdGz4SiilAFcaPq94M,1308
+doctr/io/reader.py,sha256=yES-J4KVIMqG2X-LHriqLm1R6kyQWcNlOAnfkW9Vjsw,2732
+doctr/io/image/__init__.py,sha256=bJLj2I8OOTYLuTDjdinao0nkOIWQOLbzIuww23EX3gw,43
+doctr/io/image/base.py,sha256=fD6hfJ6myvxo_5LSUVbzOqExK-aSE0qi_l0MY3Pm0fQ,1681
+doctr/io/image/pytorch.py,sha256=ai8KMIzuPd9blcEZrOCzzEExXQ6DPk_DqgU49ihDWLY,3201
+doctr/models/__init__.py,sha256=yn_mXUL8B5L27Uaat1rLGRQHgLR8VLVxzBuPfNuN1YE,124
+doctr/models/_utils.py,sha256=c9gc3zQOADBFXHjWKtE9wqmp6Dh9bhiZqH3O_YPCQ8I,7353
+doctr/models/builder.py,sha256=UKZBuKutxA0iVM7i8owFdKiwQp9KwyC5Hb4Aio0l0hg,20370
+doctr/models/core.py,sha256=CAgymBtEsVAJ70SHdtaBroM67nbNf6osXN6dWC6qAF4,482
+doctr/models/zoo.py,sha256=89E60O61USlQ9a1bTP5SRY2_Gjk4pTvk3TNoCQC4AiI,9276
+doctr/models/classification/__init__.py,sha256=piTKyOGbTiKBzZSAHQLXf622mqjxaAJPicNtH3b-A0k,173
+doctr/models/classification/zoo.py,sha256=5g6zERM7Y-EYPr5GPgkELnIFHATyPVgJKhaZvHCBEbs,4265
+doctr/models/classification/magc_resnet/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/classification/magc_resnet/pytorch.py,sha256=gl1rHozKCWhWyW5KY0RAjrTilbGkg_uHCiHvU-RlQSI,5480
+doctr/models/classification/mobilenet/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/classification/mobilenet/pytorch.py,sha256=H1NwgxV8vni2vYsw2A9xSTnaXdToy4Xy_G-ZcRzC1mg,9826
+doctr/models/classification/predictor/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/classification/predictor/pytorch.py,sha256=-9x4UaRY16lwrFMSpo808IHjXMQTYDryPY-uYnrEZA4,2523
+doctr/models/classification/resnet/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/classification/resnet/pytorch.py,sha256=gmuJ0IfWTZSHgWij2rW81kfO5WjsvKxJ94SLSWjxmyA,13250
+doctr/models/classification/textnet/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/classification/textnet/pytorch.py,sha256=oPt3oBN3HEID-WFlAtgcW7v--1Gov1x1RcktSzrLe40,10424
+doctr/models/classification/vgg/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/classification/vgg/pytorch.py,sha256=LB3T1SALv6RG2P8v5p2R5lrpaGN7xtKCULqudIT5WCc,3679
+doctr/models/classification/vip/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/classification/vip/pytorch.py,sha256=dKexvWprSWdE_y2iViqw-4GXFUnRRm4y0J1Lgme9kuo,16134
+doctr/models/classification/vip/layers/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/classification/vip/layers/pytorch.py,sha256=FmVzFnDMVl1JlgKrJtxSTLmr3Qp3BqKo3eqJNvRMI1Q,21019
+doctr/models/classification/vit/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/classification/vit/pytorch.py,sha256=Cni9o9hw9OZMdi-XfMRA0P7mWQtik9Oaf6DXfh50CEk,6371
+doctr/models/detection/__init__.py,sha256=RqSz5beehLiqhW0PwFLFmCfTyMjofO-0umcQJLDMHjY,105
+doctr/models/detection/core.py,sha256=-fHsDHgCBMi0FenckvJD1-Vwc1N1mdLXtxLIN9IXB50,3444
+doctr/models/detection/zoo.py,sha256=95fHBUJure77kstR-b6gDXfep-kKpXqsgy6bV63diPc,3671
+doctr/models/detection/_utils/__init__.py,sha256=bJLj2I8OOTYLuTDjdinao0nkOIWQOLbzIuww23EX3gw,43
+doctr/models/detection/_utils/base.py,sha256=fi8XLUVcWG4J-dhxHeTuB6gYL_brt02T0HIP0TVR8jQ,2475
+doctr/models/detection/_utils/pytorch.py,sha256=81AlwGUZGmqCF18Svh_Mwm96-MPXYg-iR6xHXCEE3u0,1021
+doctr/models/detection/differentiable_binarization/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/detection/differentiable_binarization/base.py,sha256=1NNEPt8rlTZrmYNYUhZj56ex9BQVTW6eQbSYDLh2oUM,15788
+doctr/models/detection/differentiable_binarization/pytorch.py,sha256=MBa3nLufNQywn7GMQl0RHQd-ZMZzKNYjZXzzKA9QmSc,16453
+doctr/models/detection/fast/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/detection/fast/base.py,sha256=714-NSdvjjGz3YsdURRHPzSV-K6DJO0F0UygyAUjKBM,10225
+doctr/models/detection/fast/pytorch.py,sha256=i0UQMOn1g_YtF3WVWWyYAANQqiOBl1r837rUaNaA0ws,16702
+doctr/models/detection/linknet/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/detection/linknet/base.py,sha256=avY0YeCv8-IL0TiDmxaI04UdUZY5dTmDn3TbM0uEVLM,10007
+doctr/models/detection/linknet/pytorch.py,sha256=nEV06dEFzSlFw517EUpFZ_bF8oU6xKxjyWdgbzU9c74,14333
+doctr/models/detection/predictor/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/detection/predictor/pytorch.py,sha256=UyxTh3SPXGohU7oMu11xnwiOg-xD3HOkMlIz48cgA5M,2627
+doctr/models/factory/__init__.py,sha256=cKPoH2V2157lLMTR2zsljG3_IQHziodqR-XK_LG0D_I,19
+doctr/models/factory/hub.py,sha256=XYCdnwD2zThM2ZHmz0EexCEL5Ag4IdvbXxFhaQyptLM,6893
+doctr/models/kie_predictor/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/kie_predictor/base.py,sha256=c8mmLJMLlpnVb5GNDP3hm-oefZ42fbk2dkmKMYVf8Gc,2291
+doctr/models/kie_predictor/pytorch.py,sha256=2-iF5a0VUGg-PKifyhxX8F3BruL68ZW2GpQ5OpzagXU,7704
+doctr/models/modules/__init__.py,sha256=pouP7obVTu4p6aHkyaqa1yHKbynpvT0Hgo-LO_1U2R4,83
+doctr/models/modules/layers/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/modules/layers/pytorch.py,sha256=uH-plEGsdm4BzQ6iuIiWZhJnnYovwUO6fnu7FzX8SaI,8667
+doctr/models/modules/transformer/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/modules/transformer/pytorch.py,sha256=MiJMXMbotK9pAl1ITtitGjho86dxqnQBH9SfEjwJqHs,7657
+doctr/models/modules/vision_transformer/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/modules/vision_transformer/pytorch.py,sha256=Tpl-zdePOXj_uHJvnfsg-IAZ5cb3klE2rvJKugppjaI,3943
+doctr/models/predictor/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/predictor/base.py,sha256=73-otJgr4FVEONv2aijMxv1hcp2qd0lAUMgRlqmdkKQ,8541
+doctr/models/predictor/pytorch.py,sha256=_fL3sZTMh_MWzdgzJOf6y2Rv31GKzw0s2Av-H2bDICY,6241
+doctr/models/preprocessor/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/preprocessor/pytorch.py,sha256=m5UHLKXh6n0gp1NAIWe05IYZbhYXOK9-yjU7jRMLVvw,4395
+doctr/models/recognition/__init__.py,sha256=bgAvbwjO14Z2RQFD2XKZcSYJNsyxPa96SGHmX_nqbAQ,145
+doctr/models/recognition/core.py,sha256=VLfNOFc9lx6YOLCOK3f7lRuIS9lMXDSvs6je3FmfaZ0,1524
+doctr/models/recognition/utils.py,sha256=W5zwcv7bIjg_Mr-cH_x6JBe9brCBQvUB1IZNFiN6T-Y,3739
+doctr/models/recognition/zoo.py,sha256=j4lRRFlBtSqQl5C0-jrZPJWFspuAPo81GKhQ06tfEWc,2991
+doctr/models/recognition/crnn/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/recognition/crnn/pytorch.py,sha256=mZsoRVvCRL6pZaXkyjtPTMemXVHpEwEqPkjXDRa6wb4,12234
+doctr/models/recognition/master/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/recognition/master/base.py,sha256=SFo8AyoSC8sCkelPiVXB6dBVcdAmZVObCzzel0_RXM4,1471
+doctr/models/recognition/master/pytorch.py,sha256=taUk6_LJd4FHpXLhe3uO1eEIYzra4OyoGynWPwQ8Ohg,12756
+doctr/models/recognition/parseq/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/recognition/parseq/base.py,sha256=C-J0mm1kWmiVH3X_A1GZizmtqzVYzkFb8Z2bb9e5uww,1465
+doctr/models/recognition/parseq/pytorch.py,sha256=FlYL89YCKEgWjDbOeZSzH03ufCRX3EqY6k6exNUmNrM,20801
+doctr/models/recognition/predictor/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/recognition/predictor/_utils.py,sha256=qRuVsPECmuokPWbbM4JjKR1L8VHBbk0w8LSyYMGdYfA,4695
+doctr/models/recognition/predictor/pytorch.py,sha256=96FIIQl1ZSQMvx6a1Q6HQnvPNNC9T7rusP2SegO3hQo,2753
+doctr/models/recognition/sar/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/recognition/sar/pytorch.py,sha256=cYqNvOIE0tESKrZ-J1mStbf7YxKUpht0GHQgYnhrbJQ,15639
+doctr/models/recognition/viptr/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/recognition/viptr/pytorch.py,sha256=dQP3s76kmSx8M7EQ0A5hoDYB8MMsaaAw2m5cNTRDqvo,9355
+doctr/models/recognition/vitstr/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/recognition/vitstr/base.py,sha256=5ZjYF0WPgH5sSSaoHeoiw4Os2IZey-souszvkYKgzV8,1419
+doctr/models/recognition/vitstr/pytorch.py,sha256=N_7YShzNGNjSS64Uj8b9rbOIwBvVkbqMRitMR5kiwAI,10080
+doctr/models/utils/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/models/utils/pytorch.py,sha256=8ENZmoOmyTi4X1fOgcEo7_RnfWIvSrh8TkELYB7zv04,5782
+doctr/transforms/__init__.py,sha256=0VHbvUZ7llFd1e_7_JdWTaxYMCvaR6KbUJaYJequmQI,23
+doctr/transforms/functional/__init__.py,sha256=hxLu7FmRYn01ysoOQalrMa5K_qg4rLNf8OAuoemzd6I,23
+doctr/transforms/functional/base.py,sha256=K3taCHB1WGIWJ1FE47VPONZFPr2iAU1wgy6S8fRuODA,6842
+doctr/transforms/functional/pytorch.py,sha256=_imUaMkfk4-9L7ipZ1FpNweWApD9Q0Hup10_Ixm8V5A,5102
+doctr/transforms/modules/__init__.py,sha256=bJLj2I8OOTYLuTDjdinao0nkOIWQOLbzIuww23EX3gw,43
+doctr/transforms/modules/base.py,sha256=M8K-g4Q1KKZYJA52QKloE8vR-DE2wK4RP8rhKjFnfm0,7678
+doctr/transforms/modules/pytorch.py,sha256=IiK4AKvKGIhMKe1G3dJzGuINbLnSA3AX0dcN2Q8VId4,11802
+doctr/utils/__init__.py,sha256=uQY9ibZ24V896fmihIsK23QOIZdKtk0HyKoCVJ_lLuM,95
+doctr/utils/common_types.py,sha256=ebBlz_61A2gUp_CD2R_8o1pJ5CpCSJimJWzAyGas1uQ,534
+doctr/utils/data.py,sha256=2yv95B2t9b5Hd-SGgEWGqe8ooBrrl-k-kxBd8Ig0SuM,4188
+doctr/utils/fonts.py,sha256=1vEakG5wfOe-XmsXC63Pi_nox6XZaoHcDsJPjafGo-Q,1265
+doctr/utils/geometry.py,sha256=-oKDdCYq7MRYms8kp1ui_rx7HrmhYZhmglwgtCE_DG0,18678
+doctr/utils/metrics.py,sha256=u2d9nz5NY3gHKpmj5DlcK9OjMlB6zwoZrr-vxVntvp4,20249
+doctr/utils/multithreading.py,sha256=FinxQSGRpMs6WL8paixeOGd5CNAkG2VYqC-5d1izYBE,1972
+doctr/utils/reconstitution.py,sha256=mWEGVPhtxnVDKbUDr8E1tad4HaGISGC3DenJvm8I0uM,7256
+doctr/utils/repr.py,sha256=zw4vLHFIqpTGqsEFosGLkM7aLAjy6RLu_yUz5SyI_YA,2087
+doctr/utils/visualization.py,sha256=fUnmwjIUsh8rfqUWf-eDAycK5RAVO28r0krc0gq96Z0,13118
+python_doctr-1.0.0.dist-info/licenses/LICENSE,sha256=75RTSsXOsAYhGpxsHc9U41ep6GS7vrUPufeekgoeOXM,11336
+python_doctr-1.0.0.dist-info/METADATA,sha256=JlKh78_1zbnvhgyLvlwDd6-U_c1fGu59JG-2qleSugI,32161
+python_doctr-1.0.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+python_doctr-1.0.0.dist-info/top_level.txt,sha256=lCgp4pmjPI3HYph62XhfzA3jRwM715kGtJPmqIUJ9t8,6
+python_doctr-1.0.0.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+python_doctr-1.0.0.dist-info/RECORD,,
doctr/datasets/datasets/tensorflow.py
DELETED
@@ -1,59 +0,0 @@
-# Copyright (C) 2021-2025, Mindee.
-
-# This program is licensed under the Apache License 2.0.
-# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
-
-import os
-from copy import deepcopy
-from typing import Any
-
-import numpy as np
-import tensorflow as tf
-
-from doctr.io import read_img_as_tensor, tensor_from_numpy
-
-from .base import _AbstractDataset, _VisionDataset
-
-__all__ = ["AbstractDataset", "VisionDataset"]
-
-
-class AbstractDataset(_AbstractDataset):
-    """Abstract class for all datasets"""
-
-    def _read_sample(self, index: int) -> tuple[tf.Tensor, Any]:
-        img_name, target = self.data[index]
-
-        # Check target
-        if isinstance(target, dict):
-            assert "boxes" in target, "Target should contain 'boxes' key"
-            assert "labels" in target, "Target should contain 'labels' key"
-        elif isinstance(target, tuple):
-            assert len(target) == 2
-            assert isinstance(target[0], str) or isinstance(target[0], np.ndarray), (
-                "first element of the tuple should be a string or a numpy array"
-            )
-            assert isinstance(target[1], list), "second element of the tuple should be a list"
-        else:
-            assert isinstance(target, str) or isinstance(target, np.ndarray), (
-                "Target should be a string or a numpy array"
-            )
-
-        # Read image
-        img = (
-            tensor_from_numpy(img_name, dtype=tf.float32)
-            if isinstance(img_name, np.ndarray)
-            else read_img_as_tensor(os.path.join(self.root, img_name), dtype=tf.float32)
-        )
-
-        return img, deepcopy(target)
-
-    @staticmethod
-    def collate_fn(samples: list[tuple[tf.Tensor, Any]]) -> tuple[tf.Tensor, list[Any]]:
-        images, targets = zip(*samples)
-        images = tf.stack(images, axis=0)
-
-        return images, list(targets)
-
-
-class VisionDataset(AbstractDataset, _VisionDataset):  # noqa: D101
-    pass
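The PyTorch twin of this module (`doctr/datasets/datasets/pytorch.py`, still shipped per the RECORD above) keeps the same contract. A hedged sketch of the equivalent collate step, not copied from this diff:

```python
import torch

def collate_fn(samples):
    # Mirror of the deleted TF collate: stack the image tensors along a new
    # batch dimension and keep the per-sample targets as a plain list.
    images, targets = zip(*samples)
    return torch.stack(images, dim=0), list(targets)
```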
doctr/datasets/generator/tensorflow.py
DELETED
@@ -1,58 +0,0 @@
-# Copyright (C) 2021-2025, Mindee.
-
-# This program is licensed under the Apache License 2.0.
-# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
-
-import tensorflow as tf
-
-from .base import _CharacterGenerator, _WordGenerator
-
-__all__ = ["CharacterGenerator", "WordGenerator"]
-
-
-class CharacterGenerator(_CharacterGenerator):
-    """Implements a character image generation dataset
-
-    >>> from doctr.datasets import CharacterGenerator
-    >>> ds = CharacterGenerator(vocab='abdef', num_samples=100)
-    >>> img, target = ds[0]
-
-    Args:
-        vocab: vocabulary to take the character from
-        num_samples: number of samples that will be generated iterating over the dataset
-        cache_samples: whether generated images should be cached firsthand
-        font_family: font to use to generate the text images
-        img_transforms: composable transformations that will be applied to each image
-        sample_transforms: composable transformations that will be applied to both the image and the target
-    """
-
-    def __init__(self, *args, **kwargs) -> None:
-        super().__init__(*args, **kwargs)
-
-    @staticmethod
-    def collate_fn(samples):
-        images, targets = zip(*samples)
-        images = tf.stack(images, axis=0)
-
-        return images, tf.convert_to_tensor(targets)
-
-
-class WordGenerator(_WordGenerator):
-    """Implements a character image generation dataset
-
-    >>> from doctr.datasets import WordGenerator
-    >>> ds = WordGenerator(vocab='abdef', min_chars=1, max_chars=32, num_samples=100)
-    >>> img, target = ds[0]
-
-    Args:
-        vocab: vocabulary to take the character from
-        min_chars: minimum number of characters in a word
-        max_chars: maximum number of characters in a word
-        num_samples: number of samples that will be generated iterating over the dataset
-        cache_samples: whether generated images should be cached firsthand
-        font_family: font to use to generate the text images
-        img_transforms: composable transformations that will be applied to each image
-        sample_transforms: composable transformations that will be applied to both the image and the target
-    """
-
-    pass
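Only the TensorFlow collate wrappers disappear here; the `_CharacterGenerator`/`_WordGenerator` bases in `base.py` and the PyTorch counterpart (`doctr/datasets/generator/pytorch.py` in the RECORD above) stay in the wheel. A hedged usage sketch, assuming the constructor keeps the signature shown in the deleted docstrings:

```python
from doctr.datasets import WordGenerator

# Same arguments as the deleted docstring example; in 1.0.0 the returned
# image is a torch tensor rather than a tf.Tensor.
ds = WordGenerator(vocab="abdef", min_chars=1, max_chars=32, num_samples=100)
img, target = ds[0]
```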
doctr/datasets/loader.py
DELETED
@@ -1,94 +0,0 @@
-# Copyright (C) 2021-2025, Mindee.
-
-# This program is licensed under the Apache License 2.0.
-# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
-
-import math
-from collections.abc import Callable
-
-import numpy as np
-import tensorflow as tf
-
-__all__ = ["DataLoader"]
-
-
-def default_collate(samples):
-    """Collate multiple elements into batches
-
-    Args:
-        samples: list of N tuples containing M elements
-
-    Returns:
-        tuple of M sequences contianing N elements each
-    """
-    batch_data = zip(*samples)
-
-    tf_data = tuple(tf.stack(elt, axis=0) for elt in batch_data)
-
-    return tf_data
-
-
-class DataLoader:
-    """Implements a dataset wrapper for fast data loading
-
-    >>> from doctr.datasets import CORD, DataLoader
-    >>> train_set = CORD(train=True, download=True)
-    >>> train_loader = DataLoader(train_set, batch_size=32)
-    >>> train_iter = iter(train_loader)
-    >>> images, targets = next(train_iter)
-
-    Args:
-        dataset: the dataset
-        shuffle: whether the samples should be shuffled before passing it to the iterator
-        batch_size: number of elements in each batch
-        drop_last: if `True`, drops the last batch if it isn't full
-        collate_fn: function to merge samples into a batch
-    """
-
-    def __init__(
-        self,
-        dataset,
-        shuffle: bool = True,
-        batch_size: int = 1,
-        drop_last: bool = False,
-        collate_fn: Callable | None = None,
-    ) -> None:
-        self.dataset = dataset
-        self.shuffle = shuffle
-        self.batch_size = batch_size
-        nb = len(self.dataset) / batch_size
-        self.num_batches = math.floor(nb) if drop_last else math.ceil(nb)
-        if collate_fn is None:
-            self.collate_fn = self.dataset.collate_fn if hasattr(self.dataset, "collate_fn") else default_collate
-        else:
-            self.collate_fn = collate_fn
-        self.reset()
-
-    def __len__(self) -> int:
-        return self.num_batches
-
-    def reset(self) -> None:
-        # Updates indices after each epoch
-        self._num_yielded = 0
-        self.indices = np.arange(len(self.dataset))
-        if self.shuffle is True:
-            np.random.shuffle(self.indices)
-
-    def __iter__(self):
-        self.reset()
-        return self
-
-    def __next__(self):
-        if self._num_yielded < self.num_batches:
-            # Get next indices
-            idx = self._num_yielded * self.batch_size
-            indices = self.indices[idx : min(len(self.dataset), idx + self.batch_size)]
-
-            samples = list(map(self.dataset.__getitem__, indices))
-
-            batch_data = self.collate_fn(samples)
-
-            self._num_yielded += 1
-            return batch_data
-        else:
-            raise StopIteration
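With this module gone, the wheel no longer ships its own loader. A hedged replacement sketch (not part of this diff): `torch.utils.data.DataLoader` covers everything the deleted class did, including shuffling, `drop_last`, and a dataset-provided `collate_fn`:

```python
from torch.utils.data import DataLoader

from doctr.datasets import CORD

# Mirrors the deleted docstring example with PyTorch's own loader; assumes
# doctr's PyTorch datasets still expose a `collate_fn` static method.
train_set = CORD(train=True, download=True)
train_loader = DataLoader(
    train_set,
    batch_size=32,
    shuffle=True,
    drop_last=False,
    collate_fn=train_set.collate_fn,
)
images, targets = next(iter(train_loader))
```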