docling-eval 0.4.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- docling_eval-0.4.0/LICENSE +21 -0
- docling_eval-0.4.0/PKG-INFO +134 -0
- docling_eval-0.4.0/README.md +81 -0
- docling_eval-0.4.0/docling_eval/__init__.py +0 -0
- docling_eval-0.4.0/docling_eval/aggregations/consolidator.py +276 -0
- docling_eval-0.4.0/docling_eval/aggregations/multi_evalutor.py +433 -0
- docling_eval-0.4.0/docling_eval/cli/__init__.py +0 -0
- docling_eval-0.4.0/docling_eval/cli/main.py +1139 -0
- docling_eval-0.4.0/docling_eval/datamodels/__init__.py +0 -0
- docling_eval-0.4.0/docling_eval/datamodels/cvat_types.py +242 -0
- docling_eval-0.4.0/docling_eval/datamodels/dataset_record.py +321 -0
- docling_eval-0.4.0/docling_eval/datamodels/types.py +133 -0
- docling_eval-0.4.0/docling_eval/dataset_builders/__init__.py +0 -0
- docling_eval-0.4.0/docling_eval/dataset_builders/cvat_dataset_builder.py +1486 -0
- docling_eval-0.4.0/docling_eval/dataset_builders/cvat_preannotation_builder.py +824 -0
- docling_eval-0.4.0/docling_eval/dataset_builders/dataset_builder.py +349 -0
- docling_eval-0.4.0/docling_eval/dataset_builders/doclaynet_v1_builder.py +391 -0
- docling_eval-0.4.0/docling_eval/dataset_builders/doclaynet_v2_builder.py +716 -0
- docling_eval-0.4.0/docling_eval/dataset_builders/doclingdpbench_builder.py +103 -0
- docling_eval-0.4.0/docling_eval/dataset_builders/docvqa_builder.py +247 -0
- docling_eval-0.4.0/docling_eval/dataset_builders/dpbench_builder.py +361 -0
- docling_eval-0.4.0/docling_eval/dataset_builders/file_dataset_builder.py +161 -0
- docling_eval-0.4.0/docling_eval/dataset_builders/funsd_builder.py +428 -0
- docling_eval-0.4.0/docling_eval/dataset_builders/omnidocbench_builder.py +421 -0
- docling_eval-0.4.0/docling_eval/dataset_builders/otsl_table_dataset_builder.py +343 -0
- docling_eval-0.4.0/docling_eval/dataset_builders/pixparse_builder.py +200 -0
- docling_eval-0.4.0/docling_eval/dataset_builders/xfund_builder.py +435 -0
- docling_eval-0.4.0/docling_eval/evaluators/__init__.py +0 -0
- docling_eval-0.4.0/docling_eval/evaluators/adapters.py +64 -0
- docling_eval-0.4.0/docling_eval/evaluators/base_evaluator.py +137 -0
- docling_eval-0.4.0/docling_eval/evaluators/bbox_text_evaluator.py +327 -0
- docling_eval-0.4.0/docling_eval/evaluators/layout_evaluator.py +779 -0
- docling_eval-0.4.0/docling_eval/evaluators/markdown_text_evaluator.py +268 -0
- docling_eval-0.4.0/docling_eval/evaluators/ocr_evaluator.py +162 -0
- docling_eval-0.4.0/docling_eval/evaluators/readingorder_evaluator.py +498 -0
- docling_eval-0.4.0/docling_eval/evaluators/stats.py +97 -0
- docling_eval-0.4.0/docling_eval/evaluators/table_evaluator.py +324 -0
- docling_eval-0.4.0/docling_eval/evaluators/teds.py +129 -0
- docling_eval-0.4.0/docling_eval/evaluators/timings_evaluator.py +130 -0
- docling_eval-0.4.0/docling_eval/legacy/__init__.py +0 -0
- docling_eval-0.4.0/docling_eval/legacy/conversion.py +134 -0
- docling_eval-0.4.0/docling_eval/prediction_providers/__init__.py +0 -0
- docling_eval-0.4.0/docling_eval/prediction_providers/aws_prediction_provider.py +585 -0
- docling_eval-0.4.0/docling_eval/prediction_providers/azure_prediction_provider.py +411 -0
- docling_eval-0.4.0/docling_eval/prediction_providers/base_prediction_provider.py +418 -0
- docling_eval-0.4.0/docling_eval/prediction_providers/docling_provider.py +149 -0
- docling_eval-0.4.0/docling_eval/prediction_providers/file_provider.py +294 -0
- docling_eval-0.4.0/docling_eval/prediction_providers/google_prediction_provider.py +411 -0
- docling_eval-0.4.0/docling_eval/prediction_providers/tableformer_provider.py +477 -0
- docling_eval-0.4.0/docling_eval/py.typed +0 -0
- docling_eval-0.4.0/docling_eval/utils/__init__.py +0 -0
- docling_eval-0.4.0/docling_eval/utils/utils.py +626 -0
- docling_eval-0.4.0/docling_eval/visualisation/constants.py +454 -0
- docling_eval-0.4.0/docling_eval/visualisation/visualisations.py +226 -0
- docling_eval-0.4.0/docling_eval.egg-info/PKG-INFO +134 -0
- docling_eval-0.4.0/docling_eval.egg-info/SOURCES.txt +83 -0
- docling_eval-0.4.0/docling_eval.egg-info/dependency_links.txt +1 -0
- docling_eval-0.4.0/docling_eval.egg-info/entry_points.txt +2 -0
- docling_eval-0.4.0/docling_eval.egg-info/requires.txt +31 -0
- docling_eval-0.4.0/docling_eval.egg-info/top_level.txt +1 -0
- docling_eval-0.4.0/pyproject.toml +153 -0
- docling_eval-0.4.0/setup.cfg +4 -0
- docling_eval-0.4.0/tests/test_bboxtext_evaluator.py +33 -0
- docling_eval-0.4.0/tests/test_consolidator.py +27 -0
- docling_eval-0.4.0/tests/test_cvat.py +235 -0
- docling_eval-0.4.0/tests/test_dataset_builder.py +603 -0
- docling_eval-0.4.0/tests/test_layout_aws.py +99 -0
- docling_eval-0.4.0/tests/test_layout_azure.py +102 -0
- docling_eval-0.4.0/tests/test_layout_evaluator.py +59 -0
- docling_eval-0.4.0/tests/test_markdown_aws.py +103 -0
- docling_eval-0.4.0/tests/test_markdown_azure.py +102 -0
- docling_eval-0.4.0/tests/test_markdown_text_evaluator.py +38 -0
- docling_eval-0.4.0/tests/test_multi_evaluator.py +86 -0
- docling_eval-0.4.0/tests/test_ocr_aws.py +57 -0
- docling_eval-0.4.0/tests/test_ocr_azure.py +58 -0
- docling_eval-0.4.0/tests/test_ocr_evaluator.py +19 -0
- docling_eval-0.4.0/tests/test_ocr_google.py +100 -0
- docling_eval-0.4.0/tests/test_readingorder_aws.py +99 -0
- docling_eval-0.4.0/tests/test_readingorder_azure.py +99 -0
- docling_eval-0.4.0/tests/test_readingorder_evaluator.py +33 -0
- docling_eval-0.4.0/tests/test_s3source.py +99 -0
- docling_eval-0.4.0/tests/test_table_evaluator.py +324 -0
- docling_eval-0.4.0/tests/test_tables_aws.py +178 -0
- docling_eval-0.4.0/tests/test_tables_azure.py +179 -0
- docling_eval-0.4.0/tests/test_tables_google.py +58 -0
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2024 International Business Machines
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,134 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: docling-eval
|
|
3
|
+
Version: 0.4.0
|
|
4
|
+
Summary: Evaluation of Docling
|
|
5
|
+
Author-email: Peter Staar <taa@zurich.ibm.com>, Panos Vagenas <pva@zurich.ibm.com>, Ahmed Nassar <ahn@zurich.ibm.com>, Nikos Livathinos <nli@zurich.ibm.com>, Maxim Lysak <mly@zurich.ibm.com>, Michele Dolfi <dol@zurich.ibm.com>, Christoph Auer <cau@zurich.ibm.com>
|
|
6
|
+
License-Expression: MIT
|
|
7
|
+
Project-URL: homepage, https://github.com/docling-project/docling-eval
|
|
8
|
+
Project-URL: repository, https://github.com/docling-project/docling-eval
|
|
9
|
+
Project-URL: issues, https://github.com/docling-project/docling-eval/issues
|
|
10
|
+
Project-URL: changelog, https://github.com/docling-project/docling-eval/blob/main/CHANGELOG.md
|
|
11
|
+
Keywords: docling,evaluation,convert,document,pdf,docx,html,markdown,layout model,segmentation,table structure,table former
|
|
12
|
+
Classifier: Operating System :: MacOS :: MacOS X
|
|
13
|
+
Classifier: Operating System :: POSIX :: Linux
|
|
14
|
+
Classifier: Development Status :: 5 - Production/Stable
|
|
15
|
+
Classifier: Intended Audience :: Developers
|
|
16
|
+
Classifier: Intended Audience :: Science/Research
|
|
17
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
18
|
+
Classifier: Programming Language :: Python :: 3
|
|
19
|
+
Requires-Python: <4.0,>=3.10
|
|
20
|
+
Description-Content-Type: text/markdown
|
|
21
|
+
License-File: LICENSE
|
|
22
|
+
Requires-Dist: docling[vlm]<3.0.0,>=2.31.0
|
|
23
|
+
Requires-Dist: docling-core<3.0.0,>=2.30.1
|
|
24
|
+
Requires-Dist: pydantic<3.0.0,>=2.0.0
|
|
25
|
+
Requires-Dist: lxml<6.0.0,>=5.3.0
|
|
26
|
+
Requires-Dist: datasets<4.0.0,>=3.2.0
|
|
27
|
+
Requires-Dist: apted<2.0.0,>=1.0.3
|
|
28
|
+
Requires-Dist: Distance<0.2.0,>=0.1.3
|
|
29
|
+
Requires-Dist: matplotlib<4.0.0,>=3.10.0
|
|
30
|
+
Requires-Dist: torch<3.0.0,>=2.5.1
|
|
31
|
+
Requires-Dist: torchmetrics<2.0.0,>=1.6.0
|
|
32
|
+
Requires-Dist: pycocotools<3.0.0,>=2.0.8
|
|
33
|
+
Requires-Dist: tabulate<0.10.0,>=0.9.0
|
|
34
|
+
Requires-Dist: tqdm<5.0.0,>=4.67.1
|
|
35
|
+
Requires-Dist: pillow<11.0.0,>=10.3.0
|
|
36
|
+
Requires-Dist: evaluate<0.5.0,>=0.4.3
|
|
37
|
+
Requires-Dist: nltk<4.0.0,>=3.9.1
|
|
38
|
+
Requires-Dist: ibm-cos-sdk<3.0.0,>=2.1.40
|
|
39
|
+
Requires-Dist: beautifulsoup4<5.0.0,>=4.12.3
|
|
40
|
+
Requires-Dist: jiwer<4.0.0,>=3.1.0
|
|
41
|
+
Requires-Dist: urllib3<2.0.0,>=1.24.2
|
|
42
|
+
Requires-Dist: pandas<3.0.0,>=2.2.3
|
|
43
|
+
Requires-Dist: openpyxl<4.0.0,>=3.1.5
|
|
44
|
+
Requires-Dist: pypdf2<4.0.0,>=3.0.1
|
|
45
|
+
Requires-Dist: xmltodict<0.15.0,>=0.14.2
|
|
46
|
+
Provides-Extra: hyperscalers
|
|
47
|
+
Requires-Dist: azure-ai-documentintelligence<2.0.0,>=1.0.2; extra == "hyperscalers"
|
|
48
|
+
Requires-Dist: azure-common<2.0.0,>=1.1.28; extra == "hyperscalers"
|
|
49
|
+
Requires-Dist: azure-core<2.0.0,>=1.33.0; extra == "hyperscalers"
|
|
50
|
+
Requires-Dist: boto3<2.0.0,>=1.37.8; extra == "hyperscalers"
|
|
51
|
+
Requires-Dist: google-cloud-documentai<4.0.0,>=3.2.0; extra == "hyperscalers"
|
|
52
|
+
Dynamic: license-file
|
|
53
|
+
|
|
54
|
+
<p align="center">
|
|
55
|
+
<a href="https://github.com/docling-project/docling-eval">
|
|
56
|
+
<img loading="lazy" alt="Docling" src="docs/assets/docling-eval-pic.png" width="40%"/>
|
|
57
|
+
</a>
|
|
58
|
+
</p>
|
|
59
|
+
|
|
60
|
+
# Docling-eval
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
[](https://arxiv.org/abs/2408.09869)
|
|
64
|
+
[](https://pypi.org/project/docling-eval/)
|
|
65
|
+
[](https://pypi.org/project/docling-eval/)
|
|
66
|
+
[](https://github.com/astral-sh/uv)
|
|
67
|
+
[](https://github.com/psf/black)
|
|
68
|
+
[](https://pycqa.github.io/isort/)
|
|
69
|
+
[](https://pydantic.dev)
|
|
70
|
+
[](https://github.com/pre-commit/pre-commit)
|
|
71
|
+
[](https://opensource.org/licenses/MIT)
|
|
72
|
+
|
|
73
|
+
Evaluate [Docling](https://github.com/docling-project/docling) on various datasets.
|
|
74
|
+
|
|
75
|
+
## Features
|
|
76
|
+
|
|
77
|
+
Evaluate Docling on various datasets. You can use the CLI:
|
|
78
|
+
|
|
79
|
+
```shell
|
|
80
|
+
terminal %> docling-eval --help
|
|
81
|
+
|
|
82
|
+
Usage: docling_eval [OPTIONS] COMMAND [ARGS]...
|
|
83
|
+
|
|
84
|
+
Docling Evaluation CLI for benchmarking document processing tasks.
|
|
85
|
+
|
|
86
|
+
╭─ Options ────────────────────────────────────────────────────────────────────────────╮
|
|
87
|
+
│ --help Show this message and exit. │
|
|
88
|
+
╰──────────────────────────────────────────────────────────────────────────────────────╯
|
|
89
|
+
╭─ Commands ───────────────────────────────────────────────────────────────────────────╮
|
|
90
|
+
│ create Create both ground truth and evaluation datasets in one step. │
|
|
91
|
+
│ create-eval Create evaluation dataset from existing ground truth. │
|
|
92
|
+
│ create-gt Create ground truth dataset only. │
|
|
93
|
+
│ evaluate Evaluate predictions against ground truth. │
|
|
94
|
+
│ visualize Visualize evaluation results. │
|
|
95
|
+
╰──────────────────────────────────────────────────────────────────────────────────────╯
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
```
|
|
99
|
+
|
|
100
|
+
## Benchmarks
|
|
101
|
+
|
|
102
|
+
- General
|
|
103
|
+
- [DP-Bench benchmarks](docs/DP-Bench_benchmarks.md): Text, layout, reading order and table structure evaluation on the DP-Bench dataset.
|
|
104
|
+
- [OmniDocBench benchmarks](docs/OmniDocBench_benchmarks.md): Text, layout, reading order and table structure evaluation on the OmniDocBench dataset.
|
|
105
|
+
- Layout
|
|
106
|
+
- [DocLayNetV1 Benchmarks](docs/DocLayNetv1_benchmarks.md): Text and layout evaluation on the DocLayNet v1.2 dataset.
|
|
107
|
+
- Table-Structure
|
|
108
|
+
- [FinTabnet Benchmarks](docs/FinTabNet_benchmarks.md): Table structure evaluation on the FinTabNet dataset.
|
|
109
|
+
- [PubTabNet benchmarks](docs/PubTabNet_benchmarks.md): Table structure evaluation on the PubTabNet dataset.
|
|
110
|
+
- [Pub1M benchmarks](docs/P1M_benchmarks.md): Table structure evaluation on the Pub1M dataset.
|
|
111
|
+
|
|
112
|
+
On our list for next benchmarks:
|
|
113
|
+
|
|
114
|
+
- [OmniOCR](https://github.com/getomni-ai/ocr-benchmark)
|
|
115
|
+
- Hyperscalers
|
|
116
|
+
- [CoMix](https://github.com/emanuelevivoli/CoMix/tree/main/docs/datasets)
|
|
117
|
+
- [DocVQA](https://huggingface.co/datasets/lmms-lab/DocVQA)
|
|
118
|
+
- [rd-tablebench](https://huggingface.co/datasets/reducto/rd-tablebench)
|
|
119
|
+
- [BigDocs-Bench](https://huggingface.co/datasets/ServiceNow/BigDocs-Bench)
|
|
120
|
+
|
|
121
|
+
## Contributing
|
|
122
|
+
|
|
123
|
+
Please read [Contributing to Docling](https://github.com/docling-project/docling/blob/main/CONTRIBUTING.md) for details.
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
## License
|
|
127
|
+
|
|
128
|
+
The Docling codebase is under MIT license.
|
|
129
|
+
For individual model usage, please refer to the model licenses found in the original packages.
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
## IBM ❤️ Open Source AI
|
|
133
|
+
|
|
134
|
+
Docling-eval has been brought to you by IBM.
|
|
@@ -0,0 +1,81 @@
|
|
|
1
|
+
<p align="center">
|
|
2
|
+
<a href="https://github.com/docling-project/docling-eval">
|
|
3
|
+
<img loading="lazy" alt="Docling" src="docs/assets/docling-eval-pic.png" width="40%"/>
|
|
4
|
+
</a>
|
|
5
|
+
</p>
|
|
6
|
+
|
|
7
|
+
# Docling-eval
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
[](https://arxiv.org/abs/2408.09869)
|
|
11
|
+
[](https://pypi.org/project/docling-eval/)
|
|
12
|
+
[](https://pypi.org/project/docling-eval/)
|
|
13
|
+
[](https://github.com/astral-sh/uv)
|
|
14
|
+
[](https://github.com/psf/black)
|
|
15
|
+
[](https://pycqa.github.io/isort/)
|
|
16
|
+
[](https://pydantic.dev)
|
|
17
|
+
[](https://github.com/pre-commit/pre-commit)
|
|
18
|
+
[](https://opensource.org/licenses/MIT)
|
|
19
|
+
|
|
20
|
+
Evaluate [Docling](https://github.com/docling-project/docling) on various datasets.
|
|
21
|
+
|
|
22
|
+
## Features
|
|
23
|
+
|
|
24
|
+
Evaluate Docling on various datasets. You can use the CLI:
|
|
25
|
+
|
|
26
|
+
```shell
|
|
27
|
+
terminal %> docling-eval --help
|
|
28
|
+
|
|
29
|
+
Usage: docling_eval [OPTIONS] COMMAND [ARGS]...
|
|
30
|
+
|
|
31
|
+
Docling Evaluation CLI for benchmarking document processing tasks.
|
|
32
|
+
|
|
33
|
+
╭─ Options ────────────────────────────────────────────────────────────────────────────╮
|
|
34
|
+
│ --help Show this message and exit. │
|
|
35
|
+
╰──────────────────────────────────────────────────────────────────────────────────────╯
|
|
36
|
+
╭─ Commands ───────────────────────────────────────────────────────────────────────────╮
|
|
37
|
+
│ create Create both ground truth and evaluation datasets in one step. │
|
|
38
|
+
│ create-eval Create evaluation dataset from existing ground truth. │
|
|
39
|
+
│ create-gt Create ground truth dataset only. │
|
|
40
|
+
│ evaluate Evaluate predictions against ground truth. │
|
|
41
|
+
│ visualize Visualize evaluation results. │
|
|
42
|
+
╰──────────────────────────────────────────────────────────────────────────────────────╯
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
```
|
|
46
|
+
|
|
47
|
+
## Benchmarks
|
|
48
|
+
|
|
49
|
+
- General
|
|
50
|
+
- [DP-Bench benchmarks](docs/DP-Bench_benchmarks.md): Text, layout, reading order and table structure evaluation on the DP-Bench dataset.
|
|
51
|
+
- [OmniDocBench benchmarks](docs/OmniDocBench_benchmarks.md): Text, layout, reading order and table structure evaluation on the OmniDocBench dataset.
|
|
52
|
+
- Layout
|
|
53
|
+
- [DocLayNetV1 Benchmarks](docs/DocLayNetv1_benchmarks.md): Text and layout evaluation on the DocLayNet v1.2 dataset.
|
|
54
|
+
- Table-Structure
|
|
55
|
+
- [FinTabnet Benchmarks](docs/FinTabNet_benchmarks.md): Table structure evaluation on the FinTabNet dataset.
|
|
56
|
+
- [PubTabNet benchmarks](docs/PubTabNet_benchmarks.md): Table structure evaluation on the PubTabNet dataset.
|
|
57
|
+
- [Pub1M benchmarks](docs/P1M_benchmarks.md): Table structure evaluation on the Pub1M dataset.
|
|
58
|
+
|
|
59
|
+
On our list for next benchmarks:
|
|
60
|
+
|
|
61
|
+
- [OmniOCR](https://github.com/getomni-ai/ocr-benchmark)
|
|
62
|
+
- Hyperscalers
|
|
63
|
+
- [CoMix](https://github.com/emanuelevivoli/CoMix/tree/main/docs/datasets)
|
|
64
|
+
- [DocVQA](https://huggingface.co/datasets/lmms-lab/DocVQA)
|
|
65
|
+
- [rd-tablebench](https://huggingface.co/datasets/reducto/rd-tablebench)
|
|
66
|
+
- [BigDocs-Bench](https://huggingface.co/datasets/ServiceNow/BigDocs-Bench)
|
|
67
|
+
|
|
68
|
+
## Contributing
|
|
69
|
+
|
|
70
|
+
Please read [Contributing to Docling](https://github.com/docling-project/docling/blob/main/CONTRIBUTING.md) for details.
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
## License
|
|
74
|
+
|
|
75
|
+
The Docling codebase is under MIT license.
|
|
76
|
+
For individual model usage, please refer to the model licenses found in the original packages.
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
## IBM ❤️ Open Source AI
|
|
80
|
+
|
|
81
|
+
Docling-eval has been brought to you by IBM.
|
|
File without changes
|
|
@@ -0,0 +1,276 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
from typing import Dict, List, Optional, Tuple, Union
|
|
4
|
+
|
|
5
|
+
import openpyxl
|
|
6
|
+
import pandas as pd
|
|
7
|
+
from openpyxl import load_workbook
|
|
8
|
+
from openpyxl.cell.cell import Cell
|
|
9
|
+
from openpyxl.styles import Font
|
|
10
|
+
from pandas import DataFrame
|
|
11
|
+
|
|
12
|
+
from docling_eval.aggregations.multi_evalutor import MultiEvaluation
|
|
13
|
+
from docling_eval.datamodels.types import ConsolidationFormats, EvaluationModality
|
|
14
|
+
from docling_eval.evaluators.base_evaluator import EvaluationRejectionType
|
|
15
|
+
from docling_eval.evaluators.bbox_text_evaluator import DatasetBoxesTextEvaluation
|
|
16
|
+
from docling_eval.evaluators.layout_evaluator import DatasetLayoutEvaluation
|
|
17
|
+
from docling_eval.evaluators.markdown_text_evaluator import DatasetMarkdownEvaluation
|
|
18
|
+
from docling_eval.evaluators.readingorder_evaluator import DatasetReadingOrderEvaluation
|
|
19
|
+
from docling_eval.evaluators.stats import DatasetStatistics
|
|
20
|
+
from docling_eval.evaluators.table_evaluator import DatasetTableEvaluation
|
|
21
|
+
|
|
22
|
+
_log = logging.getLogger(__name__)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def export_value(val: Union[float, DatasetStatistics]) -> str:
    r"""Format a metric value for export.

    A ``DatasetStatistics`` renders as ``mean±std`` (two decimals each);
    a plain float renders as a two-decimal string.
    """
    if isinstance(val, DatasetStatistics):
        return f"{val.mean:.2f}±{val.std:.2f}"
    return f"{val:.2f}"
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class Consolidator:
    r"""
    Consolidate a MultiEvaluation into a comparison matrix.

    The comparison matrix has 3 dimensions:
    - Benchmarks
    - ConversionProviders
    - Modalities

    The matrix is exported as a single Excel sheet containing one
    metrics subtable per evaluation modality.
    """

    def __init__(self, output_path: Path):
        r"""
        Parameters
        ----------
        output_path : Path
            Directory where the consolidation artifacts are written.
            Created (including parents) if it does not already exist.
        """
        self._output_path = output_path
        self._excel_engine = "openpyxl"
        self._sheet_name = "matrix"
        self._excel_filename = "consolidation_matrix.xlsx"

        self._output_path.mkdir(parents=True, exist_ok=True)

    def __call__(
        self,
        multi_evaluation: MultiEvaluation,
        consolidation_format: Optional[
            ConsolidationFormats
        ] = ConsolidationFormats.EXCEL,
    ) -> Tuple[Dict[EvaluationModality, DataFrame], Optional[Path]]:
        r"""
        Build the per-modality dataframes and export them.

        Returns
        -------
        Tuple of the dataframes (keyed by modality) and the path of the
        produced file, or ``None`` when the requested format is unsupported.
        """
        dfs = self._build_dataframes(multi_evaluation)

        # Export dataframe.
        # BUGFIX: produced_fn was previously left unbound when an
        # unsupported consolidation format was requested, so the final
        # `return` raised UnboundLocalError instead of returning None.
        produced_fn: Optional[Path] = None
        if consolidation_format == ConsolidationFormats.EXCEL:
            produced_fn = self._to_excel(dfs)
            _log.info("Produced excel: %s", str(produced_fn))
        else:
            _log.info("Unsupported consolidation format: %s", consolidation_format)

        return dfs, produced_fn

    def _to_excel(self, dfs: Dict[EvaluationModality, DataFrame]) -> Path:
        r"""Write all modality subtables into one Excel sheet and return its path."""
        excel_fn = self._output_path / self._excel_filename
        startrow = 0
        header_rows: List[int] = []
        with pd.ExcelWriter(excel_fn, engine=self._excel_engine) as writer:  # type: ignore
            for modality, df in dfs.items():
                # Continue below the previous subtable, leaving a blank row.
                if self._sheet_name in writer.book.sheetnames:
                    sheet = writer.book[self._sheet_name]
                    startrow = sheet.max_row + 2

                # Add the modality as a "header" for the metrics subtable
                header_df = DataFrame([modality.name])
                header_rows.append(startrow + 1)  # openpyxl rows are 1-based
                header_df.to_excel(
                    writer,
                    sheet_name=self._sheet_name,
                    startrow=startrow,
                    index=False,
                    header=False,
                )
                startrow += 1

                # Metrics subtable
                df.to_excel(
                    writer,
                    sheet_name=self._sheet_name,
                    startrow=startrow,
                    index=False,
                )
        # Format the excel
        self._format_excel(excel_fn, header_rows)

        return excel_fn

    def _format_excel(self, excel_fn: Path, header_rows: List[int]):
        r"""Do some proper formatting of the generated excel.

        Adjusts column widths to the longest cell value, removes all cell
        borders, and bolds the subtable header rows given in ``header_rows``.
        """
        workbook = load_workbook(excel_fn)
        sheet = workbook[self._sheet_name]

        # Adjust the cell width to the longest string in each column.
        for col in sheet.columns:
            max_length = 0
            for cell in col:
                # BUGFIX: was a bare `except:`, which would also swallow
                # KeyboardInterrupt/SystemExit; narrowed to Exception.
                try:
                    max_length = max(max_length, len(str(cell.value)))
                except Exception:
                    continue
            adjusted_width = max_length + 2  # Add some padding to make it look better
            first_cell = col[0]
            assert isinstance(first_cell, Cell)
            sheet.column_dimensions[first_cell.column_letter].width = adjusted_width

        # Iterate through each cell in the worksheet and remove borders
        for row in sheet.iter_rows():
            for cell in row:
                cell.border = openpyxl.styles.Border()  # Remove borders

        # Make bold the subtable headers
        bold_font = Font(bold=True)
        for header_row in header_rows:
            cell = sheet.cell(row=header_row, column=1)
            cell.font = bold_font
        # (removed dead statement `x = 0`, which had no effect)

        # Save back the excel
        workbook.save(excel_fn)

    def _build_dataframes(
        self,
        multi_evaluation: MultiEvaluation,
    ) -> Dict[EvaluationModality, DataFrame]:
        r"""
        Return a Dict with dataframes per modality.

        Each row of a modality dataframe holds the benchmark, provider,
        experiment, sample counters (including per-rejection-type counts)
        and the modality-specific metrics for one evaluation.
        """
        # Collect all data to build the dataframes
        df_data: Dict[EvaluationModality, List[Dict[str, Union[str, float, int]]]] = {}

        # Collect the dataframe data
        for benchmark, exp_mod_eval in multi_evaluation.evaluations.items():
            for experiment, mod_eval in exp_mod_eval.items():
                for modality, single_evaluation in mod_eval.items():
                    evaluation = single_evaluation.evaluation

                    # Dispatch to the modality-specific metrics extractor.
                    if modality == EvaluationModality.LAYOUT:
                        metrics = self._layout_metrics(evaluation)
                    elif modality == EvaluationModality.MARKDOWN_TEXT:
                        metrics = self._markdowntext_metrics(evaluation)
                    elif modality == EvaluationModality.TABLE_STRUCTURE:
                        metrics = self._tablestructure_metrics(evaluation)
                    elif modality == EvaluationModality.READING_ORDER:
                        metrics = self._readingorder_metrics(evaluation)
                    elif modality == EvaluationModality.BBOXES_TEXT:
                        metrics = self._bboxestext_metrics(evaluation)
                    else:
                        _log.error(
                            "Evaluation modality unsupported for export: %s", modality
                        )
                        continue

                    # Gather the dataframe data
                    provider = (
                        single_evaluation.prediction_provider_type.value
                        if single_evaluation.prediction_provider_type is not None
                        else "Unknown"  # BUGFIX: was misspelled "Unkown"
                    )
                    data: Dict[str, Union[str, float]] = {
                        "Benchmark": benchmark.value,
                        "Provider": provider,
                        "Experiment": experiment,
                        "evaluated_samples": evaluation.evaluated_samples,
                    }
                    # Report zero for rejection types absent from this evaluation.
                    for rej_type in EvaluationRejectionType:
                        if rej_type not in evaluation.rejected_samples:
                            data[rej_type.value] = 0
                        else:
                            data[rej_type.value] = evaluation.rejected_samples[rej_type]

                    data |= metrics
                    df_data.setdefault(modality, []).append(data)

        # Build the dataframes
        dfs: Dict[EvaluationModality, DataFrame] = {}
        for modality, m_data in df_data.items():
            df = DataFrame(m_data)
            df = df.sort_values(by=["Benchmark", "Provider"], ascending=[True, True])
            dfs[modality] = df

        return dfs

    def _layout_metrics(self, evaluation: DatasetLayoutEvaluation) -> Dict[str, str]:
        r"""Get the metrics for the LayoutEvaluation"""
        metrics = {
            "mAP": export_value(evaluation.map_stats),
            "mAP_50": export_value(evaluation.map_50_stats),
            "mAP_75": export_value(evaluation.map_75_stats),
            "weighted_mAP_50": export_value(evaluation.weighted_map_50_stats),
            "weighted_mAP_75": export_value(evaluation.weighted_map_75_stats),
            "weighted_mAP_90": export_value(evaluation.weighted_map_90_stats),
            "weighted_mAP_95": export_value(evaluation.weighted_map_95_stats),
        }
        # One extra column per document-element class.
        for class_evaluation in evaluation.evaluations_per_class:
            key = f"class_{class_evaluation.label}"
            metrics[key] = export_value(class_evaluation.value)

        return metrics

    def _markdowntext_metrics(
        self,
        evaluation: DatasetMarkdownEvaluation,
    ) -> Dict[str, str]:
        r"""Get the text-similarity metrics for the markdown evaluation."""
        return {
            "BLEU": export_value(evaluation.bleu_stats),
            "F1": export_value(evaluation.f1_score_stats),
            "Precision": export_value(evaluation.precision_stats),
            "Recall": export_value(evaluation.recall_stats),
            "Edit_Distance": export_value(evaluation.edit_distance_stats),
            "METEOR": export_value(evaluation.meteor_stats),
        }

    def _tablestructure_metrics(
        self,
        evaluation: DatasetTableEvaluation,
    ) -> Dict[str, str]:
        r"""Get the TEDS metrics for the table-structure evaluation."""
        return {
            "TEDS": export_value(evaluation.TEDS),
            "TEDS_struct": export_value(evaluation.TEDS_struct),
            "TEDS_simple": export_value(evaluation.TEDS_simple),
            "TEDS_complex": export_value(evaluation.TEDS_complex),
        }

    def _readingorder_metrics(
        self,
        evaluation: DatasetReadingOrderEvaluation,
    ) -> Dict[str, str]:
        r"""Get the ARD metrics for the reading-order evaluation."""
        return {
            "ARD": export_value(evaluation.ard_stats),
            "Weighted_ARD": export_value(evaluation.w_ard_stats),
        }

    def _bboxestext_metrics(
        self,
        evaluation: DatasetBoxesTextEvaluation,
    ) -> Dict[str, str]:
        r"""Get the text-similarity metrics for the bbox-text evaluation."""
        return {
            "BLEU": export_value(evaluation.bleu_stats),
            "F1": export_value(evaluation.f1_score_stats),
            "Precision": export_value(evaluation.precision_stats),
            "Recall": export_value(evaluation.recall_stats),
            "Edit_Distance": export_value(evaluation.edit_distance_stats),
            "METEOR": export_value(evaluation.meteor_stats),
        }