docling 2.69.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of docling might be problematic. Click here for more details.
- docling/__init__.py +0 -0
- docling/backend/__init__.py +0 -0
- docling/backend/abstract_backend.py +84 -0
- docling/backend/asciidoc_backend.py +443 -0
- docling/backend/csv_backend.py +125 -0
- docling/backend/docling_parse_backend.py +237 -0
- docling/backend/docling_parse_v2_backend.py +276 -0
- docling/backend/docling_parse_v4_backend.py +260 -0
- docling/backend/docx/__init__.py +0 -0
- docling/backend/docx/drawingml/utils.py +131 -0
- docling/backend/docx/latex/__init__.py +0 -0
- docling/backend/docx/latex/latex_dict.py +274 -0
- docling/backend/docx/latex/omml.py +459 -0
- docling/backend/html_backend.py +1502 -0
- docling/backend/image_backend.py +188 -0
- docling/backend/json/__init__.py +0 -0
- docling/backend/json/docling_json_backend.py +58 -0
- docling/backend/md_backend.py +618 -0
- docling/backend/mets_gbs_backend.py +399 -0
- docling/backend/msexcel_backend.py +686 -0
- docling/backend/mspowerpoint_backend.py +398 -0
- docling/backend/msword_backend.py +1663 -0
- docling/backend/noop_backend.py +51 -0
- docling/backend/pdf_backend.py +82 -0
- docling/backend/pypdfium2_backend.py +417 -0
- docling/backend/webvtt_backend.py +572 -0
- docling/backend/xml/__init__.py +0 -0
- docling/backend/xml/jats_backend.py +819 -0
- docling/backend/xml/uspto_backend.py +1905 -0
- docling/chunking/__init__.py +12 -0
- docling/cli/__init__.py +0 -0
- docling/cli/main.py +974 -0
- docling/cli/models.py +196 -0
- docling/cli/tools.py +17 -0
- docling/datamodel/__init__.py +0 -0
- docling/datamodel/accelerator_options.py +69 -0
- docling/datamodel/asr_model_specs.py +494 -0
- docling/datamodel/backend_options.py +102 -0
- docling/datamodel/base_models.py +493 -0
- docling/datamodel/document.py +699 -0
- docling/datamodel/extraction.py +39 -0
- docling/datamodel/layout_model_specs.py +91 -0
- docling/datamodel/pipeline_options.py +457 -0
- docling/datamodel/pipeline_options_asr_model.py +78 -0
- docling/datamodel/pipeline_options_vlm_model.py +136 -0
- docling/datamodel/settings.py +65 -0
- docling/datamodel/vlm_model_specs.py +365 -0
- docling/document_converter.py +559 -0
- docling/document_extractor.py +327 -0
- docling/exceptions.py +10 -0
- docling/experimental/__init__.py +5 -0
- docling/experimental/datamodel/__init__.py +1 -0
- docling/experimental/datamodel/table_crops_layout_options.py +13 -0
- docling/experimental/datamodel/threaded_layout_vlm_pipeline_options.py +45 -0
- docling/experimental/models/__init__.py +3 -0
- docling/experimental/models/table_crops_layout_model.py +114 -0
- docling/experimental/pipeline/__init__.py +1 -0
- docling/experimental/pipeline/threaded_layout_vlm_pipeline.py +439 -0
- docling/models/__init__.py +0 -0
- docling/models/base_layout_model.py +39 -0
- docling/models/base_model.py +230 -0
- docling/models/base_ocr_model.py +241 -0
- docling/models/base_table_model.py +45 -0
- docling/models/extraction/__init__.py +0 -0
- docling/models/extraction/nuextract_transformers_model.py +305 -0
- docling/models/factories/__init__.py +47 -0
- docling/models/factories/base_factory.py +122 -0
- docling/models/factories/layout_factory.py +7 -0
- docling/models/factories/ocr_factory.py +11 -0
- docling/models/factories/picture_description_factory.py +11 -0
- docling/models/factories/table_factory.py +7 -0
- docling/models/picture_description_base_model.py +149 -0
- docling/models/plugins/__init__.py +0 -0
- docling/models/plugins/defaults.py +60 -0
- docling/models/stages/__init__.py +0 -0
- docling/models/stages/code_formula/__init__.py +0 -0
- docling/models/stages/code_formula/code_formula_model.py +342 -0
- docling/models/stages/layout/__init__.py +0 -0
- docling/models/stages/layout/layout_model.py +249 -0
- docling/models/stages/ocr/__init__.py +0 -0
- docling/models/stages/ocr/auto_ocr_model.py +132 -0
- docling/models/stages/ocr/easyocr_model.py +200 -0
- docling/models/stages/ocr/ocr_mac_model.py +145 -0
- docling/models/stages/ocr/rapid_ocr_model.py +328 -0
- docling/models/stages/ocr/tesseract_ocr_cli_model.py +331 -0
- docling/models/stages/ocr/tesseract_ocr_model.py +262 -0
- docling/models/stages/page_assemble/__init__.py +0 -0
- docling/models/stages/page_assemble/page_assemble_model.py +156 -0
- docling/models/stages/page_preprocessing/__init__.py +0 -0
- docling/models/stages/page_preprocessing/page_preprocessing_model.py +145 -0
- docling/models/stages/picture_classifier/__init__.py +0 -0
- docling/models/stages/picture_classifier/document_picture_classifier.py +246 -0
- docling/models/stages/picture_description/__init__.py +0 -0
- docling/models/stages/picture_description/picture_description_api_model.py +66 -0
- docling/models/stages/picture_description/picture_description_vlm_model.py +123 -0
- docling/models/stages/reading_order/__init__.py +0 -0
- docling/models/stages/reading_order/readingorder_model.py +431 -0
- docling/models/stages/table_structure/__init__.py +0 -0
- docling/models/stages/table_structure/table_structure_model.py +305 -0
- docling/models/utils/__init__.py +0 -0
- docling/models/utils/generation_utils.py +157 -0
- docling/models/utils/hf_model_download.py +45 -0
- docling/models/vlm_pipeline_models/__init__.py +1 -0
- docling/models/vlm_pipeline_models/api_vlm_model.py +180 -0
- docling/models/vlm_pipeline_models/hf_transformers_model.py +391 -0
- docling/models/vlm_pipeline_models/mlx_model.py +325 -0
- docling/models/vlm_pipeline_models/vllm_model.py +344 -0
- docling/pipeline/__init__.py +0 -0
- docling/pipeline/asr_pipeline.py +431 -0
- docling/pipeline/base_extraction_pipeline.py +72 -0
- docling/pipeline/base_pipeline.py +326 -0
- docling/pipeline/extraction_vlm_pipeline.py +207 -0
- docling/pipeline/legacy_standard_pdf_pipeline.py +262 -0
- docling/pipeline/simple_pipeline.py +55 -0
- docling/pipeline/standard_pdf_pipeline.py +859 -0
- docling/pipeline/threaded_standard_pdf_pipeline.py +5 -0
- docling/pipeline/vlm_pipeline.py +416 -0
- docling/py.typed +1 -0
- docling/utils/__init__.py +0 -0
- docling/utils/accelerator_utils.py +97 -0
- docling/utils/api_image_request.py +205 -0
- docling/utils/deepseekocr_utils.py +388 -0
- docling/utils/export.py +146 -0
- docling/utils/glm_utils.py +361 -0
- docling/utils/layout_postprocessor.py +683 -0
- docling/utils/locks.py +3 -0
- docling/utils/model_downloader.py +168 -0
- docling/utils/ocr_utils.py +69 -0
- docling/utils/orientation.py +65 -0
- docling/utils/profiling.py +65 -0
- docling/utils/utils.py +65 -0
- docling/utils/visualization.py +85 -0
- docling-2.69.0.dist-info/METADATA +237 -0
- docling-2.69.0.dist-info/RECORD +138 -0
- docling-2.69.0.dist-info/WHEEL +5 -0
- docling-2.69.0.dist-info/entry_points.txt +6 -0
- docling-2.69.0.dist-info/licenses/LICENSE +21 -0
- docling-2.69.0.dist-info/top_level.txt +1 -0
docling/__init__.py
ADDED
|
File without changes
|
|
File without changes
|
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
from abc import ABC, abstractmethod
|
|
2
|
+
from io import BytesIO
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
from typing import TYPE_CHECKING, Union
|
|
5
|
+
|
|
6
|
+
from docling_core.types.doc import DoclingDocument
|
|
7
|
+
|
|
8
|
+
from docling.datamodel.backend_options import (
|
|
9
|
+
BackendOptions,
|
|
10
|
+
BaseBackendOptions,
|
|
11
|
+
DeclarativeBackendOptions,
|
|
12
|
+
)
|
|
13
|
+
|
|
14
|
+
if TYPE_CHECKING:
|
|
15
|
+
from docling.datamodel.base_models import InputFormat
|
|
16
|
+
from docling.datamodel.document import InputDocument
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class AbstractDocumentBackend(ABC):
    """Base contract for every document backend.

    A backend wraps one input document (file path or in-memory stream) and
    exposes validity, pagination capability, and the set of input formats it
    can handle. Subclasses are constructed by the conversion machinery with
    the originating ``InputDocument`` plus its raw payload.
    """

    @abstractmethod
    def __init__(
        self,
        in_doc: "InputDocument",
        path_or_stream: Union[BytesIO, Path],
        options: BaseBackendOptions = BaseBackendOptions(),
    ):
        # Metadata mirrored from the input document so the backend can be
        # used without holding a reference to the full InputDocument.
        self.file = in_doc.file
        self.path_or_stream = path_or_stream
        self.document_hash = in_doc.document_hash
        self.input_format = in_doc.format
        self.options = options

    @abstractmethod
    def is_valid(self) -> bool:
        """Return True when the backend initialized and can serve content."""
        pass

    @classmethod
    @abstractmethod
    def supports_pagination(cls) -> bool:
        """Return True when the backend can report and address pages."""
        pass

    def unload(self):
        """Release the underlying payload (closes an in-memory stream)."""
        if isinstance(self.path_or_stream, BytesIO):
            self.path_or_stream.close()

        # Drop the reference in both the stream and the path case.
        self.path_or_stream = None

    @classmethod
    @abstractmethod
    def supported_formats(cls) -> set["InputFormat"]:
        """Return the set of input formats this backend accepts."""
        pass
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
class PaginatedDocumentBackend(AbstractDocumentBackend):
    """PaginatedDocumentBackend.

    A paginated document backend addresses its content page by page and can
    report how many pages the document has.
    """

    # NOTE(review): the original docstring here was a copy-paste of
    # DeclarativeDocumentBackend's; corrected to describe pagination.
    @abstractmethod
    def page_count(self) -> int:
        """Return the number of pages in the document."""
        pass
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
class DeclarativeDocumentBackend(AbstractDocumentBackend):
    """DeclarativeDocumentBackend.

    A declarative document backend is a backend that can transform to DoclingDocument
    straight without a recognition pipeline.
    """

    @abstractmethod
    def __init__(
        self,
        in_doc: "InputDocument",
        path_or_stream: Union[BytesIO, Path],
        options: BackendOptions = DeclarativeBackendOptions(),
    ) -> None:
        # Declarative backends default to DeclarativeBackendOptions but
        # otherwise initialize exactly like the abstract base.
        super().__init__(in_doc, path_or_stream, options)

    @abstractmethod
    def convert(self) -> DoclingDocument:
        """Transform the wrapped input directly into a DoclingDocument."""
        pass
|
|
@@ -0,0 +1,443 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import re
|
|
3
|
+
from io import BytesIO
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Final, Union
|
|
6
|
+
|
|
7
|
+
from docling_core.types.doc import (
|
|
8
|
+
DocItemLabel,
|
|
9
|
+
DoclingDocument,
|
|
10
|
+
DocumentOrigin,
|
|
11
|
+
GroupItem,
|
|
12
|
+
GroupLabel,
|
|
13
|
+
ImageRef,
|
|
14
|
+
Size,
|
|
15
|
+
TableCell,
|
|
16
|
+
TableData,
|
|
17
|
+
)
|
|
18
|
+
|
|
19
|
+
from docling.backend.abstract_backend import DeclarativeDocumentBackend
|
|
20
|
+
from docling.datamodel.base_models import InputFormat
|
|
21
|
+
from docling.datamodel.document import InputDocument
|
|
22
|
+
|
|
23
|
+
_log = logging.getLogger(__name__)
|
|
24
|
+
|
|
25
|
+
DEFAULT_IMAGE_WIDTH: Final = 128
|
|
26
|
+
DEFAULT_IMAGE_HEIGHT: Final = 128
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class AsciiDocBackend(DeclarativeDocumentBackend):
    """Line-oriented AsciiDoc to DoclingDocument converter.

    The document is read into ``self.lines`` and parsed by a single pass
    state machine (`_parse`) that recognizes titles, section headers, lists,
    tables, images, captions, and plain-text paragraphs.
    """

    def __init__(self, in_doc: "InputDocument", path_or_stream: Union[BytesIO, Path]):
        super().__init__(in_doc, path_or_stream)

        self.path_or_stream = path_or_stream

        try:
            if isinstance(self.path_or_stream, BytesIO):
                text_stream = self.path_or_stream.getvalue().decode("utf-8")
                self.lines = text_stream.split("\n")
            # NOTE(review): the Path branch keeps trailing newlines
            # (readlines) while the stream branch does not (split) — the
            # parser strips lines before use, but confirm before relying
            # on raw line content downstream.
            if isinstance(self.path_or_stream, Path):
                with open(self.path_or_stream, encoding="utf-8") as f:
                    self.lines = f.readlines()
            self.valid = True

        except Exception as e:
            raise RuntimeError(
                f"Could not initialize AsciiDoc backend for file with hash {self.document_hash}."
            ) from e
        return

    def is_valid(self) -> bool:
        """True when the input text could be loaded in ``__init__``."""
        return self.valid

    @classmethod
    def supports_pagination(cls) -> bool:
        # AsciiDoc is a flowing text format with no page concept.
        return False

    def unload(self):
        # Nothing to release: the text was fully read during __init__.
        return

    @classmethod
    def supported_formats(cls) -> set[InputFormat]:
        return {InputFormat.ASCIIDOC}

    def convert(self) -> DoclingDocument:
        """
        Parses the ASCII into a structured document model.
        """

        origin = DocumentOrigin(
            filename=self.file.name or "file",
            mimetype="text/asciidoc",
            binary_hash=self.document_hash,
        )

        doc = DoclingDocument(name=self.file.stem or "file", origin=origin)

        doc = self._parse(doc)

        return doc

    def _parse(self, doc: DoclingDocument):
        """
        Main function that orchestrates the parsing by yielding components:
        title, section headers, text, lists, and tables.
        """

        # State flags for the two multi-line constructs.
        in_list = False
        in_table = False

        # Accumulators for multi-line paragraphs, table rows, and captions.
        text_data: list[str] = []
        table_data: list[str] = []
        caption_data: list[str] = []

        # parents[k] holds the open container at nesting depth k;
        # indents[k] records the source indentation that opened depth k.
        parents: dict[int, Union[GroupItem, None]] = {}
        indents: dict[int, Union[GroupItem, None]] = {}

        # Up to 10 nesting levels are supported.
        for i in range(10):
            parents[i] = None
            indents[i] = None

        for line in self.lines:
            # Title
            if self._is_title(line):
                item = self._parse_title(line)
                level = item["level"]

                parents[level] = doc.add_text(
                    text=item["text"], label=DocItemLabel.TITLE
                )

            # Section headers
            elif self._is_section_header(line):
                item = self._parse_section_header(line)
                level = item["level"]

                parents[level] = doc.add_heading(
                    text=item["text"], level=item["level"], parent=parents[level - 1]
                )
                # A new header invalidates any deeper open containers.
                for k, v in parents.items():
                    if k > level:
                        parents[k] = None

            # Lists
            elif self._is_list_item(line):
                _log.debug(f"line: {line}")
                item = self._parse_list_item(line)
                _log.debug(f"parsed list-item: {item}")

                level = self._get_current_level(parents)

                if not in_list:
                    # First item: open a list group under the current parent.
                    in_list = True

                    parents[level + 1] = doc.add_group(
                        parent=parents[level], name="list", label=GroupLabel.LIST
                    )
                    indents[level + 1] = item["indent"]

                elif in_list and item["indent"] > indents[level]:
                    # Deeper indentation: open a nested list group.
                    parents[level + 1] = doc.add_group(
                        parent=parents[level], name="list", label=GroupLabel.LIST
                    )
                    indents[level + 1] = item["indent"]

                elif in_list and item["indent"] < indents[level]:
                    # Shallower indentation: close nested groups until the
                    # recorded indent no longer exceeds the item's indent.
                    while item["indent"] < indents[level]:
                        parents[level] = None
                        indents[level] = None
                        level -= 1

                doc.add_list_item(
                    item["text"], parent=self._get_current_parent(parents)
                )

            elif in_list and not self._is_list_item(line):
                # Any non-item line terminates the open list.
                in_list = False

                level = self._get_current_level(parents)
                parents[level] = None

            # Tables
            elif line.strip() == "|===" and not in_table:  # start of table
                in_table = True

            elif self._is_table_line(line):  # within a table
                in_table = True
                table_data.append(self._parse_table_line(line))

            elif in_table and (
                (not self._is_table_line(line)) or line.strip() == "|==="
            ):  # end of table
                # Flush any pending caption as the table caption.
                caption = None
                if len(caption_data) > 0:
                    caption = doc.add_text(
                        text=" ".join(caption_data), label=DocItemLabel.CAPTION
                    )

                caption_data = []

                data = self._populate_table_as_grid(table_data)
                doc.add_table(
                    data=data, parent=self._get_current_parent(parents), caption=caption
                )

                in_table = False
                table_data = []

            # Picture
            elif self._is_picture(line):
                # Flush any pending caption as the picture caption.
                caption = None
                if len(caption_data) > 0:
                    caption = doc.add_text(
                        text=" ".join(caption_data), label=DocItemLabel.CAPTION
                    )

                caption_data = []

                item = self._parse_picture(line)

                size: Size
                if "width" in item and "height" in item:
                    size = Size(width=int(item["width"]), height=int(item["height"]))
                else:
                    size = Size(width=DEFAULT_IMAGE_WIDTH, height=DEFAULT_IMAGE_HEIGHT)

                # Normalize local paths to file: URIs; http(s) URIs fall
                # through all branches.
                # NOTE(review): an http(s) URI leaves uri=None here — confirm
                # ImageRef accepts a missing URI for remote images.
                uri = None
                if (
                    "uri" in item
                    and not item["uri"].startswith("http")
                    and item["uri"].startswith("//")
                ):
                    uri = "file:" + item["uri"]
                elif (
                    "uri" in item
                    and not item["uri"].startswith("http")
                    and item["uri"].startswith("/")
                ):
                    uri = "file:/" + item["uri"]
                elif "uri" in item and not item["uri"].startswith("http"):
                    uri = "file://" + item["uri"]

                image = ImageRef(mimetype="image/png", size=size, dpi=70, uri=uri)
                doc.add_picture(image=image, caption=caption)

            # Caption
            elif self._is_caption(line) and len(caption_data) == 0:
                item = self._parse_caption(line)
                caption_data.append(item["text"])

            elif (
                len(line.strip()) > 0 and len(caption_data) > 0
            ):  # allow multiline captions
                item = self._parse_text(line)
                caption_data.append(item["text"])

            # Plain text
            elif len(line.strip()) == 0 and len(text_data) > 0:
                # Blank line terminates the paragraph being accumulated.
                doc.add_text(
                    text=" ".join(text_data),
                    label=DocItemLabel.PARAGRAPH,
                    parent=self._get_current_parent(parents),
                )
                text_data = []

            elif len(line.strip()) > 0:  # allow multiline texts
                item = self._parse_text(line)
                text_data.append(item["text"])

        # Flush a paragraph still open at end of input.
        if len(text_data) > 0:
            doc.add_text(
                text=" ".join(text_data),
                label=DocItemLabel.PARAGRAPH,
                parent=self._get_current_parent(parents),
            )
            text_data = []

        # Flush a table still open at end of input.
        if in_table and len(table_data) > 0:
            data = self._populate_table_as_grid(table_data)
            doc.add_table(data=data, parent=self._get_current_parent(parents))

            in_table = False
            table_data = []

        return doc
|
|
271
|
+
|
|
272
|
+
@staticmethod
|
|
273
|
+
def _get_current_level(parents):
|
|
274
|
+
for k, v in parents.items():
|
|
275
|
+
if v is None and k > 0:
|
|
276
|
+
return k - 1
|
|
277
|
+
|
|
278
|
+
return 0
|
|
279
|
+
|
|
280
|
+
@staticmethod
|
|
281
|
+
def _get_current_parent(parents):
|
|
282
|
+
for k, v in parents.items():
|
|
283
|
+
if v is None and k > 0:
|
|
284
|
+
return parents[k - 1]
|
|
285
|
+
|
|
286
|
+
return None
|
|
287
|
+
|
|
288
|
+
# ========= Title
|
|
289
|
+
@staticmethod
|
|
290
|
+
def _is_title(line):
|
|
291
|
+
return re.match(r"^= ", line)
|
|
292
|
+
|
|
293
|
+
@staticmethod
|
|
294
|
+
def _parse_title(line):
|
|
295
|
+
return {"type": "title", "text": line[2:].strip(), "level": 0}
|
|
296
|
+
|
|
297
|
+
# ========= Section headers
|
|
298
|
+
@staticmethod
|
|
299
|
+
def _is_section_header(line):
|
|
300
|
+
return re.match(r"^==+\s+", line)
|
|
301
|
+
|
|
302
|
+
@staticmethod
|
|
303
|
+
def _parse_section_header(line):
|
|
304
|
+
match = re.match(r"^(=+)\s+(.*)", line)
|
|
305
|
+
|
|
306
|
+
marker = match.group(1) # The list marker (e.g., "*", "-", "1.")
|
|
307
|
+
text = match.group(2) # The actual text of the list item
|
|
308
|
+
|
|
309
|
+
header_level = marker.count("=") # number of '=' represents level
|
|
310
|
+
return {
|
|
311
|
+
"type": "header",
|
|
312
|
+
"level": header_level - 1,
|
|
313
|
+
"text": text.strip(),
|
|
314
|
+
}
|
|
315
|
+
|
|
316
|
+
# ========= Lists
|
|
317
|
+
@staticmethod
|
|
318
|
+
def _is_list_item(line):
|
|
319
|
+
return re.match(r"^(\s)*(\*|-|\d+\.|\w+\.) ", line)
|
|
320
|
+
|
|
321
|
+
@staticmethod
|
|
322
|
+
def _parse_list_item(line):
|
|
323
|
+
"""Extract the item marker (number or bullet symbol) and the text of the item."""
|
|
324
|
+
|
|
325
|
+
match = re.match(r"^(\s*)(\*|-|\d+\.)\s+(.*)", line)
|
|
326
|
+
if match:
|
|
327
|
+
indent = match.group(1)
|
|
328
|
+
marker = match.group(2) # The list marker (e.g., "*", "-", "1.")
|
|
329
|
+
text = match.group(3) # The actual text of the list item
|
|
330
|
+
|
|
331
|
+
if marker == "*" or marker == "-":
|
|
332
|
+
return {
|
|
333
|
+
"type": "list_item",
|
|
334
|
+
"marker": marker,
|
|
335
|
+
"text": text.strip(),
|
|
336
|
+
"numbered": False,
|
|
337
|
+
"indent": 0 if indent is None else len(indent),
|
|
338
|
+
}
|
|
339
|
+
else:
|
|
340
|
+
return {
|
|
341
|
+
"type": "list_item",
|
|
342
|
+
"marker": marker,
|
|
343
|
+
"text": text.strip(),
|
|
344
|
+
"numbered": True,
|
|
345
|
+
"indent": 0 if indent is None else len(indent),
|
|
346
|
+
}
|
|
347
|
+
else:
|
|
348
|
+
# Fallback if no match
|
|
349
|
+
return {
|
|
350
|
+
"type": "list_item",
|
|
351
|
+
"marker": "-",
|
|
352
|
+
"text": line,
|
|
353
|
+
"numbered": False,
|
|
354
|
+
"indent": 0,
|
|
355
|
+
}
|
|
356
|
+
|
|
357
|
+
# ========= Tables
|
|
358
|
+
@staticmethod
|
|
359
|
+
def _is_table_line(line):
|
|
360
|
+
return re.match(r"^\|.*\|", line)
|
|
361
|
+
|
|
362
|
+
@staticmethod
|
|
363
|
+
def _parse_table_line(line):
|
|
364
|
+
# Split table cells and trim extra spaces
|
|
365
|
+
return [cell.strip() for cell in line.split("|") if cell.strip()]
|
|
366
|
+
|
|
367
|
+
@staticmethod
|
|
368
|
+
def _populate_table_as_grid(table_data):
|
|
369
|
+
num_rows = len(table_data)
|
|
370
|
+
|
|
371
|
+
# Adjust the table data into a grid format
|
|
372
|
+
num_cols = max(len(row) for row in table_data)
|
|
373
|
+
|
|
374
|
+
data = TableData(num_rows=num_rows, num_cols=num_cols, table_cells=[])
|
|
375
|
+
for row_idx, row in enumerate(table_data):
|
|
376
|
+
# Pad rows with empty strings to match column count
|
|
377
|
+
# grid.append(row + [''] * (max_cols - len(row)))
|
|
378
|
+
|
|
379
|
+
for col_idx, text in enumerate(row):
|
|
380
|
+
row_span = 1
|
|
381
|
+
col_span = 1
|
|
382
|
+
|
|
383
|
+
cell = TableCell(
|
|
384
|
+
text=text,
|
|
385
|
+
row_span=row_span,
|
|
386
|
+
col_span=col_span,
|
|
387
|
+
start_row_offset_idx=row_idx,
|
|
388
|
+
end_row_offset_idx=row_idx + row_span,
|
|
389
|
+
start_col_offset_idx=col_idx,
|
|
390
|
+
end_col_offset_idx=col_idx + col_span,
|
|
391
|
+
column_header=row_idx == 0,
|
|
392
|
+
row_header=False,
|
|
393
|
+
)
|
|
394
|
+
data.table_cells.append(cell)
|
|
395
|
+
|
|
396
|
+
return data
|
|
397
|
+
|
|
398
|
+
# ========= Pictures
|
|
399
|
+
@staticmethod
|
|
400
|
+
def _is_picture(line):
|
|
401
|
+
return re.match(r"^image::", line)
|
|
402
|
+
|
|
403
|
+
@staticmethod
|
|
404
|
+
def _parse_picture(line):
|
|
405
|
+
"""
|
|
406
|
+
Parse an image macro, extracting its path and attributes.
|
|
407
|
+
Syntax: image::path/to/image.png[Alt Text, width=200, height=150, align=center]
|
|
408
|
+
"""
|
|
409
|
+
mtch = re.match(r"^image::(.+)\[(.*)\]$", line)
|
|
410
|
+
if mtch:
|
|
411
|
+
picture_path = mtch.group(1).strip()
|
|
412
|
+
attributes = mtch.group(2).split(",")
|
|
413
|
+
picture_info = {"type": "picture", "uri": picture_path}
|
|
414
|
+
|
|
415
|
+
# Extract optional attributes (alt text, width, height, alignment)
|
|
416
|
+
if attributes:
|
|
417
|
+
picture_info["alt"] = attributes[0].strip() if attributes[0] else ""
|
|
418
|
+
for attr in attributes[1:]:
|
|
419
|
+
key, value = attr.split("=")
|
|
420
|
+
picture_info[key.strip()] = value.strip()
|
|
421
|
+
|
|
422
|
+
return picture_info
|
|
423
|
+
|
|
424
|
+
return {"type": "picture", "uri": line}
|
|
425
|
+
|
|
426
|
+
# ========= Captions
|
|
427
|
+
@staticmethod
|
|
428
|
+
def _is_caption(line):
|
|
429
|
+
return re.match(r"^\.(.+)", line)
|
|
430
|
+
|
|
431
|
+
@staticmethod
|
|
432
|
+
def _parse_caption(line):
|
|
433
|
+
mtch = re.match(r"^\.(.+)", line)
|
|
434
|
+
if mtch:
|
|
435
|
+
text = mtch.group(1)
|
|
436
|
+
return {"type": "caption", "text": text}
|
|
437
|
+
|
|
438
|
+
return {"type": "caption", "text": ""}
|
|
439
|
+
|
|
440
|
+
# ========= Plain text
|
|
441
|
+
@staticmethod
|
|
442
|
+
def _parse_text(line):
|
|
443
|
+
return {"type": "text", "text": line.strip()}
|
|
@@ -0,0 +1,125 @@
|
|
|
1
|
+
import csv
|
|
2
|
+
import logging
|
|
3
|
+
import warnings
|
|
4
|
+
from io import BytesIO, StringIO
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import Set, Union
|
|
7
|
+
|
|
8
|
+
from docling_core.types.doc import DoclingDocument, DocumentOrigin, TableCell, TableData
|
|
9
|
+
|
|
10
|
+
from docling.backend.abstract_backend import DeclarativeDocumentBackend
|
|
11
|
+
from docling.datamodel.base_models import InputFormat
|
|
12
|
+
from docling.datamodel.document import InputDocument
|
|
13
|
+
|
|
14
|
+
_log = logging.getLogger(__name__)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class CsvDocumentBackend(DeclarativeDocumentBackend):
    """Declarative backend that converts a CSV file into a one-table document."""

    # Decoded CSV text, loaded eagerly in __init__.
    content: StringIO

    def __init__(self, in_doc: "InputDocument", path_or_stream: Union[BytesIO, Path]):
        super().__init__(in_doc, path_or_stream)

        # Load content
        try:
            if isinstance(self.path_or_stream, BytesIO):
                self.content = StringIO(self.path_or_stream.getvalue().decode("utf-8"))
            elif isinstance(self.path_or_stream, Path):
                self.content = StringIO(self.path_or_stream.read_text("utf-8"))
            self.valid = True
        except Exception as e:
            raise RuntimeError(
                f"CsvDocumentBackend could not load document with hash {self.document_hash}"
            ) from e
        return

    def is_valid(self) -> bool:
        return self.valid

    @classmethod
    def supports_pagination(cls) -> bool:
        # CSV is a flat tabular format without pages.
        return False

    def unload(self):
        if isinstance(self.path_or_stream, BytesIO):
            self.path_or_stream.close()
        self.path_or_stream = None

    @classmethod
    def supported_formats(cls) -> Set[InputFormat]:
        return {InputFormat.CSV}

    def convert(self) -> DoclingDocument:
        """
        Parses the CSV data into a structured document model.

        Returns a DoclingDocument containing one table; the first CSV row is
        marked as the column-header row. Empty input yields a document with
        no table.

        Raises:
            RuntimeError: if the backend failed to initialize or the
                delimiter is not one of ``, ; \\t | :``.
        """

        # Detect CSV dialect from the first line. Guard empty input:
        # csv.Sniffer().sniff("") raises csv.Error, which previously crashed
        # convert() on an empty file.
        head = self.content.readline()
        if head:
            dialect = csv.Sniffer().sniff(head, ",;\t|:")
            _log.info(f'Parsing CSV with delimiter: "{dialect.delimiter}"')
            if dialect.delimiter not in {",", ";", "\t", "|", ":"}:
                raise RuntimeError(
                    f"Cannot convert csv with unknown delimiter {dialect.delimiter}."
                )

            # Parse CSV
            self.content.seek(0)
            result = csv.reader(self.content, dialect=dialect, strict=True)
            self.csv_data = list(result)
        else:
            self.csv_data = []
        _log.info(f"Detected {len(self.csv_data)} lines")

        # Ensure uniform column length. Guarded: the original indexed
        # csv_data[0] unconditionally and raised IndexError on empty input.
        if self.csv_data:
            expected_length = len(self.csv_data[0])
            is_uniform = all(len(row) == expected_length for row in self.csv_data)
            if not is_uniform:
                warnings.warn(
                    f"Inconsistent column lengths detected in CSV data. "
                    f"Expected {expected_length} columns, but found rows with varying lengths. "
                    f"Ensure all rows have the same number of columns."
                )

        # Parse the CSV into a structured document model
        origin = DocumentOrigin(
            filename=self.file.name or "file.csv",
            mimetype="text/csv",
            binary_hash=self.document_hash,
        )

        doc = DoclingDocument(name=self.file.stem or "file.csv", origin=origin)

        if self.is_valid():
            # Convert CSV data to table
            if self.csv_data:
                num_rows = len(self.csv_data)
                num_cols = max(len(row) for row in self.csv_data)

                table_data = TableData(
                    num_rows=num_rows,
                    num_cols=num_cols,
                    table_cells=[],
                )

                # Convert each cell to TableCell
                for row_idx, row in enumerate(self.csv_data):
                    for col_idx, cell_value in enumerate(row):
                        cell = TableCell(
                            text=str(cell_value),
                            row_span=1,  # CSV doesn't support merged cells
                            col_span=1,
                            start_row_offset_idx=row_idx,
                            end_row_offset_idx=row_idx + 1,
                            start_col_offset_idx=col_idx,
                            end_col_offset_idx=col_idx + 1,
                            column_header=row_idx == 0,  # First row as header
                            row_header=False,
                        )
                        table_data.table_cells.append(cell)

                doc.add_table(data=table_data)
        else:
            raise RuntimeError(
                f"Cannot convert doc with {self.document_hash} because the backend failed to init."
            )

        return doc
|