datapizza-ai-parsers-azure 0.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,391 @@
import json
from typing import Any

import aiofiles
from datapizza.core.modules.parser import Parser
from datapizza.core.utils import extract_media
from datapizza.type import Media, MediaNode, Node, NodeType

from azure.ai.documentintelligence import DocumentIntelligenceClient
from azure.ai.documentintelligence.aio import (
    DocumentIntelligenceClient as AsyncDocumentIntelligenceClient,
)
from azure.ai.documentintelligence.models import AnalyzeDocumentRequest, AnalyzeResult
from azure.core.credentials import AzureKeyCredential


class AzureParser(Parser):
    """
    Parser that builds a hierarchical tree structure from an Azure AI Document
    Intelligence response.

    The hierarchy goes from document -> sections -> paragraphs/tables/figures,
    recursing into nested sections.

    params:
        api_key: str
        endpoint: str
        result_type: str, one of "text" (default) or "markdown"
    """

    def __init__(self, api_key: str, endpoint: str, result_type: str = "text"):
        self.api_key = api_key
        self.endpoint = endpoint
        self.result_type = result_type
        self.parser = None  # created lazily by _get_parser()
        self.a_parser = None  # created lazily by _get_a_parser()
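
    # Illustrative construction (the key, endpoint, and file below are
    # placeholder values for illustration, not part of the published module):
    #
    #   parser = AzureParser(
    #       api_key="<azure-key>",
    #       endpoint="https://<resource>.cognitiveservices.azure.com/",
    #       result_type="markdown",
    #   )
    #   document_node = parser.parse("contract.pdf")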

    def _create_parser(self):
        document_intelligence_client = DocumentIntelligenceClient(
            endpoint=self.endpoint, credential=AzureKeyCredential(self.api_key)
        )
        return document_intelligence_client

    def _create_a_parser(self):
        parser = AsyncDocumentIntelligenceClient(
            endpoint=self.endpoint, credential=AzureKeyCredential(self.api_key)
        )
        return parser

    def _get_parser(self):
        if not self.parser:
            self.parser = self._create_parser()
        return self.parser

    def _get_a_parser(self):
        if not self.a_parser:
            self.a_parser = self._create_a_parser()
        return self.a_parser

    def _parse_file(self, file_path: str) -> Node:
        """Parse an Azure Document Intelligence JSON file into a Node structure."""
        with open(file_path) as file:
            json_data = json.load(file)

        return self._parse_json(json_data, file_path=file_path)

    def _get_missing_paragraphs(self, json_data: dict) -> list[str]:
        """Return paragraph pointers that no section, figure, or table references."""

        sections = json_data.get("sections", [])
        figures = json_data.get("figures", [])
        tables = json_data.get("tables", [])

        all_paragraphs = [
            "/paragraphs/" + str(x) for x in range(len(json_data.get("paragraphs", [])))
        ]

        elements = []

        def _collect_elements(container):
            # Walk the element pointers (e.g. "/paragraphs/3", "/sections/1"),
            # recording paragraph references and recursing into nested sections.
            for element in container.get("elements", []):
                if "paragraph" in element:
                    elements.append(element)
                elif "section" in element:
                    section_idx = element.split("/")[2]
                    _collect_elements(sections[int(section_idx)])

        for container in sections + figures + tables:
            _collect_elements(container)

        missing = [x for x in all_paragraphs if x not in elements]

        return missing
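
    # Example (hypothetical data): with four paragraphs, no figures or tables,
    # and a single section whose "elements" are ["/paragraphs/0", "/paragraphs/2"],
    # this returns ["/paragraphs/1", "/paragraphs/3"].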

    def _insert_missing_paragraphs(self, json_data: dict) -> dict:
        """Insert missing paragraphs into the Azure Document Intelligence JSON data."""

        missing = self._get_missing_paragraphs(json_data)

        def _insert_paragraph_recursive(section, p_idx, p):
            # Insert the paragraph pointer before the first element whose
            # paragraph index is greater than p_idx, recursing into subsections.
            for i, element in enumerate(section.get("elements", [])):
                if "paragraph" in element:
                    if int(element.split("/")[2]) > int(p_idx):
                        section["elements"].insert(i, p)
                        return True
                elif "section" in element:
                    section_idx = element.split("/")[2]
                    next_section = json_data["sections"][int(section_idx)]
                    if _insert_paragraph_recursive(next_section, p_idx, p):
                        return True
            return False

        for p in missing:
            idx = int(p.split("/")[2])

            for section in json_data.get("sections", []):
                if _insert_paragraph_recursive(section, idx, p):
                    break
        return json_data
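
    # Continuing the hypothetical example above: "/paragraphs/1" is inserted just
    # before "/paragraphs/2" in the section's elements, while "/paragraphs/3" has
    # no later paragraph to anchor on and is therefore left out of the tree.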

    def _parse_json(self, json_data: dict, file_path: str) -> Node:
        """
        Parse Azure Document Intelligence JSON into a hierarchical Node structure.

        Args:
            json_data: The Azure Document Intelligence JSON response

        Returns:
            A Node representing the document with hierarchical structure
        """
        json_data = self._insert_missing_paragraphs(json_data)

        # Create the root document node
        document_node = Node(
            children=[],
            metadata=self._extract_document_metadata(json_data),
            node_type=NodeType.DOCUMENT,
        )

        # Walk the element tree starting from the first (root) section; the
        # payload is already the AnalyzeResult dict, not wrapped in "analyzeResult"
        analyze_result = json_data
        sections = analyze_result.get("sections", [])

        document_node.children = self._process_children_elements(
            sections[0], analyze_result, file_path=file_path
        )

        return document_node

    def _process_children_elements(
        self,
        parent_object: dict[str, Any],
        analyze_result: dict[str, Any],
        file_path: str,
    ) -> list[Node]:
        """Process the children elements of a section."""
        children_nodes = []
        elements = parent_object.get("elements", [])
        # Each element is a JSON pointer such as "/paragraphs/3" or "/tables/0"
        for element in elements:
            if "paragraph" in element:
                paragraph_index = element.split("/")[2]

                paragraph = analyze_result.get("paragraphs", [])[int(paragraph_index)]
                paragraph_node = self._create_paragraph_node(paragraph)
                paragraph_node.children = self._process_children_elements(
                    paragraph, analyze_result, file_path=file_path
                )
                children_nodes.append(paragraph_node)

            elif "table" in element:
                table_index = element.split("/")[2]
                table = analyze_result.get("tables", [])[int(table_index)]
                table_node = self._create_media_node(
                    media=table,
                    node_type=NodeType.TABLE,
                    content_result=analyze_result.get("content", ""),
                    file_path=file_path,
                )
                table_node.children = self._process_children_elements(
                    table, analyze_result, file_path=file_path
                )
                children_nodes.append(table_node)

            elif "figures" in element:
                image_index = element.split("/")[2]
                image = analyze_result.get("figures", [])[int(image_index)]
                image_node = self._create_media_node(
                    media=image,
                    node_type=NodeType.FIGURE,
                    content_result=analyze_result.get("content", ""),
                    file_path=file_path,
                )
                image_node.children = self._process_children_elements(
                    image, analyze_result, file_path=file_path
                )
                children_nodes.append(image_node)

            elif "section" in element:
                section_index = element.split("/")[2]
                section = analyze_result.get("sections", [])[int(section_index)]
                section_node = Node(children=[], node_type=NodeType.SECTION)
                section_node.children = self._process_children_elements(
                    section, analyze_result, file_path=file_path
                )
                children_nodes.append(section_node)

        return children_nodes
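
    # Illustration (hypothetical fragment): a section whose elements are
    # ["/paragraphs/0", "/tables/0"] produces a PARAGRAPH Node followed by a
    # TABLE MediaNode, each populated recursively from its own "elements" list.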

    def _transform_cells_to_markdown(
        self, table_data: dict[str, Any], content_result: str
    ) -> str:
        """Return the table's span of the document content, which is a markdown
        table when the result was requested in markdown format."""
        cells = table_data.get("cells", [])
        if not cells:
            return ""

        offset = table_data.get("spans", [{}])[0].get("offset")
        length = table_data.get("spans", [{}])[0].get("length")
        if offset is None or length is None:
            return ""

        markdown_table = content_result[offset : offset + length]

        return markdown_table
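
    # Example (hypothetical numbers): a table with spans
    # [{"offset": 120, "length": 42}] yields content_result[120:162], i.e. the
    # table's slice of the document content as emitted by Azure.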

    def _create_media_node(
        self,
        media: dict[str, Any],
        node_type: NodeType,
        content_result: str,
        file_path: str,
    ) -> Node:
        """Create a node for a media element (table or figure) with its child elements."""
        # Get bounding regions
        bounding_regions = media.get("boundingRegions", [])

        if not (file_path and bounding_regions):
            raise ValueError("No file path or bounding regions found for media")

        # Crop the media out of the source page and encode it as a base64 PNG
        base64_image = extract_media(
            coordinates=bounding_regions[0]["polygon"],
            file_path=file_path,
            page_number=bounding_regions[0]["pageNumber"],
        )

        media_obj = Media(
            media_type="image",
            source=base64_image,
            source_type="base64",
            extension="png",
        )

        content = None
        metadata = {
            "boundingRegions": bounding_regions,
        }
        if node_type == NodeType.TABLE:
            content = self._transform_cells_to_markdown(media, content_result)
            metadata["rowCount"] = media.get("rowCount")
            metadata["columnCount"] = media.get("columnCount")

        # Create the MediaNode with bounding-regions metadata
        media_node = MediaNode(
            media=media_obj,
            children=[],
            node_type=node_type,
            metadata=metadata,
            content=content,
        )
        return media_node

    def _extract_document_metadata(self, json_data: dict[str, Any]) -> dict[str, Any]:
        """Extract document-level metadata from the Azure response."""
        metadata = {}
        # The payload may be a raw response or the AnalyzeResult dict itself
        analyze_result = json_data.get("analyzeResult", json_data)

        # Add document-level metadata
        if "documentResults" in analyze_result:
            doc_results = analyze_result["documentResults"]
            if doc_results and len(doc_results) > 0:
                metadata.update(doc_results[0].get("fields", {}))

        # Add model information if available
        metadata["modelId"] = analyze_result.get("modelId")
        metadata["apiVersion"] = analyze_result.get("apiVersion")

        return metadata

    # def _create_table_node(self, table: Dict[str, Any]) -> Node:
    #     """Create a node for a table with its child lines and words."""
    #     table_node = Node(
    #         children=[],
    #         node_type=NodeType.TABLE,
    #         content=table.get("content", ""),
    #         metadata={
    #             "boundingRegions": table.get("boundingRegions", []),
    #         },
    #     )
    #     return table_node

    def _create_paragraph_node(self, paragraph: dict[str, Any]) -> Node:
        """Create a node for a paragraph with its child lines and words."""
        para_node = Node(
            children=[],
            node_type=NodeType.PARAGRAPH,
            content=paragraph.get("content", ""),
            metadata={
                "boundingRegions": paragraph.get("boundingRegions", {}),
            },
        )
        return para_node

    def parse_with_azure_ai(self, file_path: str) -> dict:
        """
        Parse a document with Azure AI Document Intelligence into a JSON dictionary.

        Args:
            file_path: Path to the document

        Returns:
            A dictionary with the Azure AI Document Intelligence response
        """
        with open(file_path, "rb") as file:
            file_content = file.read()

        parser = self._get_parser()
        poller = parser.begin_analyze_document(
            "prebuilt-layout",
            AnalyzeDocumentRequest(bytes_source=file_content),
            output_content_format=self.result_type,
        )
        result: AnalyzeResult = poller.result()
        return result.as_dict()
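
    # The returned dict mirrors AnalyzeResult: top-level keys such as "content",
    # "paragraphs", "tables", "figures", and "sections" are exactly what
    # _parse_json consumes.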

    async def a_parse_with_azure_ai(self, file_path: str) -> dict:
        """Async variant of parse_with_azure_ai."""
        async with aiofiles.open(file_path, "rb") as file:
            file_content = await file.read()

        parser = self._get_a_parser()
        async with parser:
            poller = await parser.begin_analyze_document(
                "prebuilt-layout",
                AnalyzeDocumentRequest(bytes_source=file_content),
                output_content_format=self.result_type,
            )
            result: AnalyzeResult = await poller.result()
        # Exiting the context manager closes the async client, so drop the cached
        # instance; otherwise the next call would reuse a closed client.
        self.a_parser = None
        return result.as_dict()

    def parse(self, file_path: str) -> Node:
        """
        Parse a document with Azure AI Document Intelligence into a Node structure.

        Args:
            file_path: Path to the document

        Returns:
            A Node representing the document with hierarchical structure
        """
        result_dict = self.parse_with_azure_ai(file_path)
        return self._parse_json(result_dict, file_path=file_path)

    def __call__(self, file_path: str) -> Node:
        return self.parse(file_path)

    async def a_parse(self, file_path: str) -> Node:
        """Async variant of parse."""
        result_dict = await self.a_parse_with_azure_ai(file_path)
        return self._parse_json(result_dict, file_path=file_path)
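Taken together, the module is driven through parse / a_parse. A minimal end-to-end sketch, assuming the package's __init__ re-exports AzureParser (the key, endpoint, and file name are placeholders):

    import asyncio

    from datapizza.modules.parsers.azure import AzureParser

    parser = AzureParser(
        api_key="<azure-key>",
        endpoint="https://<resource>.cognitiveservices.azure.com/",
        result_type="markdown",
    )

    # Synchronous: returns the root document Node
    doc = parser.parse("report.pdf")  # or parser("report.pdf") via __call__

    # Asynchronous variant
    doc = asyncio.run(parser.a_parse("report.pdf"))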
@@ -0,0 +1,15 @@
Metadata-Version: 2.4
Name: datapizza-ai-parsers-azure
Version: 0.0.2
Summary: Azure Document Intelligence parser for the datapizza-ai framework
Author-email: Datapizza <datapizza@datapizza.tech>
License: MIT
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 3
Requires-Python: <4,>=3.10.0
Requires-Dist: aiofiles>=24.1.0
Requires-Dist: azure-ai-documentintelligence<2.0.0,>=1.0.1
Requires-Dist: datapizza-ai-core==0.0.1
Requires-Dist: pillow>=11.3.0
Requires-Dist: pymupdf<2.0.0,>=1.25.4
@@ -0,0 +1,5 @@
datapizza/modules/parsers/azure/__init__.py,sha256=78Cmh6Swk3zlqhpXwy8RtGw97Yjw8E-_FARfUzzBgho,65
datapizza/modules/parsers/azure/azure_parser.py,sha256=ZcdH2VJ0t2xox8sCz9n3X_0jsV4UfjZl2qVj-gZLGr4,14335
datapizza_ai_parsers_azure-0.0.2.dist-info/METADATA,sha256=BcLLFTpsSkuis9UNRv8_TybXJOtMZAlQQSKeUW2htEY,583
datapizza_ai_parsers_azure-0.0.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
datapizza_ai_parsers_azure-0.0.2.dist-info/RECORD,,