datapizza-ai-parsers-azure 0.0.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,207 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[codz]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py.cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # UV
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ uv.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+ #poetry.toml
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
+ # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
+ #pdm.lock
+ #pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # pixi
+ # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
+ #pixi.lock
+ # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
+ # in the .venv directory. It is recommended not to include this directory in version control.
+ .pixi
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .envrc
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ # Abstra
+ # Abstra is an AI-powered process automation framework.
+ # Ignore directories containing user credentials, local state, and settings.
+ # Learn more at https://abstra.io/docs
+ .abstra/
+
+ # Visual Studio Code
+ # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
+ # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
+ # and can be added to the global gitignore or merged into this file. However, if you prefer,
+ # you could uncomment the following to ignore the entire vscode folder
+ # .vscode/
+
+ # Ruff stuff:
+ .ruff_cache/
+
+ # PyPI configuration file
+ .pypirc
+
+ # Cursor
+ # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
+ # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
+ # refer to https://docs.cursor.com/context/ignore-files
+ .cursorignore
+ .cursorindexingignore
+
+ # Marimo
+ marimo/_static/
+ marimo/_lsp/
+ __marimo__/
@@ -0,0 +1,15 @@
+ Metadata-Version: 2.4
+ Name: datapizza-ai-parsers-azure
+ Version: 0.0.2
+ Summary: Azure Document Intelligence parser for the datapizza-ai framework
+ Author-email: Datapizza <datapizza@datapizza.tech>
+ License: MIT
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Requires-Python: <4,>=3.10.0
+ Requires-Dist: aiofiles>=24.1.0
+ Requires-Dist: azure-ai-documentintelligence<2.0.0,>=1.0.1
+ Requires-Dist: datapizza-ai-core==0.0.1
+ Requires-Dist: pillow>=11.3.0
+ Requires-Dist: pymupdf<2.0.0,>=1.25.4
File without changes
@@ -0,0 +1,3 @@
+ from .azure_parser import AzureParser
+
+ __all__ = ["AzureParser"]
@@ -0,0 +1,391 @@
+ import json
+ from typing import Any
+
+ import aiofiles
+ from datapizza.core.modules.parser import Parser
+ from datapizza.core.utils import extract_media
+ from datapizza.type import Media, MediaNode, Node, NodeType
+
+ from azure.ai.documentintelligence import DocumentIntelligenceClient
+ from azure.ai.documentintelligence.aio import (
+     DocumentIntelligenceClient as AsyncDocumentIntelligenceClient,
+ )
+ from azure.ai.documentintelligence.models import AnalyzeDocumentRequest, AnalyzeResult
+ from azure.core.credentials import AzureKeyCredential
+
+
+ class AzureParser(Parser):
+     """
+     Parser that creates a hierarchical tree structure from an Azure AI Document Intelligence response.
+     The hierarchy goes from document -> pages -> paragraphs/tables -> lines/cells -> words.
+
+     params:
+         api_key: str
+         endpoint: str
+         result_type: str = "text"  # one of "text" or "markdown"
+     """
+
+     def __init__(self, api_key: str, endpoint: str, result_type: str = "text"):
+         self.api_key = api_key
+         self.endpoint = endpoint
+         self.result_type = result_type
+         self.parser = None  # self._create_parser()
+         self.a_parser = None  # self._create_a_parser()
+
+     def _create_parser(self):
+         document_intelligence_client = DocumentIntelligenceClient(
+             endpoint=self.endpoint, credential=AzureKeyCredential(self.api_key)
+         )
+         return document_intelligence_client
+
+     def _create_a_parser(self):
+         parser = AsyncDocumentIntelligenceClient(
+             endpoint=self.endpoint, credential=AzureKeyCredential(self.api_key)
+         )
+         return parser
+
+     def _get_parser(self):
+         if not self.parser:
+             self.parser = self._create_parser()
+         return self.parser
+
+     def _get_a_parser(self):
+         if not self.a_parser:
+             self.a_parser = self._create_a_parser()
+         return self.a_parser
+
+     def _parse_file(self, file_path: str) -> Node:
+         """Parse an Azure Document Intelligence JSON file into a Node structure."""
+         with open(file_path) as file:
+             json_data = json.load(file)
+
+         return self._parse_json(json_data, file_path=file_path)
+
+     def _get_missing_paragraphs(self, json_data: dict) -> list[str]:
+         """Get missing paragraphs from the Azure Document Intelligence JSON data."""
+
+         sections = json_data.get("sections", [])
+         figures = json_data.get("figures", [])
+         tables = json_data.get("tables", [])
+
+         all_paragraphs = [
+             "/paragraphs/" + str(x) for x in range(len(json_data.get("paragraphs", [])))
+         ]
+
+         elements = []
+
+         def _process_section(section):
+             for element in section.get("elements", []):
+                 if "paragraph" in element:
+                     elements.append(element)
+                 elif "section" in element:
+                     section_idx = element.split("/")[2]
+                     next_section = sections[int(section_idx)]
+                     _process_section(next_section)
+
+         for section in sections:
+             _process_section(section)
+
+         def _process_figure(figure):
+             for element in figure.get("elements", []):
+                 if "paragraph" in element:
+                     elements.append(element)
+                 elif "section" in element:
+                     section_idx = element.split("/")[2]
+                     next_section = sections[int(section_idx)]
+                     _process_section(next_section)
+
+         for figure in figures:
+             _process_figure(figure)
+
+         def _process_table(table):
+             for element in table.get("elements", []):
+                 if "paragraph" in element:
+                     elements.append(element)
+                 elif "section" in element:
+                     section_idx = element.split("/")[2]
+                     next_section = sections[int(section_idx)]
+                     _process_section(next_section)
+
+         for table in tables:
+             _process_table(table)
+
+         missing = [x for x in all_paragraphs if x not in elements]
+
+         return missing
+
+     def _insert_missing_paragraphs(self, json_data: dict) -> dict:
+         """Insert missing paragraphs into the Azure Document Intelligence JSON data."""
+
+         missing = self._get_missing_paragraphs(json_data)
+
+         def _insert_paragraph_recursive(section, p_idx, p):
+             for i, element in enumerate(section.get("elements", [])):
+                 if "paragraph" in element:
+                     if int(element.split("/")[2]) > int(p_idx):
+                         section["elements"].insert(i, p)
+                         return True
+                 elif "section" in element:
+                     section_idx = element.split("/")[2]
+                     next_section = json_data["sections"][int(section_idx)]
+                     if _insert_paragraph_recursive(next_section, p_idx, p):
+                         return True
+             return False
+
+         for p in missing:
+             idx = int(p.split("/")[2])
+
+             for section in json_data.get("sections", []):
+                 if _insert_paragraph_recursive(section, idx, p):
+                     break
+         return json_data
+
+     def _parse_json(self, json_data: dict, file_path: str) -> Node:
+         """
+         Parse Azure Document Intelligence JSON into a hierarchical Node structure.
+
+         Args:
+             json_data: The Azure Document Intelligence JSON response
+             file_path: Path to the source document, used to crop table and figure images
+
+         Returns:
+             A Node representing the document with hierarchical structure
+         """
+         # Create root document node
+
+         json_data = self._insert_missing_paragraphs(json_data)
+
+         document_node = Node(
+             children=[],
+             metadata=self._extract_document_metadata(json_data),
+             node_type=NodeType.DOCUMENT,
+         )
+
+         # Process each page in the document
+         analyze_result = json_data  # .get('analyzeResult', {})
+         sections = analyze_result.get("sections", [])
+
+         document_node.children = self._process_children_elements(
+             sections[0], analyze_result, file_path=file_path
+         )
+
+         return document_node
+
+     def _process_children_elements(
+         self,
+         parent_object: dict[str, Any],
+         analyze_result: dict[str, Any],
+         file_path: str,
+     ) -> list[Node]:
+         """Process children elements of a section."""
+         children_nodes = []
+         elements = parent_object.get("elements", [])
+         for _element_idx, element in enumerate(elements):
+             if "paragraph" in element:
+                 paragraph_index = element.split("/")[2]
+
+                 paragraph = analyze_result.get("paragraphs", [])[int(paragraph_index)]
+                 paragraph_node = self._create_paragraph_node(paragraph)
+                 paragraph_node.children = self._process_children_elements(
+                     paragraph, analyze_result, file_path=file_path
+                 )
+                 children_nodes.append(paragraph_node)
+
+             elif "table" in element:
+                 table_index = element.split("/")[2]
+                 table = analyze_result.get("tables", [])[int(table_index)]
+                 table_node = self._create_media_node(
+                     media=table,
+                     node_type=NodeType.TABLE,
+                     content_result=analyze_result.get("content", ""),
+                     file_path=file_path,
+                 )
+                 table_node.children = self._process_children_elements(
+                     table, analyze_result, file_path=file_path
+                 )
+                 children_nodes.append(table_node)
+
+             elif "figures" in element:
+                 image_index = element.split("/")[2]
+                 image = analyze_result.get("figures", [])[int(image_index)]
+                 image_node = self._create_media_node(
+                     media=image,
+                     node_type=NodeType.FIGURE,
+                     content_result=analyze_result.get("content", ""),
+                     file_path=file_path,
+                 )
+                 image_node.children = self._process_children_elements(
+                     image, analyze_result, file_path=file_path
+                 )
+                 children_nodes.append(image_node)
+
+             elif "section" in element:
+                 section_index = element.split("/")[2]
+                 section = analyze_result.get("sections", [])[int(section_index)]
+                 section_node = Node(children=[], node_type=NodeType.SECTION)
+                 section_node.children = self._process_children_elements(
+                     section, analyze_result, file_path=file_path
+                 )
+                 children_nodes.append(section_node)
+
+         return children_nodes
+
+     def _transform_cells_to_markdown(
+         self, table_data: dict[str, Any], content_result: str
+     ) -> str:
+         """Transforms table cells from Azure response to a markdown table string."""
+         cells = table_data.get("cells", [])
+         if not cells:
+             return ""
+
+         offset = table_data.get("spans", [{}])[0].get("offset")
+         length = table_data.get("spans", [{}])[0].get("length")
+         if offset is None or length is None:
+             return ""
+
+         markdown_table = content_result[offset : offset + length]
+
+         return markdown_table
+
+     def _create_media_node(
+         self,
+         media: dict[str, Any],
+         node_type: NodeType,
+         content_result: str,
+         file_path: str,
+     ) -> Node:
+         """Create a node for a media element (table or figure) with its child elements."""
+         # Get bounding regions
+         bounding_regions = media.get("boundingRegions", [])
+
+         if file_path and bounding_regions:
+             base64_image = extract_media(
+                 coordinates=bounding_regions[0]["polygon"],
+                 file_path=file_path,
+                 page_number=bounding_regions[0]["pageNumber"],
+             )
+
+             media_obj = Media(
+                 media_type="image",
+                 source=base64_image,
+                 source_type="base64",
+                 extension="png",
+             )
+         else:
+             raise ValueError("No bounding regions found for media")
+
+         content = None
+         metadata = {
+             "boundingRegions": bounding_regions,
+         }
+         if node_type == NodeType.TABLE:
+             content = self._transform_cells_to_markdown(media, content_result)
+             metadata["rowCount"] = media.get("rowCount")
+             metadata["columnCount"] = media.get("columnCount")
+
+         # Create MediaNode with bounding regions metadata
+         image_node = MediaNode(
+             media=media_obj,
+             children=[],
+             node_type=node_type,
+             metadata=metadata,
+             content=content,
+         )
+         return image_node
+
+     def _extract_document_metadata(self, json_data: dict[str, Any]) -> dict[str, Any]:
+         """Extract document-level metadata from the Azure response."""
+         metadata = {}
+         analyze_result = json_data.get("analyzeResult", {})
+
+         # Add document-level metadata
+         if "documentResults" in analyze_result:
+             doc_results = analyze_result["documentResults"]
+             if doc_results and len(doc_results) > 0:
+                 metadata.update(doc_results[0].get("fields", {}))
+
+         # Add model information if available
+         metadata["modelId"] = analyze_result.get("modelId")
+         metadata["apiVersion"] = analyze_result.get("apiVersion")
+
+         return metadata
+
+     # def _create_table_node(self, table: Dict[str, Any]) -> Node:
+     #     """Create a node for a table with its child lines and words."""
+     #     table_node = Node(
+     #         children=[],
+     #         node_type=NodeType.TABLE,
+     #         content=table.get("content", ""),
+     #         metadata={
+     #             "boundingRegions": table.get("boundingRegions", []),
+     #         },
+     #     )
+     #     return table_node
+
+     def _create_paragraph_node(self, paragraph: dict[str, Any]) -> Node:
+         """Create a node for a paragraph with its child lines and words."""
+         para_node = Node(
+             children=[],
+             node_type=NodeType.PARAGRAPH,
+             content=paragraph.get("content", ""),
+             metadata={
+                 "boundingRegions": paragraph.get("boundingRegions", {}),
+             },
+         )
+         return para_node
+
+     def parse_with_azure_ai(self, file_path: str) -> dict:
+         """
+         Parse a document with Azure AI Document Intelligence into a JSON dictionary.
+
+         Args:
+             file_path: Path to the document
+
+         Returns:
+             A dictionary with the Azure AI Document Intelligence response
+         """
+
+         with open(file_path, "rb") as file:
+             file_content = file.read()
+
+         parser = self._get_parser()
+         poller = parser.begin_analyze_document(
+             "prebuilt-layout",
+             AnalyzeDocumentRequest(bytes_source=file_content),
+             output_content_format=self.result_type,
+         )
+         result: AnalyzeResult = poller.result()
+         return result.as_dict()
+
+     async def a_parse_with_azure_ai(self, file_path: str) -> dict:
+         async with aiofiles.open(file_path, "rb") as file:
+             file_content = await file.read()
+
+         parser = self._get_a_parser()
+         async with parser:
+             poller = await parser.begin_analyze_document(
+                 "prebuilt-layout",
+                 AnalyzeDocumentRequest(bytes_source=file_content),
+                 output_content_format=self.result_type,
+             )
+             result: AnalyzeResult = await poller.result()
+             return result.as_dict()
+
+     def parse(self, file_path: str) -> Node:
+         """
+         Parse a document with Azure AI Document Intelligence into a Node structure.
+
+         Args:
+             file_path: Path to the document
+
+         Returns:
+             A Node representing the document with hierarchical structure
+         """
+         result_dict = self.parse_with_azure_ai(file_path)
+         return self._parse_json(result_dict, file_path=file_path)
+
+     def __call__(self, file_path: str) -> Node:
+         return self.parse(file_path)
+
+     async def a_parse(self, file_path: str) -> Node:
+         result_dict = await self.a_parse_with_azure_ai(file_path)
+         return self._parse_json(result_dict, file_path=file_path)
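
The file above defines the parser's public surface: AzureParser(api_key, endpoint, result_type), a synchronous parse() (also reachable via __call__), and an asynchronous a_parse(), each returning a datapizza Node tree of the document's sections, paragraphs, tables, and figures. A minimal usage sketch follows; the dotted import path is assumed from the package layout (only the relative import is shown in this diff), and the endpoint, key, and file name are placeholders.

import asyncio

from datapizza.modules.parsers.azure import AzureParser  # assumed import path

parser = AzureParser(
    api_key="<document-intelligence-key>",  # placeholder credential
    endpoint="https://<resource>.cognitiveservices.azure.com/",  # placeholder endpoint
    result_type="markdown",  # or "text" (the default)
)

# Synchronous: runs the prebuilt-layout model and builds the Node hierarchy.
document_node = parser.parse("example.pdf")  # same as parser("example.pdf")

# Asynchronous: same result via the aio DocumentIntelligenceClient.
async def main() -> None:
    node = await parser.a_parse("example.pdf")
    print(node)

asyncio.run(main())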
@@ -0,0 +1,62 @@
+ # Build system configuration
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+ # Project metadata
+ [project]
+ name = "datapizza-ai-parsers-azure"
+ version = "0.0.2"
+ description = "Azure Document Intelligence parser for the datapizza-ai framework"
+ readme = "README.md"
+ license = {text = "MIT"}
+ authors = [
+     {name = "Datapizza", email = "datapizza@datapizza.tech"}
+ ]
+ requires-python = ">=3.10.0,<4"
+ classifiers = [
+     "Programming Language :: Python :: 3",
+     "License :: OSI Approved :: MIT License",
+     "Operating System :: OS Independent",
+ ]
+ dependencies = [
+     "datapizza-ai-core==0.0.1",
+     "aiofiles>=24.1.0",
+     "azure-ai-documentintelligence>=1.0.1,<2.0.0",
+     "pymupdf>=1.25.4,<2.0.0",
+     "pillow>=11.3.0",
+ ]
+
+ # Development dependencies
+ [dependency-groups]
+ dev = [
+     "deptry>=0.23.0",
+     "pytest",
+     "ruff>=0.11.5",
+ ]
+
+ # Hatch build configuration
+ [tool.hatch.build.targets.sdist]
+ include = ["/datapizza"]
+ exclude = ["**/BUILD"]
+
+ [tool.hatch.build.targets.wheel]
+ include = ["/datapizza"]
+ exclude = ["**/BUILD"]
+
+ # Ruff configuration
+ [tool.ruff]
+ line-length = 88
+
+ [tool.ruff.lint]
+ select = [
+     # "E", # pycodestyle errors
+     "W", # pycodestyle warnings
+     "F", # pyflakes
+     "B", # flake8-bugbear
+     "I", # isort
+     "UP", # pyupgrade
+     "SIM", # flake8-simplify
+     "RUF", # Ruff-specific rules
+     "C4", # flake8-comprehensions
+ ]