nv-ingest-api 2025.4.15.dev20250415__py3-none-any.whl → 2025.4.17.dev20250417__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of nv-ingest-api might be problematic; see the package registry page for more details.

Files changed (153)
  1. nv_ingest_api/__init__.py +3 -0
  2. nv_ingest_api/interface/__init__.py +215 -0
  3. nv_ingest_api/interface/extract.py +972 -0
  4. nv_ingest_api/interface/mutate.py +154 -0
  5. nv_ingest_api/interface/store.py +218 -0
  6. nv_ingest_api/interface/transform.py +382 -0
  7. nv_ingest_api/interface/utility.py +200 -0
  8. nv_ingest_api/internal/enums/__init__.py +3 -0
  9. nv_ingest_api/internal/enums/common.py +494 -0
  10. nv_ingest_api/internal/extract/__init__.py +3 -0
  11. nv_ingest_api/internal/extract/audio/__init__.py +3 -0
  12. nv_ingest_api/internal/extract/audio/audio_extraction.py +149 -0
  13. nv_ingest_api/internal/extract/docx/__init__.py +5 -0
  14. nv_ingest_api/internal/extract/docx/docx_extractor.py +205 -0
  15. nv_ingest_api/internal/extract/docx/engines/__init__.py +0 -0
  16. nv_ingest_api/internal/extract/docx/engines/docxreader_helpers/__init__.py +3 -0
  17. nv_ingest_api/internal/extract/docx/engines/docxreader_helpers/docx_helper.py +122 -0
  18. nv_ingest_api/internal/extract/docx/engines/docxreader_helpers/docxreader.py +895 -0
  19. nv_ingest_api/internal/extract/image/__init__.py +3 -0
  20. nv_ingest_api/internal/extract/image/chart_extractor.py +353 -0
  21. nv_ingest_api/internal/extract/image/image_extractor.py +204 -0
  22. nv_ingest_api/internal/extract/image/image_helpers/__init__.py +3 -0
  23. nv_ingest_api/internal/extract/image/image_helpers/common.py +403 -0
  24. nv_ingest_api/internal/extract/image/infographic_extractor.py +253 -0
  25. nv_ingest_api/internal/extract/image/table_extractor.py +344 -0
  26. nv_ingest_api/internal/extract/pdf/__init__.py +3 -0
  27. nv_ingest_api/internal/extract/pdf/engines/__init__.py +19 -0
  28. nv_ingest_api/internal/extract/pdf/engines/adobe.py +484 -0
  29. nv_ingest_api/internal/extract/pdf/engines/llama.py +243 -0
  30. nv_ingest_api/internal/extract/pdf/engines/nemoretriever.py +597 -0
  31. nv_ingest_api/internal/extract/pdf/engines/pdf_helpers/__init__.py +146 -0
  32. nv_ingest_api/internal/extract/pdf/engines/pdfium.py +603 -0
  33. nv_ingest_api/internal/extract/pdf/engines/tika.py +96 -0
  34. nv_ingest_api/internal/extract/pdf/engines/unstructured_io.py +426 -0
  35. nv_ingest_api/internal/extract/pdf/pdf_extractor.py +74 -0
  36. nv_ingest_api/internal/extract/pptx/__init__.py +5 -0
  37. nv_ingest_api/internal/extract/pptx/engines/__init__.py +0 -0
  38. nv_ingest_api/internal/extract/pptx/engines/pptx_helper.py +799 -0
  39. nv_ingest_api/internal/extract/pptx/pptx_extractor.py +187 -0
  40. nv_ingest_api/internal/mutate/__init__.py +3 -0
  41. nv_ingest_api/internal/mutate/deduplicate.py +110 -0
  42. nv_ingest_api/internal/mutate/filter.py +133 -0
  43. nv_ingest_api/internal/primitives/__init__.py +0 -0
  44. nv_ingest_api/{primitives → internal/primitives}/control_message_task.py +4 -0
  45. nv_ingest_api/{primitives → internal/primitives}/ingest_control_message.py +5 -2
  46. nv_ingest_api/internal/primitives/nim/__init__.py +8 -0
  47. nv_ingest_api/internal/primitives/nim/default_values.py +15 -0
  48. nv_ingest_api/internal/primitives/nim/model_interface/__init__.py +3 -0
  49. nv_ingest_api/internal/primitives/nim/model_interface/cached.py +274 -0
  50. nv_ingest_api/internal/primitives/nim/model_interface/decorators.py +56 -0
  51. nv_ingest_api/internal/primitives/nim/model_interface/deplot.py +270 -0
  52. nv_ingest_api/internal/primitives/nim/model_interface/helpers.py +275 -0
  53. nv_ingest_api/internal/primitives/nim/model_interface/nemoretriever_parse.py +238 -0
  54. nv_ingest_api/internal/primitives/nim/model_interface/paddle.py +462 -0
  55. nv_ingest_api/internal/primitives/nim/model_interface/parakeet.py +367 -0
  56. nv_ingest_api/internal/primitives/nim/model_interface/text_embedding.py +132 -0
  57. nv_ingest_api/internal/primitives/nim/model_interface/vlm.py +152 -0
  58. nv_ingest_api/internal/primitives/nim/model_interface/yolox.py +1400 -0
  59. nv_ingest_api/internal/primitives/nim/nim_client.py +344 -0
  60. nv_ingest_api/internal/primitives/nim/nim_model_interface.py +81 -0
  61. nv_ingest_api/internal/primitives/tracing/__init__.py +0 -0
  62. nv_ingest_api/internal/primitives/tracing/latency.py +69 -0
  63. nv_ingest_api/internal/primitives/tracing/logging.py +96 -0
  64. nv_ingest_api/internal/primitives/tracing/tagging.py +197 -0
  65. nv_ingest_api/internal/schemas/__init__.py +3 -0
  66. nv_ingest_api/internal/schemas/extract/__init__.py +3 -0
  67. nv_ingest_api/internal/schemas/extract/extract_audio_schema.py +130 -0
  68. nv_ingest_api/internal/schemas/extract/extract_chart_schema.py +135 -0
  69. nv_ingest_api/internal/schemas/extract/extract_docx_schema.py +124 -0
  70. nv_ingest_api/internal/schemas/extract/extract_image_schema.py +124 -0
  71. nv_ingest_api/internal/schemas/extract/extract_infographic_schema.py +128 -0
  72. nv_ingest_api/internal/schemas/extract/extract_pdf_schema.py +218 -0
  73. nv_ingest_api/internal/schemas/extract/extract_pptx_schema.py +124 -0
  74. nv_ingest_api/internal/schemas/extract/extract_table_schema.py +129 -0
  75. nv_ingest_api/internal/schemas/message_brokers/__init__.py +3 -0
  76. nv_ingest_api/internal/schemas/message_brokers/message_broker_client_schema.py +23 -0
  77. nv_ingest_api/internal/schemas/message_brokers/request_schema.py +34 -0
  78. nv_ingest_api/internal/schemas/message_brokers/response_schema.py +19 -0
  79. nv_ingest_api/internal/schemas/meta/__init__.py +3 -0
  80. nv_ingest_api/internal/schemas/meta/base_model_noext.py +11 -0
  81. nv_ingest_api/internal/schemas/meta/ingest_job_schema.py +237 -0
  82. nv_ingest_api/internal/schemas/meta/metadata_schema.py +221 -0
  83. nv_ingest_api/internal/schemas/mutate/__init__.py +3 -0
  84. nv_ingest_api/internal/schemas/mutate/mutate_image_dedup_schema.py +16 -0
  85. nv_ingest_api/internal/schemas/store/__init__.py +3 -0
  86. nv_ingest_api/internal/schemas/store/store_embedding_schema.py +28 -0
  87. nv_ingest_api/internal/schemas/store/store_image_schema.py +30 -0
  88. nv_ingest_api/internal/schemas/transform/__init__.py +3 -0
  89. nv_ingest_api/internal/schemas/transform/transform_image_caption_schema.py +15 -0
  90. nv_ingest_api/internal/schemas/transform/transform_image_filter_schema.py +17 -0
  91. nv_ingest_api/internal/schemas/transform/transform_text_embedding_schema.py +25 -0
  92. nv_ingest_api/internal/schemas/transform/transform_text_splitter_schema.py +22 -0
  93. nv_ingest_api/internal/store/__init__.py +3 -0
  94. nv_ingest_api/internal/store/embed_text_upload.py +236 -0
  95. nv_ingest_api/internal/store/image_upload.py +232 -0
  96. nv_ingest_api/internal/transform/__init__.py +3 -0
  97. nv_ingest_api/internal/transform/caption_image.py +205 -0
  98. nv_ingest_api/internal/transform/embed_text.py +496 -0
  99. nv_ingest_api/internal/transform/split_text.py +157 -0
  100. nv_ingest_api/util/__init__.py +0 -0
  101. nv_ingest_api/util/control_message/__init__.py +0 -0
  102. nv_ingest_api/util/control_message/validators.py +47 -0
  103. nv_ingest_api/util/converters/__init__.py +0 -0
  104. nv_ingest_api/util/converters/bytetools.py +78 -0
  105. nv_ingest_api/util/converters/containers.py +65 -0
  106. nv_ingest_api/util/converters/datetools.py +90 -0
  107. nv_ingest_api/util/converters/dftools.py +127 -0
  108. nv_ingest_api/util/converters/formats.py +64 -0
  109. nv_ingest_api/util/converters/type_mappings.py +27 -0
  110. nv_ingest_api/util/detectors/__init__.py +5 -0
  111. nv_ingest_api/util/detectors/language.py +38 -0
  112. nv_ingest_api/util/exception_handlers/__init__.py +0 -0
  113. nv_ingest_api/util/exception_handlers/converters.py +72 -0
  114. nv_ingest_api/util/exception_handlers/decorators.py +223 -0
  115. nv_ingest_api/util/exception_handlers/detectors.py +74 -0
  116. nv_ingest_api/util/exception_handlers/pdf.py +116 -0
  117. nv_ingest_api/util/exception_handlers/schemas.py +68 -0
  118. nv_ingest_api/util/image_processing/__init__.py +5 -0
  119. nv_ingest_api/util/image_processing/clustering.py +260 -0
  120. nv_ingest_api/util/image_processing/processing.py +179 -0
  121. nv_ingest_api/util/image_processing/table_and_chart.py +449 -0
  122. nv_ingest_api/util/image_processing/transforms.py +407 -0
  123. nv_ingest_api/util/logging/__init__.py +0 -0
  124. nv_ingest_api/util/logging/configuration.py +31 -0
  125. nv_ingest_api/util/message_brokers/__init__.py +3 -0
  126. nv_ingest_api/util/message_brokers/simple_message_broker/__init__.py +9 -0
  127. nv_ingest_api/util/message_brokers/simple_message_broker/broker.py +465 -0
  128. nv_ingest_api/util/message_brokers/simple_message_broker/ordered_message_queue.py +71 -0
  129. nv_ingest_api/util/message_brokers/simple_message_broker/simple_client.py +435 -0
  130. nv_ingest_api/util/metadata/__init__.py +5 -0
  131. nv_ingest_api/util/metadata/aggregators.py +469 -0
  132. nv_ingest_api/util/multi_processing/__init__.py +8 -0
  133. nv_ingest_api/util/multi_processing/mp_pool_singleton.py +194 -0
  134. nv_ingest_api/util/nim/__init__.py +56 -0
  135. nv_ingest_api/util/pdf/__init__.py +3 -0
  136. nv_ingest_api/util/pdf/pdfium.py +427 -0
  137. nv_ingest_api/util/schema/__init__.py +0 -0
  138. nv_ingest_api/util/schema/schema_validator.py +10 -0
  139. nv_ingest_api/util/service_clients/__init__.py +3 -0
  140. nv_ingest_api/util/service_clients/client_base.py +72 -0
  141. nv_ingest_api/util/service_clients/kafka/__init__.py +3 -0
  142. nv_ingest_api/util/service_clients/redis/__init__.py +0 -0
  143. nv_ingest_api/util/service_clients/redis/redis_client.py +334 -0
  144. nv_ingest_api/util/service_clients/rest/__init__.py +0 -0
  145. nv_ingest_api/util/service_clients/rest/rest_client.py +398 -0
  146. nv_ingest_api/util/string_processing/__init__.py +51 -0
  147. {nv_ingest_api-2025.4.15.dev20250415.dist-info → nv_ingest_api-2025.4.17.dev20250417.dist-info}/METADATA +1 -1
  148. nv_ingest_api-2025.4.17.dev20250417.dist-info/RECORD +152 -0
  149. nv_ingest_api-2025.4.15.dev20250415.dist-info/RECORD +0 -9
  150. /nv_ingest_api/{primitives → internal}/__init__.py +0 -0
  151. {nv_ingest_api-2025.4.15.dev20250415.dist-info → nv_ingest_api-2025.4.17.dev20250417.dist-info}/WHEEL +0 -0
  152. {nv_ingest_api-2025.4.15.dev20250415.dist-info → nv_ingest_api-2025.4.17.dev20250417.dist-info}/licenses/LICENSE +0 -0
  153. {nv_ingest_api-2025.4.15.dev20250415.dist-info → nv_ingest_api-2025.4.17.dev20250417.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,597 @@
1
+ # SPDX-FileCopyrightText: Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES.
2
+ # All rights reserved.
3
+ # SPDX-License-Identifier: Apache-2.0
4
+ # Copyright (c) 2024, NVIDIA CORPORATION.
5
+ #
6
+ # Licensed under the Apache License, Version 2.0 (the "License");
7
+ # you may not use this file except in compliance with the License.
8
+ # You may obtain a copy of the License at
9
+ #
10
+ # http://www.apache.org/licenses/LICENSE-2.0
11
+ #
12
+ # Unless required by applicable law or agreed to in writing, software
13
+ # distributed under the License is distributed on an "AS IS" BASIS,
14
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
+ # See the License for the specific language governing permissions and
16
+ # limitations under the License.
17
+
18
+ import io
19
+ import logging
20
+ import math
21
+ import uuid
22
+ import concurrent.futures
23
+ from typing import Any
24
+ from typing import Dict
25
+ from typing import Tuple
26
+ from typing import Optional
27
+ from typing import List
28
+
29
+ import numpy as np
30
+ import pypdfium2 as pdfium
31
+
32
+ from nv_ingest_api.internal.extract.pdf.engines.pdfium import _extract_page_elements
33
+ from nv_ingest_api.internal.primitives.nim.model_interface import nemoretriever_parse as nemoretriever_parse_utils
34
+ from nv_ingest_api.internal.enums.common import AccessLevelEnum
35
+ from nv_ingest_api.internal.enums.common import ContentTypeEnum
36
+ from nv_ingest_api.internal.enums.common import ContentDescriptionEnum
37
+ from nv_ingest_api.internal.enums.common import TableFormatEnum
38
+ from nv_ingest_api.internal.enums.common import TextTypeEnum
39
+ from nv_ingest_api.internal.schemas.meta.metadata_schema import validate_metadata
40
+ from nv_ingest_api.internal.primitives.nim.model_interface.yolox import (
41
+ YOLOX_PAGE_IMAGE_PREPROC_WIDTH,
42
+ YOLOX_PAGE_IMAGE_PREPROC_HEIGHT,
43
+ )
44
+ from nv_ingest_api.internal.schemas.extract.extract_pdf_schema import NemoRetrieverParseConfigSchema
45
+ from nv_ingest_api.util.metadata.aggregators import (
46
+ extract_pdf_metadata,
47
+ LatexTable,
48
+ Base64Image,
49
+ construct_image_metadata_from_pdf_image,
50
+ construct_text_metadata,
51
+ )
52
+ from nv_ingest_api.util.pdf.pdfium import pdfium_pages_to_numpy
53
+ from nv_ingest_api.internal.primitives.nim.default_values import YOLOX_MAX_BATCH_SIZE
54
+ from nv_ingest_api.util.exception_handlers.pdf import pdfium_exception_handler
55
+ from nv_ingest_api.util.image_processing.transforms import numpy_to_base64, crop_image
56
+ from nv_ingest_api.util.nim import create_inference_client
57
+
58
+
59
+ logger = logging.getLogger(__name__)
60
+
61
+ NEMORETRIEVER_PARSE_RENDER_DPI = 300
62
+ NEMORETRIEVER_PARSE_MAX_WIDTH = 1024
63
+ NEMORETRIEVER_PARSE_MAX_HEIGHT = 1280
64
+ NEMORETRIEVER_PARSE_MAX_BATCH_SIZE = 8
65
+
66
+
67
+ # Define a helper function to use nemoretriever_parse to extract text from a base64 encoded bytestram PDF
68
+ def nemoretriever_parse_extractor(
69
+ pdf_stream: io.BytesIO,
70
+ extract_text: bool,
71
+ extract_images: bool,
72
+ extract_infographics: bool,
73
+ extract_tables: bool,
74
+ extract_charts: bool,
75
+ extractor_config: dict,
76
+ execution_trace_log: Optional[List[Any]] = None,
77
+ ) -> str:
78
+ """
79
+ Helper function to use nemoretriever_parse to extract text from a bytestream PDF.
80
+
81
+ Parameters
82
+ ----------
83
+ pdf_stream : io.BytesIO
84
+ A bytestream PDF.
85
+ extract_text : bool
86
+ Specifies whether to extract text.
87
+ extract_images : bool
88
+ Specifies whether to extract images.
89
+ extract_tables : bool
90
+ Specifies whether to extract tables.
91
+ extract_infographics : bool
92
+ Specifies whether to extract infographics.
93
+ extract_charts : bool
94
+ Specifies whether to extract charts.
95
+ execution_trace_log : Optional[List], optional
96
+ Trace information for debugging purposes (default is None).
97
+ extractor_config : dict
98
+ A dictionary containing additional extraction parameters. Expected keys include:
99
+ - row_data : dict
100
+ - text_depth : str, optional (default is "page")
101
+ - extract_tables_method : str, optional (default is "yolox")
102
+ - identify_nearby_objects : bool, optional (default is True)
103
+ - paddle_output_format : str, optional (default is "pseudo_markdown")
104
+ - pdfium_config : dict, optional (configuration for PDFium)
105
+ - nemoretriever_parse_config : dict, optional (configuration for NemoRetrieverParse)
106
+ - metadata_column : str, optional (default is "metadata")
107
+
108
+ Returns
109
+ -------
110
+ str
111
+ A string of extracted text.
112
+
113
+ Raises
114
+ ------
115
+ ValueError
116
+ If required keys are missing in extractor_config or invalid values are provided.
117
+ KeyError
118
+ If required keys are missing in row_data.
119
+ """
120
+ logger = logging.getLogger(__name__)
121
+ logger.debug("Extracting PDF with nemoretriever_parse backend.")
122
+
123
+ # Retrieve row_data from extractor_config.
124
+ row_data = extractor_config.get("row_data")
125
+ if row_data is None:
126
+ raise ValueError("Missing 'row_data' in extractor_config.")
127
+
128
+ # Get source_id from row_data.
129
+ try:
130
+ source_id = row_data["source_id"]
131
+ except KeyError:
132
+ raise KeyError("row_data must contain 'source_id'.")
133
+
134
+ # Get and validate text_depth.
135
+ text_depth_str = extractor_config.get("text_depth", "page")
136
+ try:
137
+ text_depth = TextTypeEnum[text_depth_str.upper()]
138
+ except KeyError:
139
+ valid_options = [e.name.lower() for e in TextTypeEnum]
140
+ raise ValueError(f"Invalid text_depth value: {text_depth_str}. Expected one of: {valid_options}")
141
+
142
+ # Get extraction method for tables.
143
+ extract_tables_method = extractor_config.get("extract_tables_method", "yolox")
144
+
145
+ # Flag for identifying nearby objects.
146
+ identify_nearby_objects = extractor_config.get("identify_nearby_objects", True)
147
+
148
+ # Get and validate paddle_output_format.
149
+ paddle_output_format_str = extractor_config.get("paddle_output_format", "pseudo_markdown")
150
+ try:
151
+ paddle_output_format = TableFormatEnum[paddle_output_format_str.upper()]
152
+ except KeyError:
153
+ valid_options = [e.name.lower() for e in TableFormatEnum]
154
+ raise ValueError(
155
+ f"Invalid paddle_output_format value: {paddle_output_format_str}. Expected one of: {valid_options}"
156
+ )
157
+
158
+ # Process nemoretriever_parse configuration.
159
+ nemoretriever_parse_config_raw = extractor_config.get("nemoretriever_parse_config", {})
160
+ if isinstance(nemoretriever_parse_config_raw, dict):
161
+ nemoretriever_parse_config = NemoRetrieverParseConfigSchema(**nemoretriever_parse_config_raw)
162
+ elif isinstance(nemoretriever_parse_config_raw, NemoRetrieverParseConfigSchema):
163
+ nemoretriever_parse_config = nemoretriever_parse_config_raw
164
+ else:
165
+ raise ValueError(
166
+ "`nemoretriever_parse_config` must be a dictionary or a NemoRetrieverParseConfigSchema instance."
167
+ )
168
+
169
+ # Get base metadata.
170
+ metadata_col = extractor_config.get("metadata_column", "metadata")
171
+ if hasattr(row_data, "index") and metadata_col in row_data.index:
172
+ base_unified_metadata = row_data[metadata_col]
173
+ else:
174
+ base_unified_metadata = row_data.get(metadata_col, {})
175
+
176
+ # get base source_metadata
177
+ base_source_metadata = base_unified_metadata.get("source_metadata", {})
178
+ # get source_location
179
+ source_location = base_source_metadata.get("source_location", "")
180
+ # get collection_id (assuming coming in from source_metadata...)
181
+ collection_id = base_source_metadata.get("collection_id", "")
182
+ # get partition_id (assuming coming in from source_metadata...)
183
+ partition_id = base_source_metadata.get("partition_id", -1)
184
+ # get access_level (assuming coming in from source_metadata...)
185
+ access_level = base_source_metadata.get("access_level", AccessLevelEnum.UNKNOWN)
186
+
187
+ extracted_data = []
188
+ doc = pdfium.PdfDocument(pdf_stream)
189
+ pdf_metadata = extract_pdf_metadata(doc, source_id)
190
+ page_count = pdf_metadata.page_count
191
+
192
+ source_metadata = {
193
+ "source_name": pdf_metadata.filename,
194
+ "source_id": source_id,
195
+ "source_location": source_location,
196
+ "source_type": pdf_metadata.source_type,
197
+ "collection_id": collection_id,
198
+ "date_created": pdf_metadata.date_created,
199
+ "last_modified": pdf_metadata.last_modified,
200
+ "summary": "",
201
+ "partition_id": partition_id,
202
+ "access_level": access_level,
203
+ }
204
+
205
+ accumulated_text = []
206
+ accumulated_tables = []
207
+ accumulated_images = []
208
+
209
+ pages_for_ocr = [] # We'll accumulate (page_idx, np_image) here
210
+ pages_for_tables = [] # We'll accumulate (page_idx, np_image) here
211
+ futures = [] # We'll keep track of all the Future objects for table/charts
212
+
213
+ nemoretriever_parse_client = None
214
+ if extract_text:
215
+ nemoretriever_parse_client = _create_clients(nemoretriever_parse_config)
216
+
217
+ max_workers = nemoretriever_parse_config.workers_per_progress_engine
218
+ with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
219
+
220
+ for page_idx in range(page_count):
221
+ page = doc.get_page(page_idx)
222
+
223
+ page_image, padding_offset = _convert_pdfium_page_to_numpy_for_parser(page)
224
+ pages_for_ocr.append((page_idx, page_image))
225
+ page_image_for_tables, padding_offset_for_tables = _convert_pdfium_page_to_numpy_for_yolox(page)
226
+ pages_for_tables.append((page_idx, page_image_for_tables, padding_offset_for_tables))
227
+
228
+ page.close()
229
+
230
+ # Whenever pages_as_images hits NEMORETRIEVER_PARSE_MAX_BATCH_SIZE, submit a job
231
+ if (extract_text) and (len(pages_for_ocr) >= NEMORETRIEVER_PARSE_MAX_BATCH_SIZE):
232
+ future_parser = executor.submit(
233
+ lambda *args, **kwargs: ("parser", _extract_text_and_bounding_boxes(*args, **kwargs)),
234
+ pages_for_ocr[:], # pass a copy
235
+ nemoretriever_parse_client,
236
+ execution_trace_log=execution_trace_log,
237
+ )
238
+ futures.append(future_parser)
239
+ pages_for_ocr.clear()
240
+
241
+ # Whenever pages_as_images hits YOLOX_MAX_BATCH_SIZE, submit a job
242
+ if (
243
+ (extract_tables_method == "yolox")
244
+ and (extract_tables or extract_charts or extract_infographics)
245
+ and (len(pages_for_tables) >= YOLOX_MAX_BATCH_SIZE)
246
+ ):
247
+ future_yolox = executor.submit(
248
+ lambda *args, **kwargs: ("yolox", _extract_page_elements(*args, **kwargs)),
249
+ pages_for_tables[:], # pass a copy
250
+ page_count,
251
+ source_metadata,
252
+ base_unified_metadata,
253
+ extract_tables,
254
+ extract_charts,
255
+ extract_infographics,
256
+ paddle_output_format,
257
+ nemoretriever_parse_config.yolox_endpoints,
258
+ nemoretriever_parse_config.yolox_infer_protocol,
259
+ nemoretriever_parse_config.auth_token,
260
+ execution_trace_log=execution_trace_log,
261
+ )
262
+ futures.append(future_yolox)
263
+ pages_for_tables.clear()
264
+
265
+ # After page loop, if we still have leftover pages_as_images, submit one last job
266
+ if extract_text and pages_for_ocr:
267
+ future_parser = executor.submit(
268
+ lambda *args, **kwargs: ("parser", _extract_text_and_bounding_boxes(*args, **kwargs)),
269
+ pages_for_ocr[:], # pass a copy
270
+ nemoretriever_parse_client,
271
+ execution_trace_log=execution_trace_log,
272
+ )
273
+ futures.append(future_parser)
274
+ pages_for_ocr.clear()
275
+
276
+ if (
277
+ (extract_tables_method == "yolox")
278
+ and (extract_tables or extract_charts or extract_infographics)
279
+ and pages_for_tables
280
+ ):
281
+ future_yolox = executor.submit(
282
+ lambda *args, **kwargs: ("yolox", _extract_page_elements(*args, **kwargs)),
283
+ pages_for_tables[:],
284
+ page_count,
285
+ source_metadata,
286
+ base_unified_metadata,
287
+ extract_tables,
288
+ extract_charts,
289
+ extract_infographics,
290
+ paddle_output_format,
291
+ nemoretriever_parse_config.yolox_endpoints,
292
+ nemoretriever_parse_config.yolox_infer_protocol,
293
+ nemoretriever_parse_config.auth_token,
294
+ execution_trace_log=execution_trace_log,
295
+ )
296
+ futures.append(future_yolox)
297
+ pages_for_tables.clear()
298
+
299
+ parser_results = []
300
+ # Now wait for all futures to complete
301
+ for fut in concurrent.futures.as_completed(futures):
302
+ model_name, extracted_items = fut.result() # blocks until finished
303
+ if (model_name == "yolox") and (extract_tables or extract_charts or extract_infographics):
304
+ extracted_data.extend(extracted_items)
305
+ elif model_name == "parser":
306
+ parser_results.extend(extracted_items)
307
+
308
+ for page_idx, parser_output in parser_results:
309
+ page = None
310
+ page_image = None
311
+ page_text = []
312
+
313
+ page_nearby_blocks = {
314
+ "text": {"content": [], "bbox": [], "type": []},
315
+ "images": {"content": [], "bbox": [], "type": []},
316
+ "structured": {"content": [], "bbox": [], "type": []},
317
+ }
318
+
319
+ for bbox_dict in parser_output:
320
+ cls = bbox_dict["type"]
321
+ bbox = bbox_dict["bbox"]
322
+ txt = bbox_dict["text"]
323
+
324
+ transformed_bbox = [
325
+ math.floor(bbox["xmin"] * NEMORETRIEVER_PARSE_MAX_WIDTH),
326
+ math.floor(bbox["ymin"] * NEMORETRIEVER_PARSE_MAX_HEIGHT),
327
+ math.ceil(bbox["xmax"] * NEMORETRIEVER_PARSE_MAX_WIDTH),
328
+ math.ceil(bbox["ymax"] * NEMORETRIEVER_PARSE_MAX_HEIGHT),
329
+ ]
330
+
331
+ if cls not in nemoretriever_parse_utils.ACCEPTED_CLASSES:
332
+ continue
333
+
334
+ if identify_nearby_objects:
335
+ _insert_page_nearby_blocks(page_nearby_blocks, cls, txt, transformed_bbox)
336
+
337
+ if extract_text:
338
+ page_text.append(txt)
339
+
340
+ if (extract_tables_method == "nemoretriever_parse") and (extract_tables) and (cls == "Table"):
341
+ table = LatexTable(
342
+ latex=txt,
343
+ bbox=transformed_bbox,
344
+ max_width=NEMORETRIEVER_PARSE_MAX_WIDTH,
345
+ max_height=NEMORETRIEVER_PARSE_MAX_HEIGHT,
346
+ )
347
+ accumulated_tables.append(table)
348
+
349
+ if extract_images and (cls == "Picture"):
350
+ if page is None:
351
+ page = doc.get_page(page_idx)
352
+ if page_image is None:
353
+ page_image, _ = _convert_pdfium_page_to_numpy_for_parser(page)
354
+
355
+ img_numpy = crop_image(page_image, transformed_bbox)
356
+
357
+ if img_numpy is not None:
358
+ base64_img = numpy_to_base64(img_numpy)
359
+ image = Base64Image(
360
+ image=base64_img,
361
+ bbox=transformed_bbox,
362
+ width=img_numpy.shape[1],
363
+ height=img_numpy.shape[0],
364
+ max_width=NEMORETRIEVER_PARSE_MAX_WIDTH,
365
+ max_height=NEMORETRIEVER_PARSE_MAX_HEIGHT,
366
+ )
367
+ accumulated_images.append(image)
368
+
369
+ # If NemoRetrieverParse fails to extract anything, fall back to using pdfium.
370
+ if not "".join(page_text).strip():
371
+ if page is None:
372
+ page = doc.get_page(page_idx)
373
+ page_text = [page.get_textpage().get_text_bounded()]
374
+
375
+ accumulated_text.extend(page_text)
376
+
377
+ # Construct tables
378
+ if extract_tables:
379
+ for table in accumulated_tables:
380
+ extracted_data.append(
381
+ _construct_table_metadata(
382
+ table,
383
+ page_idx,
384
+ page_count,
385
+ source_metadata,
386
+ base_unified_metadata,
387
+ )
388
+ )
389
+ accumulated_tables = []
390
+
391
+ # Construct images
392
+ if extract_images:
393
+ for image in accumulated_images:
394
+ extracted_data.append(
395
+ construct_image_metadata_from_pdf_image(
396
+ image,
397
+ page_idx,
398
+ page_count,
399
+ source_metadata,
400
+ base_unified_metadata,
401
+ )
402
+ )
403
+ accumulated_images = []
404
+
405
+ # Construct text - page
406
+ if (extract_text) and (text_depth == TextTypeEnum.PAGE):
407
+ extracted_data.append(
408
+ construct_text_metadata(
409
+ accumulated_text,
410
+ pdf_metadata.keywords,
411
+ page_idx,
412
+ -1,
413
+ -1,
414
+ -1,
415
+ page_count,
416
+ text_depth,
417
+ source_metadata,
418
+ base_unified_metadata,
419
+ delimiter="\n\n",
420
+ bbox_max_dimensions=(NEMORETRIEVER_PARSE_MAX_WIDTH, NEMORETRIEVER_PARSE_MAX_HEIGHT),
421
+ nearby_objects=page_nearby_blocks,
422
+ )
423
+ )
424
+ accumulated_text = []
425
+
426
+ # Construct text - document
427
+ if (extract_text) and (text_depth == TextTypeEnum.DOCUMENT):
428
+ text_extraction = construct_text_metadata(
429
+ accumulated_text,
430
+ pdf_metadata.keywords,
431
+ -1,
432
+ -1,
433
+ -1,
434
+ -1,
435
+ page_count,
436
+ text_depth,
437
+ source_metadata,
438
+ base_unified_metadata,
439
+ delimiter="\n\n",
440
+ )
441
+
442
+ if len(text_extraction) > 0:
443
+ extracted_data.append(text_extraction)
444
+
445
+ if nemoretriever_parse_client:
446
+ nemoretriever_parse_client.close()
447
+ doc.close()
448
+
449
+ return extracted_data
450
+
451
+
452
+ def _extract_text_and_bounding_boxes(
453
+ pages: list,
454
+ nemoretriever_parse_client,
455
+ execution_trace_log=None,
456
+ ) -> list:
457
+
458
+ # Collect all page indices and images in order.
459
+ image_page_indices = [page[0] for page in pages]
460
+ original_images = [page[1] for page in pages]
461
+
462
+ # Prepare the data payload with all images.
463
+ data = {"images": original_images}
464
+
465
+ # Perform inference using the NimClient.
466
+ inference_results = nemoretriever_parse_client.infer(
467
+ data=data,
468
+ model_name="nemoretriever_parse",
469
+ stage_name="pdf_content_extractor",
470
+ max_batch_size=NEMORETRIEVER_PARSE_MAX_BATCH_SIZE,
471
+ execution_trace_log=execution_trace_log,
472
+ )
473
+
474
+ return list(zip(image_page_indices, inference_results))
475
+
476
+
477
+ def _create_clients(nemoretriever_parse_config):
478
+ model_interface = nemoretriever_parse_utils.NemoRetrieverParseModelInterface(
479
+ model_name=nemoretriever_parse_config.model_name,
480
+ )
481
+ nemoretriever_parse_client = create_inference_client(
482
+ nemoretriever_parse_config.nemoretriever_parse_endpoints,
483
+ model_interface,
484
+ nemoretriever_parse_config.auth_token,
485
+ nemoretriever_parse_config.nemoretriever_parse_infer_protocol,
486
+ nemoretriever_parse_config.timeout,
487
+ )
488
+
489
+ return nemoretriever_parse_client
490
+
491
+
492
+ def _send_inference_request(
493
+ nemoretriever_parse_client,
494
+ image_array: np.ndarray,
495
+ ) -> Dict[str, Any]:
496
+
497
+ try:
498
+ # NIM only supports processing one page at a time (batch size = 1).
499
+ data = {"image": image_array}
500
+ response = nemoretriever_parse_client.infer(
501
+ data=data,
502
+ model_name="nemoretriever_parse",
503
+ )
504
+ except Exception as e:
505
+ logger.exception(f"Unhandled error during NemoRetrieverParse inference: {e}")
506
+ raise e
507
+
508
+ return response
509
+
510
+
511
+ def _convert_pdfium_page_to_numpy_for_parser(
512
+ page: pdfium.PdfPage,
513
+ render_dpi: int = NEMORETRIEVER_PARSE_RENDER_DPI,
514
+ scale_tuple: Tuple[int, int] = (NEMORETRIEVER_PARSE_MAX_WIDTH, NEMORETRIEVER_PARSE_MAX_HEIGHT),
515
+ padding_tuple: Tuple[int, int] = (NEMORETRIEVER_PARSE_MAX_WIDTH, NEMORETRIEVER_PARSE_MAX_HEIGHT),
516
+ ) -> np.ndarray:
517
+ page_images, padding_offsets = pdfium_pages_to_numpy(
518
+ [page], render_dpi=render_dpi, scale_tuple=scale_tuple, padding_tuple=padding_tuple
519
+ )
520
+
521
+ return page_images[0], padding_offsets[0]
522
+
523
+
524
+ def _convert_pdfium_page_to_numpy_for_yolox(
525
+ page: pdfium.PdfPage,
526
+ scale_tuple: Tuple[int, int] = (YOLOX_PAGE_IMAGE_PREPROC_WIDTH, YOLOX_PAGE_IMAGE_PREPROC_HEIGHT),
527
+ padding_tuple: Tuple[int, int] = (YOLOX_PAGE_IMAGE_PREPROC_WIDTH, YOLOX_PAGE_IMAGE_PREPROC_HEIGHT),
528
+ ) -> np.ndarray:
529
+ page_images, padding_offsets = pdfium_pages_to_numpy([page], scale_tuple=scale_tuple, padding_tuple=padding_tuple)
530
+
531
+ return page_images[0], padding_offsets[0]
532
+
533
+
534
+ def _insert_page_nearby_blocks(
535
+ page_nearby_blocks: Dict[str, Any],
536
+ cls: str,
537
+ txt: str,
538
+ bbox: str,
539
+ ):
540
+ if cls in nemoretriever_parse_utils.ACCEPTED_TEXT_CLASSES:
541
+ nearby_blocks_key = "text"
542
+ elif cls in nemoretriever_parse_utils.ACCEPTED_TABLE_CLASSES:
543
+ nearby_blocks_key = "structured"
544
+ elif cls in nemoretriever_parse_utils.ACCEPTED_IMAGE_CLASSES:
545
+ nearby_blocks_key = "images"
546
+
547
+ page_nearby_blocks[nearby_blocks_key]["content"].append(txt)
548
+ page_nearby_blocks[nearby_blocks_key]["bbox"].append(bbox)
549
+ page_nearby_blocks[nearby_blocks_key]["type"].append(cls)
550
+
551
+
552
+ @pdfium_exception_handler(descriptor="nemoretriever_parse")
553
+ def _construct_table_metadata(
554
+ table: LatexTable,
555
+ page_idx: int,
556
+ page_count: int,
557
+ source_metadata: Dict,
558
+ base_unified_metadata: Dict,
559
+ ):
560
+ content = table.latex
561
+ table_format = TableFormatEnum.LATEX
562
+ subtype = ContentTypeEnum.TABLE
563
+ description = ContentDescriptionEnum.PDF_TABLE
564
+
565
+ content_metadata = {
566
+ "type": ContentTypeEnum.STRUCTURED,
567
+ "description": description,
568
+ "page_number": page_idx,
569
+ "hierarchy": {
570
+ "page_count": page_count,
571
+ "page": page_idx,
572
+ "line": -1,
573
+ "span": -1,
574
+ },
575
+ "subtype": subtype,
576
+ }
577
+ table_metadata = {
578
+ "caption": "",
579
+ "table_content": content,
580
+ "table_format": table_format,
581
+ "table_location": table.bbox,
582
+ "table_location_max_dimensions": (table.max_width, table.max_height),
583
+ }
584
+ ext_unified_metadata = base_unified_metadata.copy()
585
+
586
+ ext_unified_metadata.update(
587
+ {
588
+ "content": "",
589
+ "source_metadata": source_metadata,
590
+ "content_metadata": content_metadata,
591
+ "table_metadata": table_metadata,
592
+ }
593
+ )
594
+
595
+ validated_unified_metadata = validate_metadata(ext_unified_metadata)
596
+
597
+ return [ContentTypeEnum.STRUCTURED, validated_unified_metadata.model_dump(), str(uuid.uuid4())]