kreuzberg-3.7.0-py3-none-any.whl → kreuzberg-3.8.1-py3-none-any.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
Files changed (42)
  1. kreuzberg/_entity_extraction.py +1 -2
  2. kreuzberg/_extractors/_base.py +39 -1
  3. kreuzberg/_extractors/_email.py +149 -0
  4. kreuzberg/_extractors/_html.py +15 -3
  5. kreuzberg/_extractors/_image.py +21 -36
  6. kreuzberg/_extractors/_pandoc.py +3 -14
  7. kreuzberg/_extractors/_pdf.py +81 -48
  8. kreuzberg/_extractors/_presentation.py +62 -10
  9. kreuzberg/_extractors/_spread_sheet.py +179 -4
  10. kreuzberg/_extractors/_structured.py +148 -0
  11. kreuzberg/_gmft.py +314 -7
  12. kreuzberg/_mime_types.py +27 -1
  13. kreuzberg/_ocr/__init__.py +10 -1
  14. kreuzberg/_ocr/_base.py +59 -0
  15. kreuzberg/_ocr/_easyocr.py +91 -0
  16. kreuzberg/_ocr/_paddleocr.py +89 -0
  17. kreuzberg/_ocr/_tesseract.py +564 -4
  18. kreuzberg/_registry.py +4 -0
  19. kreuzberg/_types.py +131 -0
  20. kreuzberg/_utils/_cache.py +52 -4
  21. kreuzberg/_utils/_errors.py +3 -7
  22. kreuzberg/_utils/_process_pool.py +180 -7
  23. kreuzberg/_utils/_quality.py +237 -0
  24. kreuzberg/_utils/_serialization.py +4 -2
  25. kreuzberg/_utils/_string.py +153 -10
  26. kreuzberg/_utils/_sync.py +5 -2
  27. kreuzberg/_utils/_table.py +261 -0
  28. kreuzberg/cli.py +1 -2
  29. kreuzberg/extraction.py +4 -22
  30. {kreuzberg-3.7.0.dist-info → kreuzberg-3.8.1.dist-info}/METADATA +58 -54
  31. kreuzberg-3.8.1.dist-info/RECORD +53 -0
  32. kreuzberg/_multiprocessing/__init__.py +0 -6
  33. kreuzberg/_multiprocessing/gmft_isolated.py +0 -330
  34. kreuzberg/_multiprocessing/process_manager.py +0 -189
  35. kreuzberg/_multiprocessing/sync_easyocr.py +0 -235
  36. kreuzberg/_multiprocessing/sync_paddleocr.py +0 -199
  37. kreuzberg/_multiprocessing/sync_tesseract.py +0 -261
  38. kreuzberg/_multiprocessing/tesseract_pool.py +0 -359
  39. kreuzberg-3.7.0.dist-info/RECORD +0 -56
  40. {kreuzberg-3.7.0.dist-info → kreuzberg-3.8.1.dist-info}/WHEEL +0 -0
  41. {kreuzberg-3.7.0.dist-info → kreuzberg-3.8.1.dist-info}/entry_points.txt +0 -0
  42. {kreuzberg-3.7.0.dist-info → kreuzberg-3.8.1.dist-info}/licenses/LICENSE +0 -0
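
Two of the removed modules, kreuzberg/_multiprocessing/gmft_isolated.py and kreuzberg/_multiprocessing/process_manager.py, are reproduced in full below. The first ran GMFT table extraction in a spawned child process so that a segmentation fault inside native PDF code kills only the child; the parent polls a result queue and turns the child's exit code into an error. A minimal sketch of that isolation pattern, independent of kreuzberg (the _risky worker and every name in it are illustrative, not part of the package):

import multiprocessing as mp
import queue
import signal


def _risky(result_queue) -> None:
    # Stand-in for a call into native code that may segfault.
    result_queue.put("ok")


def run_isolated(timeout: float = 30.0) -> str:
    ctx = mp.get_context("spawn")  # fresh interpreter; nothing shared with the parent
    result_queue = ctx.Queue()
    process = ctx.Process(target=_risky, args=(result_queue,))
    process.start()
    try:
        return result_queue.get(timeout=timeout)
    except queue.Empty:
        # A crashed child never reports back; its exit code says why.
        if process.exitcode == -signal.SIGSEGV:
            raise RuntimeError("child crashed with SIGSEGV") from None
        raise
    finally:
        if process.is_alive():
            process.terminate()
        process.join(timeout=5)


if __name__ == "__main__":
    print(run_isolated())

Unlike this sketch, the removed module polls with get_nowait so it can report a dead child before the timeout expires, as the code below shows.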
kreuzberg/_multiprocessing/gmft_isolated.py
@@ -1,330 +0,0 @@
- """Isolated GMFT table extraction to handle segmentation faults."""
-
- from __future__ import annotations
-
- import multiprocessing as mp
- import pickle
- import queue
- import signal
- import traceback
- from typing import TYPE_CHECKING, Any
-
- if TYPE_CHECKING:
-     from os import PathLike
-
-     from kreuzberg._gmft import GMFTConfig
-     from kreuzberg._types import TableData
-
-
- def _extract_tables_in_process(
-     file_path: str | PathLike[str],
-     config_dict: dict[str, Any],
-     result_queue: queue.Queue[tuple[bool, Any]],
- ) -> None:
-     """Extract tables in an isolated process to handle potential segfaults.
-
-     Args:
-         file_path: Path to the PDF file
-         config_dict: Serialized GMFTConfig as a dict
-         result_queue: Queue to put results or errors
-     """
-     signal.signal(signal.SIGINT, signal.SIG_IGN)
-
-     try:
-         from gmft.auto import AutoTableDetector, AutoTableFormatter  # type: ignore[attr-defined]
-         from gmft.detectors.tatr import TATRDetectorConfig  # type: ignore[attr-defined]
-         from gmft.formatters.tatr import TATRFormatConfig
-         from gmft.pdf_bindings.pdfium import PyPDFium2Document
-
-         from kreuzberg._gmft import GMFTConfig
-
-         config = GMFTConfig(**config_dict)
-
-         formatter = AutoTableFormatter(  # type: ignore[no-untyped-call]
-             config=TATRFormatConfig(
-                 verbosity=config.verbosity,
-                 formatter_base_threshold=config.formatter_base_threshold,
-                 cell_required_confidence=config.cell_required_confidence,
-                 remove_null_rows=config.remove_null_rows,
-                 enable_multi_header=config.enable_multi_header,
-                 semantic_spanning_cells=config.semantic_spanning_cells,
-                 semantic_hierarchical_left_fill=config.semantic_hierarchical_left_fill,
-                 large_table_if_n_rows_removed=config.large_table_if_n_rows_removed,
-                 large_table_threshold=config.large_table_threshold,
-                 large_table_row_overlap_threshold=config.large_table_row_overlap_threshold,
-                 large_table_maximum_rows=config.large_table_maximum_rows,
-                 force_large_table_assumption=config.force_large_table_assumption,
-             )
-         )
-         detector = AutoTableDetector(config=TATRDetectorConfig(detector_base_threshold=config.detector_base_threshold))  # type: ignore[no-untyped-call]
-
-         doc = PyPDFium2Document(str(file_path))
-         cropped_tables = []
-         dataframes = []
-
-         try:
-             for page in doc:
-                 cropped_tables.extend(detector.extract(page))  # type: ignore[attr-defined]
-
-             for cropped_table in cropped_tables:
-                 formatted_table = formatter.extract(cropped_table)  # type: ignore[attr-defined]
-                 dataframes.append(formatted_table.df())
-
-             results = []
-             for data_frame, cropped_table in zip(dataframes, cropped_tables, strict=False):
-                 import io
-
-                 img_bytes = io.BytesIO()
-                 cropped_image = cropped_table.image()
-                 cropped_image.save(img_bytes, format="PNG")
-                 img_bytes.seek(0)
-
-                 results.append(
-                     {
-                         "cropped_image_bytes": img_bytes.getvalue(),
-                         "page_number": cropped_table.page.page_number,
-                         "text": data_frame.to_markdown(),
-                         "df_pickle": pickle.dumps(data_frame),
-                     }
-                 )
-
-             result_queue.put((True, results))
-
-         finally:
-             doc.close()  # type: ignore[no-untyped-call]
-
-     except Exception as e:  # noqa: BLE001
-         error_info = {"error": str(e), "type": type(e).__name__, "traceback": traceback.format_exc()}
-         result_queue.put((False, error_info))
-
-
- def extract_tables_isolated(
-     file_path: str | PathLike[str],
-     config: GMFTConfig | None = None,
-     timeout: float = 300.0,
- ) -> list[TableData]:
-     """Extract tables using an isolated process to handle segfaults.
-
-     Args:
-         file_path: Path to the PDF file
-         config: GMFT configuration
-         timeout: Maximum time to wait for extraction
-
-     Returns:
-         List of extracted tables
-
-     Raises:
-         RuntimeError: If extraction fails or times out
-     """
-     from kreuzberg._gmft import GMFTConfig
-     from kreuzberg._types import TableData
-     from kreuzberg.exceptions import ParsingError
-
-     config = config or GMFTConfig()
-     config_dict = config.__dict__.copy()
-
-     ctx = mp.get_context("spawn")
-     result_queue = ctx.Queue()
-
-     process = ctx.Process(
-         target=_extract_tables_in_process,
-         args=(str(file_path), config_dict, result_queue),
-     )
-
-     process.start()
-
-     try:
-         # Wait for result with timeout, checking for process death  # ~keep
-         import time
-
-         start_time = time.time()
-         while True:
-             try:
-                 success, result = result_queue.get_nowait()
-                 break
-             except queue.Empty:
-                 if time.time() - start_time > timeout:
-                     raise
-
-                 if not process.is_alive():
-                     # Process died without putting result  # ~keep
-                     if process.exitcode == -signal.SIGSEGV:
-                         raise ParsingError(
-                             "GMFT process crashed with segmentation fault",
-                             context={
-                                 "file_path": str(file_path),
-                                 "exit_code": process.exitcode,
-                             },
-                         ) from None
-                     raise ParsingError(
-                         f"GMFT process died unexpectedly with exit code {process.exitcode}",
-                         context={
-                             "file_path": str(file_path),
-                             "exit_code": process.exitcode,
-                         },
-                     ) from None
-
-                 time.sleep(0.1)
-
-         if success:
-             tables = []
-             for table_dict in result:
-                 import io
-                 import pickle
-
-                 from PIL import Image
-
-                 img = Image.open(io.BytesIO(table_dict["cropped_image_bytes"]))
-                 df = pickle.loads(table_dict["df_pickle"])  # noqa: S301
-
-                 tables.append(
-                     TableData(
-                         cropped_image=img,
-                         page_number=table_dict["page_number"],
-                         text=table_dict["text"],
-                         df=df,
-                     )
-                 )
-
-             return tables
-
-         error_info = result
-         raise ParsingError(
-             f"GMFT table extraction failed: {error_info['error']}",
-             context={
-                 "file_path": str(file_path),
-                 "error_type": error_info["type"],
-                 "traceback": error_info["traceback"],
-             },
-         )
-
-     except queue.Empty as e:
-         raise ParsingError(
-             "GMFT table extraction timed out",
-             context={
-                 "file_path": str(file_path),
-                 "timeout": timeout,
-             },
-         ) from e
-     finally:
-         if process.is_alive():
-             process.terminate()
-             process.join(timeout=5)
-             if process.is_alive():
-                 process.kill()
-                 process.join()
-
-
- async def extract_tables_isolated_async(
-     file_path: str | PathLike[str],
-     config: GMFTConfig | None = None,
-     timeout: float = 300.0,
- ) -> list[TableData]:
-     """Async version of extract_tables_isolated using asyncio.
-
-     Args:
-         file_path: Path to the PDF file
-         config: GMFT configuration
-         timeout: Maximum time to wait for extraction
-
-     Returns:
-         List of extracted tables
-
-     Raises:
-         RuntimeError: If extraction fails or times out
-     """
-     import anyio
-
-     from kreuzberg._gmft import GMFTConfig
-     from kreuzberg._types import TableData
-     from kreuzberg.exceptions import ParsingError
-
-     config = config or GMFTConfig()
-     config_dict = config.__dict__.copy()
-
-     ctx = mp.get_context("spawn")
-     result_queue = ctx.Queue()
-
-     process = ctx.Process(
-         target=_extract_tables_in_process,
-         args=(str(file_path), config_dict, result_queue),
-     )
-
-     process.start()
-
-     try:
-
-         async def wait_for_result() -> tuple[bool, Any]:
-             while True:
-                 try:
-                     return result_queue.get_nowait()  # type: ignore[no-any-return]
-                 except queue.Empty:  # noqa: PERF203
-                     await anyio.sleep(0.1)
-                     if not process.is_alive():
-                         # Process died without putting result  # ~keep
-                         if process.exitcode == -signal.SIGSEGV:
-                             raise ParsingError(
-                                 "GMFT process crashed with segmentation fault",
-                                 context={
-                                     "file_path": str(file_path),
-                                     "exit_code": process.exitcode,
-                                 },
-                             ) from None
-                         raise ParsingError(
-                             f"GMFT process died unexpectedly with exit code {process.exitcode}",
-                             context={
-                                 "file_path": str(file_path),
-                                 "exit_code": process.exitcode,
-                             },
-                         ) from None
-
-         with anyio.fail_after(timeout):
-             success, result = await wait_for_result()
-
-         if success:
-             tables = []
-             for table_dict in result:
-                 import io
-                 import pickle
-
-                 from PIL import Image
-
-                 img = Image.open(io.BytesIO(table_dict["cropped_image_bytes"]))
-                 df = pickle.loads(table_dict["df_pickle"])  # noqa: S301
-
-                 tables.append(
-                     TableData(
-                         cropped_image=img,
-                         page_number=table_dict["page_number"],
-                         text=table_dict["text"],
-                         df=df,
-                     )
-                 )
-
-             return tables
-
-         error_info = result
-         raise ParsingError(
-             f"GMFT table extraction failed: {error_info['error']}",
-             context={
-                 "file_path": str(file_path),
-                 "error_type": error_info["type"],
-                 "traceback": error_info["traceback"],
-             },
-         )
-
-     except TimeoutError as e:
-         raise ParsingError(
-             "GMFT table extraction timed out",
-             context={
-                 "file_path": str(file_path),
-                 "timeout": timeout,
-             },
-         ) from e
-     finally:
-         if process.is_alive():
-             process.terminate()
-             await anyio.to_thread.run_sync(lambda: process.join(timeout=5))
-             if process.is_alive():
-                 process.kill()
-                 await anyio.to_thread.run_sync(process.join)
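
For reference, this is roughly how the removed sync API was called in 3.7.x. The file name is a placeholder, and the mapping-style access assumes TableData is a TypedDict, consistent with how it is constructed above:

from kreuzberg._gmft import GMFTConfig
from kreuzberg._multiprocessing.gmft_isolated import extract_tables_isolated

# Raises kreuzberg.exceptions.ParsingError on a crash, a failure, or a timeout.
tables = extract_tables_isolated("scanned-report.pdf", config=GMFTConfig(), timeout=120.0)
for table in tables:
    print(table["page_number"], table["text"])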
kreuzberg/_multiprocessing/process_manager.py
@@ -1,189 +0,0 @@
- """Process pool manager for resource-aware multiprocessing."""
-
- from __future__ import annotations
-
- import multiprocessing as mp
- from concurrent.futures import ProcessPoolExecutor
- from typing import TYPE_CHECKING, Any, TypeVar
-
- import anyio
- import psutil
- from typing_extensions import Self
-
- if TYPE_CHECKING:
-     import types
-     from collections.abc import Callable
-
- T = TypeVar("T")
-
-
- class ProcessPoolManager:
-     """Resource-aware process pool manager for CPU-intensive tasks."""
-
-     def __init__(
-         self,
-         max_processes: int | None = None,
-         memory_limit_gb: float | None = None,
-     ) -> None:
-         """Initialize the process pool manager.
-
-         Args:
-             max_processes: Maximum number of processes. Defaults to CPU count.
-             memory_limit_gb: Memory limit in GB. Defaults to 75% of available memory.
-         """
-         self.max_processes = max_processes or mp.cpu_count()
-
-         if memory_limit_gb is None:
-             available_memory = psutil.virtual_memory().available
-             self.memory_limit_bytes = int(available_memory * 0.75)  # Use 75% of available  # ~keep
-         else:
-             self.memory_limit_bytes = int(memory_limit_gb * 1024**3)
-
-         self._executor: ProcessPoolExecutor | None = None
-         self._active_tasks = 0
-
-     def get_optimal_workers(self, task_memory_mb: float = 100) -> int:
-         """Calculate optimal number of workers based on memory constraints.
-
-         Args:
-             task_memory_mb: Estimated memory usage per task in MB.
-
-         Returns:
-             Optimal number of workers.
-         """
-         task_memory_bytes = task_memory_mb * 1024**2
-         memory_based_limit = max(1, int(self.memory_limit_bytes / task_memory_bytes))
-
-         return min(self.max_processes, memory_based_limit)
-
-     def _ensure_executor(self, max_workers: int | None = None) -> ProcessPoolExecutor:
-         """Ensure process pool executor is initialized."""
-         if self._executor is None or getattr(self._executor, "_max_workers", None) != max_workers:
-             if self._executor is not None:
-                 self._executor.shutdown(wait=False)
-
-             workers = max_workers or self.max_processes
-             self._executor = ProcessPoolExecutor(max_workers=workers)
-
-         return self._executor
-
-     async def submit_task(
-         self,
-         func: Callable[..., T],
-         *args: Any,
-         task_memory_mb: float = 100,
-     ) -> T:
-         """Submit a task to the process pool.
-
-         Args:
-             func: Function to execute.
-             *args: Positional arguments for the function.
-             task_memory_mb: Estimated memory usage in MB.
-
-         Returns:
-             Result of the function execution.
-         """
-         workers = self.get_optimal_workers(task_memory_mb)
-         self._ensure_executor(workers)
-
-         self._active_tasks += 1
-
-         try:
-             return await anyio.to_thread.run_sync(func, *args)
-         finally:
-             self._active_tasks -= 1
-
-     async def submit_batch(
-         self,
-         func: Callable[..., T],
-         arg_batches: list[tuple[Any, ...]],
-         task_memory_mb: float = 100,
-         max_concurrent: int | None = None,
-     ) -> list[T]:
-         """Submit a batch of tasks to the process pool.
-
-         Args:
-             func: Function to execute.
-             arg_batches: List of argument tuples for each task.
-             task_memory_mb: Estimated memory usage per task in MB.
-             max_concurrent: Maximum concurrent tasks. Defaults to optimal workers.
-
-         Returns:
-             List of results in the same order as input.
-         """
-         if not arg_batches:
-             return []
-
-         workers = self.get_optimal_workers(task_memory_mb)
-         max_concurrent = max_concurrent or workers
-
-         self._ensure_executor(workers)
-
-         semaphore = anyio.CapacityLimiter(max_concurrent)
-
-         async def submit_single(args: tuple[Any, ...]) -> T:
-             async with semaphore:
-                 self._active_tasks += 1
-                 try:
-                     return await anyio.to_thread.run_sync(func, *args)
-                 finally:
-                     self._active_tasks -= 1
-
-         async with anyio.create_task_group() as tg:
-             results: list[T] = [None] * len(arg_batches)  # type: ignore[list-item]
-
-             async def run_task(idx: int, args: tuple[Any, ...]) -> None:
-                 results[idx] = await submit_single(args)
-
-             for idx, args in enumerate(arg_batches):
-                 tg.start_soon(run_task, idx, args)
-
-         return results
-
-     def get_system_info(self) -> dict[str, Any]:
-         """Get current system resource information."""
-         memory = psutil.virtual_memory()
-         cpu_percent = psutil.cpu_percent(interval=1)
-
-         return {
-             "cpu_count": mp.cpu_count(),
-             "cpu_percent": cpu_percent,
-             "memory_total": memory.total,
-             "memory_available": memory.available,
-             "memory_percent": memory.percent,
-             "active_tasks": self._active_tasks,
-             "max_processes": self.max_processes,
-             "memory_limit": self.memory_limit_bytes,
-         }
-
-     def shutdown(self, wait: bool = True) -> None:
-         """Shutdown the process pool."""
-         if self._executor is not None:
-             self._executor.shutdown(wait=wait)
-             self._executor = None
-
-     def __enter__(self) -> Self:
-         """Context manager entry."""
-         return self
-
-     def __exit__(
-         self,
-         exc_type: type[BaseException] | None,
-         exc_val: BaseException | None,
-         exc_tb: types.TracebackType | None,
-     ) -> None:
-         """Context manager exit."""
-         self.shutdown()
-
-     async def __aenter__(self) -> Self:
-         """Async context manager entry."""
-         return self
-
-     async def __aexit__(
-         self,
-         exc_type: type[BaseException] | None,
-         exc_val: BaseException | None,
-         exc_tb: types.TracebackType | None,
-     ) -> None:
-         """Async context manager exit."""
-         self.shutdown()
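
And a sketch of driving the removed ProcessPoolManager through its async context manager; the square worker and the limits are illustrative, not part of the package:

import asyncio

from kreuzberg._multiprocessing.process_manager import ProcessPoolManager


def square(n: int) -> int:
    return n * n


async def main() -> None:
    # The async context manager shuts the pool down on exit.
    async with ProcessPoolManager(max_processes=4, memory_limit_gb=2.0) as pool:
        one = await pool.submit_task(square, 7, task_memory_mb=50)
        many = await pool.submit_batch(square, [(n,) for n in range(8)], task_memory_mb=50)
        print(one, many)


asyncio.run(main())

Note that both submit paths dispatch through anyio.to_thread.run_sync: the memory-based worker math sizes the ProcessPoolExecutor, but the submitted calls actually run on threads, a quirk of the removed code visible in submit_task above.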