natural-pdf 0.1.22__py3-none-any.whl → 0.1.24__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- natural_pdf/analyzers/shape_detection_mixin.py +43 -3
- natural_pdf/classification/manager.py +1 -1
- natural_pdf/classification/mixin.py +35 -14
- natural_pdf/classification/results.py +16 -1
- natural_pdf/cli.py +1 -0
- natural_pdf/core/highlighting_service.py +23 -0
- natural_pdf/core/page.py +32 -2
- natural_pdf/core/pdf.py +24 -4
- natural_pdf/describe/base.py +11 -1
- natural_pdf/describe/summary.py +26 -0
- natural_pdf/elements/base.py +81 -3
- natural_pdf/elements/collections.py +162 -101
- natural_pdf/elements/region.py +187 -160
- natural_pdf/elements/text.py +15 -7
- natural_pdf/exporters/paddleocr.py +1 -1
- natural_pdf/extraction/manager.py +2 -2
- natural_pdf/extraction/mixin.py +295 -11
- natural_pdf/extraction/result.py +28 -1
- natural_pdf/flows/region.py +117 -2
- natural_pdf/ocr/engine_surya.py +25 -5
- natural_pdf/qa/__init__.py +2 -1
- natural_pdf/qa/document_qa.py +166 -113
- natural_pdf/qa/qa_result.py +55 -0
- natural_pdf/selectors/parser.py +22 -0
- natural_pdf/utils/text_extraction.py +34 -14
- {natural_pdf-0.1.22.dist-info → natural_pdf-0.1.24.dist-info}/METADATA +22 -13
- {natural_pdf-0.1.22.dist-info → natural_pdf-0.1.24.dist-info}/RECORD +31 -30
- {natural_pdf-0.1.22.dist-info → natural_pdf-0.1.24.dist-info}/WHEEL +0 -0
- {natural_pdf-0.1.22.dist-info → natural_pdf-0.1.24.dist-info}/entry_points.txt +0 -0
- {natural_pdf-0.1.22.dist-info → natural_pdf-0.1.24.dist-info}/licenses/LICENSE +0 -0
- {natural_pdf-0.1.22.dist-info → natural_pdf-0.1.24.dist-info}/top_level.txt +0 -0
natural_pdf/analyzers/shape_detection_mixin.py
CHANGED
@@ -63,7 +63,7 @@ class ShapeDetectionMixin:
             logger.debug(f"Shape detection on Region: {self}")
             page_obj = self._page
             pil_image = self.to_image(
-                resolution=resolution,
+                resolution=resolution, crop=True, include_highlights=False
             )
             if pil_image:  # Ensure pil_image is not None before accessing attributes
                 origin_offset_pdf = (self.x0, self.top)

@@ -681,7 +681,7 @@ class ShapeDetectionMixin:
         if hasattr(self, "to_image") and hasattr(self, "width") and hasattr(self, "height"):
             if hasattr(self, "x0") and hasattr(self, "top") and hasattr(self, "_page"):
                 pil_image_for_dims = self.to_image(
-                    resolution=resolution,
+                    resolution=resolution, crop=True, include_highlights=False
                 )
             else:
                 pil_image_for_dims = self.to_image(resolution=resolution, include_highlights=False)

@@ -1204,7 +1204,7 @@ class ShapeDetectionMixin:
         if hasattr(self, "to_image") and hasattr(self, "width") and hasattr(self, "height"):
             if hasattr(self, "x0") and hasattr(self, "top") and hasattr(self, "_page"):
                 pil_image_for_dims = self.to_image(
-                    resolution=resolution,
+                    resolution=resolution, crop=True, include_highlights=False
                 )
             else:
                 pil_image_for_dims = self.to_image(resolution=resolution, include_highlights=False)
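All three call sites now render the region cropped to its own bounds (crop=True) and without highlights, so detected shapes come back in crop-local pixel coordinates and are mapped to page space via the origin offset visible in the first hunk (origin_offset_pdf = (self.x0, self.top)). A minimal sketch of that mapping, assuming the usual pixels-per-point scale factors:

    def image_px_to_pdf(px, py, origin_offset_pdf, scale_x, scale_y):
        # origin_offset_pdf is (region.x0, region.top); scale_* are pixels per PDF point
        x0_offset, top_offset = origin_offset_pdf
        return x0_offset + px / scale_x, top_offset + py / scale_y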
@@ -1490,6 +1490,45 @@ class ShapeDetectionMixin:

         element_manager = page_object_for_elements._element_mgr

+        # ------------------------------------------------------------------
+        # CLEAN-UP existing table-related regions from earlier runs to avoid duplicates
+        # ------------------------------------------------------------------
+        try:
+            _purge_types = {"table", "table_row", "table_column", "table_cell"}
+
+            if (
+                hasattr(element_manager, "_elements")
+                and "regions" in element_manager._elements
+            ):
+                _orig_len = len(element_manager._elements["regions"])
+                element_manager._elements["regions"] = [
+                    r
+                    for r in element_manager._elements["regions"]
+                    if not (
+                        getattr(r, "source", None) == source_label
+                        and getattr(r, "region_type", None) in _purge_types
+                    )
+                ]
+                _removed = _orig_len - len(element_manager._elements["regions"])
+                if _removed:
+                    logger.info(
+                        f"Removed {_removed} previous table-related regions (source='{source_label}') before regeneration."
+                    )
+
+            if hasattr(page_object_for_elements, "_regions") and "detected" in page_object_for_elements._regions:
+                page_object_for_elements._regions["detected"] = [
+                    r
+                    for r in page_object_for_elements._regions["detected"]
+                    if not (
+                        getattr(r, "source", None) == source_label
+                        and getattr(r, "region_type", None) in _purge_types
+                    )
+                ]
+        except Exception as _cleanup_err:
+            logger.warning(
+                f"Table-region cleanup failed: {_cleanup_err}", exc_info=True
+            )
+
         # Get lines with the specified source
         all_lines = element_manager.lines  # Access lines from the correct element manager
         filtered_lines = [

@@ -1724,6 +1763,7 @@ class ShapeDetectionMixin:
         logger.info(
             f"Created {tables_created} table, {rows_created} rows, {cols_created} columns, and {cells_created} table cells from detected lines (source: '{source_label}') for {self}."
         )
+
         return self

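The clean-up block above means that re-running line-based table detection with the same source label replaces the table, table_row, table_column and table_cell regions from the previous run instead of accumulating duplicates. A rough usage sketch; the detect_table_structure_from_lines name and its source_label argument are assumptions made for illustration, not confirmed API:

    from natural_pdf import PDF

    page = PDF("report.pdf").pages[0]          # hypothetical input file

    # First run creates table / row / column / cell regions from detected lines.
    page.detect_table_structure_from_lines(source_label="detected")

    # Running it again with the same source label now purges the earlier
    # regions first, so region counts reflect a single pass, not two.
    page.detect_table_structure_from_lines(source_label="detected")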
natural_pdf/classification/manager.py
CHANGED
@@ -90,7 +90,7 @@ class ClassificationManager:
        if not _check_classification_dependencies():
            raise ImportError(
                "Classification dependencies missing. "
-                'Install with: pip install "natural-pdf[
+                'Install with: pip install "natural-pdf[ai]"'
            )

        self.pipelines: Dict[Tuple[str, str], "Pipeline"] = (
natural_pdf/classification/mixin.py
CHANGED
@@ -2,6 +2,7 @@ import logging
 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union

 from PIL import Image
+import warnings

 from .results import ClassificationResult

@@ -74,32 +75,52 @@ class ClassificationMixin:
        try:
            manager = self._get_classification_manager()

-            #
+            # ------------------------------------------------------------
+            # Resolve engine ('text' vs 'vision')
+            # ------------------------------------------------------------
+            engine: Optional[str] = using  # rename for clarity
+
+            content = None  # will hold final content
+
+            if engine is None:
+                # Try text first
+                try:
+                    tentative_text = self._get_classification_content("text", **kwargs)
+                    if tentative_text and not (isinstance(tentative_text, str) and tentative_text.isspace()):
+                        engine = "text"
+                        content = tentative_text
+                    else:
+                        raise ValueError("Empty text")
+                except Exception:
+                    warnings.warn(
+                        "No text found for classification; falling back to vision model. "
+                        "Pass using='vision' explicitly to silence this message.",
+                        UserWarning,
+                    )
+                    engine = "vision"
+
+            # If engine determined but content not yet retrieved, get it now
+            if content is None:
+                content = self._get_classification_content(model_type=engine, **kwargs)
+
+            # ------------------------------------------------------------
+            # Determine model ID default based on engine
+            # ------------------------------------------------------------
            effective_model_id = model
-            inferred_using = manager.infer_using(
-                model if model else manager.DEFAULT_TEXT_MODEL, using
-            )
-
-            # If model was not provided, use the manager's default for the inferred engine type
            if effective_model_id is None:
                effective_model_id = (
-                    manager.DEFAULT_TEXT_MODEL
-                    if inferred_using == "text"
-                    else manager.DEFAULT_VISION_MODEL
+                    manager.DEFAULT_TEXT_MODEL if engine == "text" else manager.DEFAULT_VISION_MODEL
                )
                logger.debug(
-                    f"No model provided, using default for mode '{
+                    f"No model provided, using default for mode '{engine}': '{effective_model_id}'"
                )

-            # Get content based on the *final* determined engine type
-            content = self._get_classification_content(model_type=inferred_using, **kwargs)
-
            # Manager now returns a ClassificationResult object
            result_obj: ClassificationResult = manager.classify_item(
                item_content=content,
                labels=labels,
                model_id=effective_model_id,
-                using=
+                using=engine,
                min_confidence=min_confidence,
                multi_label=multi_label,
                **kwargs,
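The net effect: when using= is not given, classification now tries the text engine first and only falls back to the vision engine (emitting a UserWarning) if no usable text is found, and the default model is picked from the engine actually selected. A sketch of how this surfaces to callers, assuming the mixin's public entry point is classify() as on pages and regions:

    from natural_pdf import PDF

    page = PDF("letters.pdf").pages[0]         # hypothetical file

    # Engine inferred automatically: "text" if the page has extractable text,
    # otherwise a UserWarning is emitted and the vision default model is used.
    page.classify(["invoice", "letter", "report"])

    # Passing using= explicitly skips the inference (and the warning).
    page.classify(["invoice", "letter", "report"], using="vision")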
natural_pdf/classification/results.py
CHANGED
@@ -3,6 +3,7 @@ import logging
 from dataclasses import dataclass
 from datetime import datetime
 from typing import Any, Dict, List, Optional
+from collections.abc import Mapping

 logger = logging.getLogger(__name__)

@@ -20,7 +21,7 @@ class CategoryScore:


 @dataclass
-class ClassificationResult:
+class ClassificationResult(Mapping):
    """Results from a classification operation."""

    category: Optional[str]  # Can be None if scores are empty

@@ -86,3 +87,17 @@ class ClassificationResult:

    def __repr__(self) -> str:
        return f"<ClassificationResult category='{self.category}' score={self.score:.3f} model='{self.model_id}'>"
+
+    def __iter__(self):
+        """Iterate over mapping keys (linked to ``to_dict`` so it stays in sync)."""
+        return iter(self.to_dict())
+
+    def __getitem__(self, key):
+        """Dictionary-style access to attributes."""
+        try:
+            return self.to_dict()[key]
+        except KeyError as exc:
+            raise KeyError(key) from exc
+
+    def __len__(self):
+        return len(self.to_dict())
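Because ClassificationResult now subclasses collections.abc.Mapping, with __iter__, __getitem__ and __len__ all backed by to_dict(), a result can be read with dictionary syntax in addition to attribute access; the exact keys are whatever to_dict() returns. For example:

    # result: a ClassificationResult returned by ClassificationManager.classify_item()
    print(result.category, result.score)    # attribute access, as before
    print(result["category"])               # new: dict-style access via __getitem__
    print(len(result), list(result))        # __len__ / __iter__ delegate to to_dict()
    plain = dict(result)                    # Mapping makes plain-dict conversion work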
natural_pdf/cli.py
CHANGED
natural_pdf/core/highlighting_service.py
CHANGED
@@ -727,6 +727,7 @@ class HighlightingService:
        legend_position: str = "right",
        render_ocr: bool = False,
        resolution: Optional[float] = None,
+        crop_bbox: Optional[Tuple[float, float, float, float]] = None,
        **kwargs,
    ) -> Optional[Image.Image]:
        """

@@ -741,6 +742,9 @@ class HighlightingService:
            legend_position: Position of the legend.
            render_ocr: Whether to render OCR text.
            resolution: Resolution for base page image rendering if width/height not used.
+            crop_bbox: Optional bounding box (x0, top, x1, bottom) in PDF coordinate
+                space to crop the output image to, before legends or other overlays are
+                applied. If None, no cropping is performed.
            **kwargs: Additional args for pdfplumber's to_image (e.g., width, height).

        Returns:

@@ -855,6 +859,25 @@ class HighlightingService:
            )
        rendered_image = renderer.render()

+        # --- Optional Cropping BEFORE legend addition ---
+        if crop_bbox is not None:
+            cb_x0, cb_top, cb_x1, cb_bottom = crop_bbox
+            # Convert to pixel coordinates using actual scales
+            left_px = int(cb_x0 * actual_scale_x) - 1
+            top_px = int(cb_top * actual_scale_y) - 1
+            right_px = int(cb_x1 * actual_scale_x) + 1
+            bottom_px = int(cb_bottom * actual_scale_y) + 1
+
+            # Safeguard coordinates within bounds
+            left_px = max(0, min(left_px, rendered_image.width - 1))
+            top_px = max(0, min(top_px, rendered_image.height - 1))
+            right_px = max(left_px + 1, min(right_px, rendered_image.width))
+            bottom_px = max(top_px + 1, min(bottom_px, rendered_image.height))
+
+            rendered_image = rendered_image.crop(
+                (left_px, top_px, right_px, bottom_px)
+            )
+
        legend = None
        if labels:
            preview_labels = {h.label: h.color for h in preview_highlights if h.label}
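The crop happens in rendered-image pixel space: the PDF-space bbox is scaled by the actual render scale, padded by one pixel on each side, and clamped to the image bounds before legends are drawn. The same logic as a standalone sketch (actual_scale_x / actual_scale_y are the page-to-image scale factors computed earlier in the method):

    from typing import Tuple
    from PIL import Image

    def crop_to_pdf_bbox(img: Image.Image, bbox: Tuple[float, float, float, float],
                         scale_x: float, scale_y: float) -> Image.Image:
        """Crop a rendered page image to a PDF-space (x0, top, x1, bottom) box."""
        x0, top, x1, bottom = bbox
        # PDF points -> pixels, padded by 1 px so boundary strokes survive the crop
        left = max(0, min(int(x0 * scale_x) - 1, img.width - 1))
        top_px = max(0, min(int(top * scale_y) - 1, img.height - 1))
        right = max(left + 1, min(int(x1 * scale_x) + 1, img.width))
        bottom_px = max(top_px + 1, min(int(bottom * scale_y) + 1, img.height))
        return img.crop((left, top_px, right, bottom_px))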
natural_pdf/core/page.py
CHANGED
@@ -2235,12 +2235,12 @@ class Page(ClassificationMixin, ExtractionMixin, ShapeDetectionMixin, DescribeMixin):

    def ask(
        self,
-        question: str,
+        question: Union[str, List[str], Tuple[str, ...]],
        min_confidence: float = 0.1,
        model: str = None,
        debug: bool = False,
        **kwargs,
-    ) -> Dict[str, Any]:
+    ) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
        """
        Ask a question about the page content using document QA.
        """
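page.ask() now accepts either one question or a sequence of questions, and the new return annotation suggests a single result dict for a single question and a list of dicts (one per question, in order) for a sequence. For example:

    from natural_pdf import PDF

    page = PDF("invoice.pdf").pages[0]         # hypothetical file

    one = page.ask("What is the total amount?")                    # single result dict
    many = page.ask(["What is the total amount?",
                     "What is the invoice date?"], min_confidence=0.2)

    for res in many:
        print(res)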
@@ -2808,3 +2808,33 @@ class Page(ClassificationMixin, ExtractionMixin, ShapeDetectionMixin, DescribeMixin):
        return None

    # --- End Skew Detection and Correction --- #
+
+    # ------------------------------------------------------------------
+    # Unified analysis storage (maps to metadata["analysis"])
+    # ------------------------------------------------------------------
+
+    @property
+    def analyses(self) -> Dict[str, Any]:
+        if not hasattr(self, "metadata") or self.metadata is None:
+            self.metadata = {}
+        return self.metadata.setdefault("analysis", {})
+
+    @analyses.setter
+    def analyses(self, value: Dict[str, Any]):
+        if not hasattr(self, "metadata") or self.metadata is None:
+            self.metadata = {}
+        self.metadata["analysis"] = value
+
+    def inspect(self, limit: int = 30) -> "InspectionSummary":
+        """
+        Inspect all elements on this page with detailed tabular view.
+        Equivalent to page.find_all('*').inspect().
+
+        Args:
+            limit: Maximum elements per type to show (default: 30)
+
+        Returns:
+            InspectionSummary with element tables showing coordinates,
+            properties, and other details for each element
+        """
+        return self.find_all('*').inspect(limit=limit)
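Page now gets the same unified analysis storage used elsewhere in this release (a dict living at metadata["analysis"], exposed as the analyses property) plus an inspect() shortcut. For example:

    from natural_pdf import PDF

    page = PDF("report.pdf").pages[0]      # hypothetical file

    # Detailed tabular view of every element type, capped per type;
    # equivalent to page.find_all('*').inspect(limit=10)
    summary = page.inspect(limit=10)

    # Analysis results are stored under page.metadata["analysis"]
    page.analyses["review"] = {"checked": True}
    print(page.metadata["analysis"]["review"])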
natural_pdf/core/pdf.py
CHANGED
@@ -263,7 +263,7 @@ class PDF(ExtractionMixin, ExportMixin, ClassificationMixin):

        self._initialize_managers()
        self._initialize_highlighter()
-        self.analyses
+        # Analysis results accessed via self.analyses property (see below)

        # --- Automatic cleanup when object is garbage-collected ---
        self._finalizer = weakref.finalize(

@@ -1490,7 +1490,7 @@ class PDF(ExtractionMixin, ExportMixin, ClassificationMixin):
        if not is_classification_available():
            raise ImportError(
                "Classification dependencies missing. "
-                'Install with: pip install "natural-pdf[
+                'Install with: pip install "natural-pdf[ai]"'
            )
        raise ClassificationError("ClassificationManager not available.")

@@ -1802,6 +1802,26 @@ class PDF(ExtractionMixin, ExportMixin, ClassificationMixin):

    # --- End Classification Mixin Implementation ---

+    # ------------------------------------------------------------------
+    # Unified analysis storage (maps to metadata["analysis"])
+    # ------------------------------------------------------------------
+
+    @property
+    def analyses(self) -> Dict[str, Any]:
+        if not hasattr(self, "metadata") or self.metadata is None:
+            # For PDF, metadata property returns self._pdf.metadata which may be None
+            self._pdf.metadata = self._pdf.metadata or {}
+        if self.metadata is None:
+            # Fallback safeguard
+            self._pdf.metadata = {}
+        return self.metadata.setdefault("analysis", {})  # type: ignore[attr-defined]
+
+    @analyses.setter
+    def analyses(self, value: Dict[str, Any]):
+        if not hasattr(self, "metadata") or self.metadata is None:
+            self._pdf.metadata = self._pdf.metadata or {}
+        self.metadata["analysis"] = value  # type: ignore[attr-defined]
+
    # Static helper for weakref.finalize to avoid capturing 'self'
    @staticmethod
    def _finalize_cleanup(plumber_pdf, temp_file_obj, is_stream):

@@ -1816,5 +1836,5 @@ class PDF(ExtractionMixin, ExportMixin, ClassificationMixin):
                path = temp_file_obj.name if hasattr(temp_file_obj, "name") else None
                if path and os.path.exists(path):
                    os.unlink(path)
-        except Exception:
-
+        except Exception as e:
+            logger.warning(f"Failed to clean up temporary file '{path}': {e}")
natural_pdf/describe/base.py
CHANGED
@@ -269,7 +269,7 @@ def _get_columns_for_type(element_type: str, show_page_column: bool) -> List[str]:
    base_columns = ['x0', 'top', 'x1', 'bottom']

    if element_type == 'word':
-        columns = ['text'] + base_columns + ['font_family', 'size', 'bold', 'italic', 'source', 'confidence']
+        columns = ['text'] + base_columns + ['font_family', 'font_variant', 'size', 'bold', 'italic', 'source', 'confidence']
        # Add color for text elements
        columns.append('color')
    elif element_type == 'rect':

@@ -315,6 +315,16 @@ def _extract_element_value(element: "Element", column: str) -> Any:
        # Fallback to fontname
        return getattr(element, 'fontname', '')

+    elif column == 'font_variant':
+        variant = getattr(element, 'font_variant', None)
+        if variant:
+            return variant
+        # Fallback – try to derive from fontname if property missing
+        fontname = getattr(element, 'fontname', '')
+        if "+" in fontname:
+            return fontname.split("+", 1)[0]
+        return ''
+
    elif column in ['bold', 'italic']:
        value = getattr(element, column, False)
        return value if isinstance(value, bool) else False
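font_variant surfaces the subset prefix of an embedded PDF font: subsetted fonts are named like "ABCDEF+Helvetica-Bold", and the fallback above simply takes the part before the "+" when the element has no font_variant property. A quick illustration (the describe() call on word elements is existing behaviour; the exact table layout may differ, and page is an opened natural-pdf Page as in the earlier sketches):

    fontname = "ABCDEF+Helvetica-Bold"
    font_variant = fontname.split("+", 1)[0] if "+" in fontname else ""
    print(font_variant)   # -> "ABCDEF"

    # Word-element tables from describe()/inspect() now include a font_variant
    # column alongside font_family, size, bold, italic, source, confidence.
    page.find_all("text").describe()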
natural_pdf/describe/summary.py
CHANGED
@@ -128,6 +128,32 @@ class ElementSummary:
            ""
        ]

+    # Added for better VS Code and other frontends support
+    def _repr_html_(self) -> str:  # type: ignore
+        """Return HTML representation so rich rendering works in more frontends.
+
+        Many notebook frontends (including VS Code) give priority to the
+        ``_repr_html_`` method over Markdown. When available, we convert the
+        generated Markdown to HTML using the *markdown* library. If the
+        library is not installed we simply wrap the Markdown in a ``<pre>``
+        block so that at least the plain-text representation is visible.
+        """
+        md_source = self._to_markdown()
+        try:
+            import markdown as _markdown  # pylint: disable=import-error
+
+            # Convert markdown to HTML. We explicitly enable tables so the
+            # element and inspection summaries render nicely.
+            return _markdown.markdown(md_source, extensions=["tables"])
+        except Exception:  # noqa: BLE001, broad-except
+            # Fallback: present the Markdown as-is inside a <pre> block.
+            escaped = (
+                md_source.replace("&", "&amp;")
+                .replace("<", "&lt;")
+                .replace(">", "&gt;")
+            )
+            return f"<pre>{escaped}</pre>"
+

class InspectionSummary(ElementSummary):
    """
natural_pdf/elements/base.py
CHANGED
@@ -9,11 +9,13 @@ from PIL import Image
 # Import selector parsing functions
 from natural_pdf.selectors.parser import parse_selector, selector_to_filter_func
 from natural_pdf.describe.mixin import DescribeMixin
+from natural_pdf.classification.mixin import ClassificationMixin

 if TYPE_CHECKING:
     from natural_pdf.core.page import Page
     from natural_pdf.elements.collections import ElementCollection
     from natural_pdf.elements.region import Region
+    from natural_pdf.classification.manager import ClassificationManager  # noqa: F401


 def extract_bbox(obj: Any) -> Optional[Tuple[float, float, float, float]]:

@@ -172,8 +174,8 @@ class DirectionalMixin:
        # Adjust cross boundaries if cross_size is 'element'
        if cross_size == "element":
            if is_horizontal:  # Adjust y0, y1
-                y0 = min(y0, self.
-                y1 = max(y1, self.
+                y0 = min(y0, self.top)
+                y1 = max(y1, self.bottom)
            else:  # Adjust x0, x1
                x0 = min(x0, self.x0)
                x1 = max(x1, self.x1)

@@ -413,7 +415,7 @@ class DirectionalMixin:
        return new_region


-class Element(DirectionalMixin, DescribeMixin):
+class Element(DirectionalMixin, ClassificationMixin, DescribeMixin):
    """
    Base class for all PDF elements.


@@ -432,6 +434,10 @@ class Element(DirectionalMixin, DescribeMixin):
        self._obj = obj
        self._page = page

+        # Containers for per-element metadata and analysis results (e.g., classification)
+        self.metadata: Dict[str, Any] = {}
+        # Access analysis results via self.analyses property (see below)
+
    @property
    def type(self) -> str:
        """Element type."""

@@ -850,6 +856,7 @@ class Element(DirectionalMixin, DescribeMixin):
        color: Optional[Union[Tuple, str]] = "red",  # Default color for single element
        label: Optional[str] = None,
        width: Optional[int] = None,  # Add width parameter
+        crop: bool = False,  # NEW: Crop to element bounds before legend
    ) -> Optional["Image.Image"]:
        """
        Show the page with only this element highlighted temporarily.

@@ -861,6 +868,8 @@ class Element(DirectionalMixin, DescribeMixin):
            color: Color to highlight this element (default: red)
            label: Optional label for this element in the legend
            width: Optional width for the output image in pixels
+            crop: If True, crop the rendered image to this element's
+                bounding box before legends/overlays are added.

        Returns:
            PIL Image of the page with only this element highlighted, or None if error.

@@ -887,6 +896,9 @@ class Element(DirectionalMixin, DescribeMixin):
            "use_color_cycling": False,  # Explicitly false for single preview
        }

+        # Determine crop bbox
+        crop_bbox = self.bbox if crop else None
+
        # Check if we actually got geometry data
        if temp_highlight_data["bbox"] is None and temp_highlight_data["polygon"] is None:
            logger.warning(f"Cannot show element, failed to get bbox or polygon: {self}")

@@ -901,6 +913,7 @@ class Element(DirectionalMixin, DescribeMixin):
                width=width,  # Pass the width parameter
                labels=labels,
                legend_position=legend_position,
+                crop_bbox=crop_bbox,
            )
        except Exception as e:
            logger.error(f"Error calling render_preview for element {self}: {e}", exc_info=True)
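Element.show() gains a crop flag that feeds the highlighter's new crop_bbox parameter, so a preview can be trimmed to the element's own bounding box before any legend is added. For example (the selector is illustrative; page is an opened natural-pdf Page as in the earlier sketches):

    el = page.find('text:contains("Total")')

    # Full-page preview with this element highlighted (previous behaviour)
    img_full = el.show(color="red", label="total")

    # New: crop the rendered preview to the element's bounding box
    img_crop = el.show(crop=True, width=400)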
@@ -1070,3 +1083,68 @@ class Element(DirectionalMixin, DescribeMixin):
            case=case,
            **kwargs,
        )
+
+    # ------------------------------------------------------------------
+    # ClassificationMixin requirements
+    # ------------------------------------------------------------------
+
+    def _get_classification_manager(self) -> "ClassificationManager":
+        """Access the shared ClassificationManager via the parent PDF."""
+        if (
+            not hasattr(self, "page")
+            or not hasattr(self.page, "pdf")
+            or not hasattr(self.page.pdf, "get_manager")
+        ):
+            raise AttributeError(
+                "ClassificationManager cannot be accessed: Parent Page, PDF, or get_manager method missing."
+            )
+
+        return self.page.pdf.get_manager("classification")
+
+    def _get_classification_content(self, model_type: str, **kwargs):  # type: ignore[override]
+        """Return either text or an image, depending on model_type (text|vision)."""
+        if model_type == "text":
+            text_content = self.extract_text(layout=False)  # type: ignore[arg-type]
+            if not text_content or text_content.isspace():
+                raise ValueError(
+                    "Cannot classify element with 'text' model: No text content found."
+                )
+            return text_content
+
+        elif model_type == "vision":
+            # Delegate to Region implementation via a temporary expand()
+            resolution = kwargs.get("resolution", 150)
+            from natural_pdf.elements.region import Region  # Local import to avoid cycles
+
+            return self.expand().to_image(
+                resolution=resolution,
+                include_highlights=False,
+                crop=True,
+            )
+        else:
+            raise ValueError(f"Unsupported model_type for classification: {model_type}")
+
+    # ------------------------------------------------------------------
+    # Lightweight to_image proxy (vision models, previews, etc.)
+    # ------------------------------------------------------------------
+
+    def to_image(self, *args, **kwargs):  # type: ignore[override]
+        """Generate an image of this element by delegating to a temporary Region."""
+        return self.expand().to_image(*args, **kwargs)
+
+    # ------------------------------------------------------------------
+    # Unified analysis storage (maps to metadata["analysis"])
+    # ------------------------------------------------------------------
+
+    @property
+    def analyses(self) -> Dict[str, Any]:
+        """Dictionary holding model-generated analysis objects (classification, extraction, …)."""
+        if not hasattr(self, "metadata") or self.metadata is None:
+            self.metadata = {}
+        return self.metadata.setdefault("analysis", {})
+
+    @analyses.setter
+    def analyses(self, value: Dict[str, Any]):
+        if not hasattr(self, "metadata") or self.metadata is None:
+            self.metadata = {}
+        self.metadata["analysis"] = value
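With Element now mixing in ClassificationMixin, individual elements can be classified directly: text mode uses extract_text(), while vision mode renders the element through the new to_image() proxy (a temporary expand()-ed Region, cropped to the element). A sketch, assuming the mixin exposes classify() on elements the same way it does on pages and regions, with an illustrative selector:

    el = page.find('text[size>=14]')

    # Text-based classification of this element's own content
    el.classify(["heading", "body", "footnote"], using="text")

    # Vision-based classification renders just the element
    el.classify(["stamp", "signature", "logo"], using="vision", resolution=150)

    # Per-element analysis results live under el.metadata["analysis"]
    print(el.analyses)

    # The to_image() proxy is also handy on its own: a cropped image of the element's region
    img = el.to_image(resolution=150, crop=True)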