docreader-ocr 0.2.2__tar.gz → 0.2.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30) hide show
  1. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/PKG-INFO +2 -1
  2. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/pyproject.toml +2 -1
  3. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/src/docreader/config.py +19 -0
  4. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/src/docreader/factory.py +39 -0
  5. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/src/docreader/hub.py +11 -3
  6. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/src/docreader/pipeline.py +92 -30
  7. docreader_ocr-0.2.4/src/docreader/resolver/__init__.py +4 -0
  8. docreader_ocr-0.2.4/src/docreader/resolver/base.py +40 -0
  9. docreader_ocr-0.2.4/src/docreader/resolver/lvl_resolver.py +195 -0
  10. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/src/docreader/schemas.py +5 -1
  11. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/.github/workflows/publish.yaml +0 -0
  12. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/.gitignore +0 -0
  13. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/LICENSE +0 -0
  14. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/README.md +0 -0
  15. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/src/docreader/__init__.py +0 -0
  16. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/src/docreader/classifier/__init__.py +0 -0
  17. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/src/docreader/classifier/base.py +0 -0
  18. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/src/docreader/classifier/yolo_classifier.py +0 -0
  19. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/src/docreader/detector/__init__.py +0 -0
  20. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/src/docreader/detector/base.py +0 -0
  21. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/src/docreader/detector/yolo_obb.py +0 -0
  22. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/src/docreader/ocr/__init__.py +0 -0
  23. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/src/docreader/ocr/base.py +0 -0
  24. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/src/docreader/ocr/easyocr_engine.py +0 -0
  25. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/src/docreader/preprocessing/__init__.py +0 -0
  26. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/src/docreader/preprocessing/geometry.py +0 -0
  27. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/src/docreader/utils.py +0 -0
  28. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/tests/test_hub.py +0 -0
  29. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/tests/test_pipeline.py +0 -0
  30. {docreader_ocr-0.2.2 → docreader_ocr-0.2.4}/tests/test_run.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: docreader-ocr
3
- Version: 0.2.2
3
+ Version: 0.2.4
4
4
  Summary: Document OCR pipeline: classify → detect fields → recognize text
5
5
  Project-URL: Homepage, https://github.com/mishanyacorleone/docreader
6
6
  Project-URL: Repository, https://github.com/mishanyacorleone/docreader
@@ -18,6 +18,7 @@ Requires-Python: >=3.9
18
18
  Requires-Dist: easyocr>=1.7
19
19
  Requires-Dist: numpy>=1.24
20
20
  Requires-Dist: opencv-python>=4.8
21
+ Requires-Dist: rapidfuzz>=3.14.0
21
22
  Requires-Dist: requests>=2.28
22
23
  Requires-Dist: torch>=2.0
23
24
  Requires-Dist: torchvision>=0.15
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "docreader-ocr"
7
- version = "0.2.2"
7
+ version = "0.2.4"
8
8
  description = "Document OCR pipeline: classify → detect fields → recognize text"
9
9
  readme = "README.md"
10
10
  license = {text = "MIT"}
@@ -32,6 +32,7 @@ dependencies = [
32
32
  "numpy>=1.24",
33
33
  "requests>=2.28",
34
34
  "tqdm>=4.65",
35
+ "RapidFuzz>=3.14.0"
35
36
  ]
36
37
 
37
38
  [project.optional-dependencies]
@@ -6,6 +6,13 @@ from dataclasses import dataclass, field
6
6
 
7
7
# Zone names whose crops are kept but are never sent through OCR.
DEFAULT_SKIP_OCR_ZONES = frozenset({"stamp", "gerb"})

# Classifier labels that are ambiguous and require the extra resolver step.
DEFAULT_AMBIGUOUS_CLASSES = frozenset({"attestat/diplom"})

# Keywords used to fuzzy-match the OCR text of the "lvl" field to a subtype.
DEFAULT_SUBTYPE_KEYWORDS: dict[str, list[str]] = {
    "attestat": ["аттестат"],
    "diplom": ["диплом"]
}
15
+
9
16
 
10
17
  @dataclass
11
18
  class PipelineConfig:
@@ -35,6 +42,18 @@ class PipelineConfig:
35
42
  ocr_download_enabled: bool = False
36
43
  skip_ocr_zones: frozenset[str] = DEFAULT_SKIP_OCR_ZONES
37
44
 
45
+ # Resolver
46
+ ambiguous_classes: frozenset[str] = field(
47
+ default_factory=lambda: DEFAULT_AMBIGUOUS_CLASSES
48
+ )
49
+ resolver_weights: str = "lvl_detector.pt"
50
+ resolver_confidence: float = 0.25
51
+ resolver_subtype_keywords: dict[str, list[str]] = field(
52
+ default_factory=lambda: dict(DEFAULT_SUBTYPE_KEYWORDS)
53
+ )
54
+ resolver_fuzzy_threshold: float = 60.0
55
+ resolver_fallback: str | None = None
56
+
38
57
  enable_deskew: bool = True # Выравнивание по линиям Хафа
39
58
  return_crops: bool = True # Сохранять кропы зон в результат
40
59
 
@@ -19,6 +19,7 @@ from docreader.hub import ensure_model
19
19
  from docreader.classifier.yolo_classifier import DocClassifier
20
20
  from docreader.detector.yolo_obb import ZoneDetector
21
21
  from docreader.ocr.easyocr_engine import TextRecognizer
22
+ from docreader.resolver.lvl_resolver import LvlSubtypeResolver
22
23
 
23
24
 
24
25
  def create_classifier(
@@ -127,3 +128,41 @@ def create_ocr(
127
128
  }
128
129
  defaults.update(kwargs)
129
130
  return TextRecognizer(**defaults)
131
+
132
+
133
def create_resolver(
    config: PipelineConfig | None = None,
    ocr_engine: TextRecognizer | None = None,
    **kwargs
) -> LvlSubtypeResolver:
    """
    Create the document-subtype resolver (attestat/diplom).

    Args:
        config: pipeline configuration (defaults are used when None).
        ocr_engine: ready-made OCR engine (a new one is created when None).
        **kwargs: overrides for LvlSubtypeResolver parameters
            (weights_path, subtype_keywords, fuzzy_threshold,
            confidence_threshold, fallback, device).

    Returns:
        A ready-to-use LvlSubtypeResolver.

    Examples:
        resolver = create_resolver()
        resolver = create_resolver(fuzzy_threshold=70.0)
        resolver = create_resolver(ocr_engine=my_ocr)
    """
    cfg = config or PipelineConfig()
    # Reuse the caller's engine when given; otherwise build one from config.
    ocr = ocr_engine or create_ocr(cfg)

    defaults = {
        "weights_path": cfg.resolver_weights,
        # BUG FIX: previously the raw `ocr_engine` argument (possibly None)
        # was passed here, silently discarding the freshly created engine.
        "ocr_engine": ocr,
        "subtype_keywords": cfg.resolver_subtype_keywords,
        "fuzzy_threshold": cfg.resolver_fuzzy_threshold,
        "confidence_threshold": cfg.resolver_confidence,
        "fallback": cfg.resolver_fallback,
        "device": cfg.resolve_device(),
    }
    defaults.update(kwargs)
    return LvlSubtypeResolver(**defaults)
@@ -17,6 +17,7 @@ from tqdm import tqdm
17
17
 
18
18
  logger = logging.getLogger(__name__)
19
19
 
20
+ _BASE_LVL_DETECTOR = "https://github.com/mishanyacorleone/docreader/releases/download/v0.2.2"
20
21
  _BASE_URL_CLASSIFIER = "https://github.com/mishanyacorleone/docreader/releases/download/v0.2.1"
21
22
  _BASE_URL = "https://github.com/mishanyacorleone/docreader/releases/download/v0.1.0"
22
23
 
@@ -24,8 +25,8 @@ MODEL_REGISTRY: dict[str, dict] = {
24
25
  # === Классификатор документов (YOLO OBB) ===
25
26
  "doc_classifier.pt": {
26
27
  "url": f"{_BASE_URL_CLASSIFIER}/doc_classifier.pt",
27
- "sha256": "d912884d8517cf776e989dc4fced855f34c2ee1d8b17732b778d7e84b7de84fc",
28
- "size_mb": 6.03,
28
+ "sha256": "b97567bfe70a13974219a0cf181643f322868054bac9c6e9ff812eb6ec2607b2",
29
+ "size_mb": 5.68,
29
30
  },
30
31
 
31
32
  # === Детекторы зон ===
@@ -56,7 +57,14 @@ MODEL_REGISTRY: dict[str, dict] = {
56
57
  "sha256": "832ce5a7f3a1086d81beb1c991347e3f545a425646bc87f3f576ae06fecd2420",
57
58
  "size_mb": 87.1,
58
59
  "extract_to": "easyocr"
59
- }
60
+ },
61
+
62
+ # === Resolver ===
63
+ "lvl_detector.pt": {
64
+ "url": f"{_BASE_LVL_DETECTOR}/lvl_detector.pt",
65
+ "sha256": "10bc71dbf8de891bc591154c3c369d8db2daa329249ef4e5b4b15508e8441ba4",
66
+ "size_mb": 5.63,
67
+ },
60
68
  }
61
69
 
62
70
  def get_cache_dir() -> Path:
@@ -14,10 +14,9 @@ from docreader.hub import ensure_model
14
14
  from docreader.preprocessing import deskew_image, crop_obb_region
15
15
 
16
16
  from docreader.classifier.base import BaseClassifier
17
-
18
17
  from docreader.detector.base import BaseDetector
19
-
20
18
  from docreader.ocr.base import BaseOcrEngine
19
+ from docreader.resolver.base import BaseSubtypeResolver
21
20
 
22
21
  logger = logging.getLogger(__name__)
23
22
 
@@ -47,6 +46,7 @@ class DocReader:
47
46
  classifier: Optional[BaseClassifier] = None,
48
47
  detector: Optional[BaseDetector] = None,
49
48
  ocr_engine: Optional[BaseOcrEngine] = None,
49
+ subtype_resolver: Optional[BaseSubtypeResolver] = None
50
50
  ):
51
51
  self._config = config or PipelineConfig()
52
52
  self._device = self._config.resolve_device()
@@ -56,6 +56,7 @@ class DocReader:
56
56
  self._classifier = classifier or self._build_classifier()
57
57
  self._detector = detector or self._build_detector()
58
58
  self._ocr = ocr_engine or self._build_ocr()
59
+ self._resolver = subtype_resolver or self._build_resolver()
59
60
 
60
61
  def _build_classifier(self) -> BaseClassifier:
61
62
  """Создаёт классификатор из конфига."""
@@ -101,6 +102,28 @@ class DocReader:
101
102
  recog_network=self._config.ocr_recog_network,
102
103
  download_enabled=self._config.ocr_download_enabled,
103
104
  )
105
+
106
+ def _build_resolver(self) -> Optional[BaseSubtypeResolver]:
107
+ """
108
+ Создаёт resolver только если есть неоднозначные классы в конфиге.
109
+ Возвращает None, если resolver не нужен
110
+ """
111
+ if not self._config.ambiguous_classes:
112
+ return None
113
+
114
+ from docreader.resolver.lvl_resolver import LvlSubtypeResolver
115
+
116
+ weights_path = ensure_model(self._config.resolver_weights)
117
+
118
+ return LvlSubtypeResolver(
119
+ weights_path=weights_path,
120
+ ocr_engine=self._ocr,
121
+ subtype_keywords=self._config.resolver_subtype_keywords,
122
+ fuzzy_threshold=self._config.resolver_fuzzy_threshold,
123
+ confidence_threshold=self._config.resolver_confidence,
124
+ fallback=self._config.resolver_fallback,
125
+ device=self._device,
126
+ )
104
127
 
105
128
  # === Публичный API ===
106
129
 
@@ -111,11 +134,11 @@ class DocReader:
111
134
  ) -> PageResult:
112
135
  """
113
136
  Полный пайплайн: находит все документы и распознаёт.
114
-
137
+
115
138
  Args:
116
139
  source: путь к файлу или numpy array (BGR).
117
140
  return_crops: сохранять ли кропы.
118
-
141
+
119
142
  Returns:
120
143
  PageResult со списком найденных документов.
121
144
  """
@@ -124,17 +147,14 @@ class DocReader:
124
147
  if return_crops is not None
125
148
  else self._config.return_crops
126
149
  )
127
-
150
+
128
151
  image = load_image(source)
129
-
130
- # 1. Классификация
131
152
  classified_docs = self._classifier.classify(image)
132
-
153
+
133
154
  if not classified_docs:
134
155
  logger.info("No documents found")
135
156
  return PageResult(documents=[])
136
-
137
- # 2. Обработка каждого документа
157
+
138
158
  documents: list[DocumentResult] = []
139
159
  for doc in classified_docs:
140
160
  result = self._process_single_document(
@@ -145,11 +165,11 @@ class DocReader:
145
165
  save_crops=save_crops,
146
166
  )
147
167
  documents.append(result)
148
-
168
+
149
169
  page_result = PageResult(documents=documents)
150
170
  logger.info(f"Complete: {page_result}")
151
171
  return page_result
152
-
172
+
153
173
  def process_batch(
154
174
  self,
155
175
  sources: list[ImageSource],
@@ -159,6 +179,45 @@ class DocReader:
159
179
  return [self.process(src, return_crops) for src in sources]
160
180
 
161
181
  # === Внутренняя логика ===
182
+
183
def _resolve_doc_type(
    self,
    doc_type: str,
    doc_image: np.ndarray
) -> tuple[str, dict]:
    """
    Refine the document type via the resolver when the class is ambiguous.

    Returns:
        Tuple of (refined doc_type, resolve metadata).
    """
    needs_resolution = doc_type in self._config.ambiguous_classes
    if not needs_resolution or self._resolver is None:
        return doc_type, {}

    outcome = self._resolver.resolve(doc_image)
    meta = {
        "resolver_ocr_text": outcome.ocr_text,
        "resolver_ocr_confidence": outcome.confidence,
        "resolver_fuzzy_score": outcome.fuzzy_score
    }

    if not outcome.resolve:
        # Resolution failed — keep the ambiguous type, expose diagnostics.
        logger.warning(
            f"Could not resolve subtype for '{doc_type}': "
            f"text='{outcome.ocr_text}', "
            f"score={outcome.fuzzy_score:.1f}"
        )
        return doc_type, meta

    logger.info(
        f"Resolved '{doc_type}' -> '{outcome.subtype}' "
        f"(text='{outcome.ocr_text}', "
        f"fuzzy={outcome.fuzzy_score:.1f})"
    )
    return outcome.subtype, meta
162
221
 
163
222
  def _process_single_document(
164
223
  self,
@@ -169,42 +228,44 @@ class DocReader:
169
228
  save_crops: bool,
170
229
  ) -> DocumentResult:
171
230
  """Обрабатывает один документ."""
172
-
173
- if doc_type not in self._detector.supported_doc_types:
174
- logger.warning(f"No detector for '{doc_type}'")
231
+ if self._config.enable_deskew:
232
+ doc_image = deskew_image(doc_image)
233
+
234
+ resolved_type, resolve_meta = self._resolve_doc_type(doc_type, doc_image)
235
+
236
+ if resolved_type not in self._detector.supported_doc_types:
237
+ logger.warning(f"No detector for '{resolved_type}'")
175
238
  return DocumentResult(
176
- doc_type=doc_type,
239
+ doc_type=resolved_type,
177
240
  doc_confidence=doc_confidence,
178
241
  zones=[],
179
242
  doc_bbox=doc_bbox.tolist(),
180
243
  doc_crop=doc_image if save_crops else None,
244
+ resolve_meta=resolve_meta,
181
245
  )
182
-
183
- if self._config.enable_deskew:
184
- doc_image = deskew_image(doc_image)
185
-
186
- detections = self._detector.detect(doc_image, doc_type)
187
- logger.info(f"'{doc_type}': {len(detections)} zones")
188
-
246
+
247
+ detections = self._detector.detect(doc_image, resolved_type)
248
+ logger.info(f"'{resolved_type}': {len(detections)} zones")
249
+
189
250
  zones: list[ZoneResult] = []
190
251
  for det in detections:
191
252
  zone = self._process_zone(doc_image, det, save_crops)
192
253
  if zone is not None:
193
254
  zones.append(zone)
194
-
255
+
195
256
  return DocumentResult(
196
- doc_type=doc_type,
257
+ doc_type=resolved_type,
197
258
  doc_confidence=doc_confidence,
198
259
  zones=zones,
199
260
  doc_bbox=doc_bbox.tolist(),
200
261
  doc_crop=doc_image if save_crops else None,
262
+ resolve_meta=resolve_meta,
201
263
  )
202
264
 
203
265
  def _process_zone(self, image, detection, save_crops):
204
266
  """Обрабатывает одну зону."""
205
-
206
267
  zone_name = detection.zone_name
207
-
268
+
208
269
  if zone_name in self._config.skip_ocr_zones:
209
270
  return ZoneResult(
210
271
  name=zone_name,
@@ -212,14 +273,14 @@ class DocReader:
212
273
  confidence=detection.confidence,
213
274
  bbox=detection.obb_points.tolist(),
214
275
  )
215
-
276
+
216
277
  crop = crop_obb_region(image, detection.obb_points)
217
278
  if crop is None or crop.size == 0:
218
279
  logger.warning(f"Empty crop for '{zone_name}'")
219
280
  return None
220
-
281
+
221
282
  ocr_result = self._ocr.recognize(crop)
222
-
283
+
223
284
  return ZoneResult(
224
285
  name=zone_name,
225
286
  text=ocr_result.text,
@@ -239,6 +300,7 @@ class DocReader:
239
300
  self._classifier = None
240
301
  self._detector = None
241
302
  self._ocr = None
303
+ self._resolver = None
242
304
  try:
243
305
  import gc
244
306
  gc.collect()
@@ -0,0 +1,4 @@
1
"""Public API of the resolver subpackage."""

from docreader.resolver.base import BaseSubtypeResolver, ResolveResult
from docreader.resolver.lvl_resolver import LvlSubtypeResolver

__all__ = ["BaseSubtypeResolver", "ResolveResult", "LvlSubtypeResolver"]
@@ -0,0 +1,40 @@
1
+ """
2
+ Абстрактный интерфейс resolver'a подтипа документа.
3
+ """
4
+
5
+ from abc import ABC, abstractmethod
6
+ from dataclasses import dataclass
7
+ from typing import Optional
8
+
9
+ import numpy as np
10
+
11
+
12
@dataclass
class ResolveResult:
    """
    Result of determining a document subtype.
    """
    subtype: Optional[str]  # resolved subtype name, or None when unresolved
    ocr_text: str           # raw OCR text of the detected field
    confidence: float       # OCR confidence
    fuzzy_score: float      # best fuzzy-match score

    @property
    def resolve(self) -> bool:
        # FIX: was annotated `-> None` although it returns a bool.
        return self.subtype is not None
25
+
26
+
27
class BaseSubtypeResolver(ABC):
    """
    Interface for determining a document's subtype.

    Used when the classifier cannot distinguish two similar
    classes (attestat/diplom) and an extra step is required.
    """

    @abstractmethod
    def resolve(self, image: np.ndarray) -> ResolveResult:
        """
        Determine the document subtype from its crop.
        """
        ...
@@ -0,0 +1,195 @@
1
+ """
2
+ Resolver подтипа документа через детекцию поля lvl + OCR + fuzzy matching.
3
+ """
4
+
5
+ import logging
6
+ from typing import Optional
7
+
8
+ import numpy as np
9
+ from rapidfuzz import process, fuzz
10
+ from ultralytics import YOLO
11
+
12
+ from docreader.ocr.base import BaseOcrEngine
13
+ from docreader.preprocessing.geometry import crop_obb_region
14
+ from docreader.resolver.base import BaseSubtypeResolver, ResolveResult
15
+
16
+ logger = logging.getLogger(__name__)
17
+
18
+
19
class LvlSubtypeResolver(BaseSubtypeResolver):
    """
    Determines the document subtype (attestat/diplom) via:
        1. YOLO OBB — detects the "lvl" field on the document crop
        2. OCR — recognizes the field's text
        3. Fuzzy matching — matches the text against subtype keywords

    Examples:
        resolver = LvlSubtypeResolver(
            weights_path="/path/to/lvl_detector.pt",
            ocr_engine=ocr,
            subtype_keywords={
                "attestat": ["аттестат", "attestat"],
                "diplom": ["диплом", "diplom"],
            },
        )
        result = resolver.resolve(doc_crop)
        if result.resolve:
            print(result.subtype)  # "attestat" or "diplom"

    Args:
        weights_path: path to the YOLO model for detecting the "lvl" field.
        ocr_engine: OCR engine (BaseOcrEngine).
        subtype_keywords: mapping {subtype: [keywords]}.
        fuzzy_threshold: minimum score to accept a match (0-100).
        confidence_threshold: minimum detector confidence.
        fallback: default subtype when resolution fails (None = unresolved).
        device: device ("cpu", "cuda").
    """

    def __init__(
        self,
        weights_path: str,
        ocr_engine: BaseOcrEngine,
        subtype_keywords: dict[str, list[str]],
        fuzzy_threshold: float = 60.0,
        confidence_threshold: float = 0.25,
        fallback: Optional[str] = None,
        device: str = "cpu",
    ):
        self._model = YOLO(weights_path)
        self._ocr = ocr_engine
        self._fuzzy_threshold = fuzzy_threshold
        self._confidence_threshold = confidence_threshold
        self._fallback = fallback
        self._device = device

        # Flat keyword list plus a word -> subtype mapping; keywords are
        # lower-cased once here so matching is case-insensitive.
        self._keywords: list[str] = []
        self._keyword_to_subtype: dict[str, str] = {}
        for subtype, words in subtype_keywords.items():
            for word in words:
                normalized = word.lower()
                self._keywords.append(normalized)
                self._keyword_to_subtype[normalized] = subtype

        logger.info(
            f"LvlSubtypeResolver initialized: "
            f"subtypes={list(subtype_keywords.keys())}, "
            f"threshold={fuzzy_threshold}, fallback={fallback}"
        )

    def resolve(self, image: np.ndarray) -> ResolveResult:
        """
        Determines the document subtype from its crop.

        Args:
            image: BGR image of the document.

        Returns:
            ResolveResult with the subtype and diagnostic information.
        """
        lvl_crop = self._detect_lvl_field(image)

        if lvl_crop is None:
            logger.warning("lvl field not detected, using fallback")
            return ResolveResult(
                subtype=self._fallback,
                ocr_text="",
                confidence=0.0,
                fuzzy_score=0.0,
            )

        ocr_result = self._ocr.recognize(lvl_crop)
        logger.debug(
            f"lvl OCR: text='{ocr_result.text}', conf={ocr_result.confidence:.3f}"
        )

        if not ocr_result.text.strip():
            logger.warning("lvl OCR returned empty text, using fallback")
            return ResolveResult(
                subtype=self._fallback,
                ocr_text=ocr_result.text,
                confidence=ocr_result.confidence,
                fuzzy_score=0.0,
            )

        subtype, fuzzy_score = self._match_subtype(ocr_result.text)

        if subtype is None:
            # NOTE(review): unlike the two failure paths above, _fallback is
            # NOT substituted here even though the log mentions it — confirm
            # this asymmetry is intentional.
            logger.warning(
                f"Fuzzy match below threshold: "
                f"text='{ocr_result.text}', score={fuzzy_score:.1f}, "
                f"threshold={self._fuzzy_threshold}, fallback={self._fallback}"
            )

        return ResolveResult(
            subtype=subtype,
            ocr_text=ocr_result.text,
            confidence=ocr_result.confidence,
            fuzzy_score=fuzzy_score,
        )

    def _detect_lvl_field(self, image: np.ndarray) -> Optional[np.ndarray]:
        """
        Detects the "lvl" field and returns its crop.

        Scans all OBB detections and keeps the highest-confidence "lvl"
        detection that yields a non-empty crop; returns None otherwise.
        """

        results = self._model(image, device=self._device, verbose=False)

        if results[0].obb is None:
            return None

        best_conf = -1.0
        best_crop = None

        for det in results[0].obb:
            confidence = float(det.conf.cpu())
            if confidence < self._confidence_threshold:
                continue

            zone_name = self._model.names[int(det.cls.cpu())]
            if zone_name != "lvl":
                continue

            # Skip detections no better than the current best.
            if confidence <= best_conf:
                continue

            obb_points = det.xyxyxyxy.cpu().numpy().flatten()
            crop = crop_obb_region(image, obb_points)

            if crop is not None and crop.size > 0:
                best_conf = confidence
                best_crop = crop

        return best_crop

    def _match_subtype(self, text: str) -> tuple[Optional[str], float]:
        """
        Matches the OCR text against the keywords via fuzzy matching.

        Returns:
            Tuple of (subtype or None, fuzzy score).
        """
        normalized = text.lower().strip()

        match = process.extractOne(
            normalized,
            self._keywords,
            scorer=fuzz.WRatio,
        )

        if match is None:
            return None, 0.0

        best_keyword, score, _ = match

        if score < self._fuzzy_threshold:
            return None, float(score)

        subtype = self._keyword_to_subtype[best_keyword]
        logger.debug(
            f"Fuzzy matched: '{normalized}' -> '{best_keyword}' "
            f"(subtype={subtype}, score={score:.1f})"
        )
        return subtype, float(score)
+ return subtype, float(score)
195
+
@@ -33,6 +33,7 @@ class DocumentResult:
33
33
  zones: list[ZoneResult] = field(default_factory=list)
34
34
  doc_bbox: Optional[list[float]] = None # координаты документа в исходном изображении
35
35
  doc_crop: Optional[np.ndarray] = None # кроп документа
36
+ resolve_meta: dict = field(default_factory=dict) # диагностика resolver'a
36
37
 
37
38
  @property
38
39
  def fields(self) -> dict[str, str]:
@@ -42,7 +43,7 @@ class DocumentResult:
42
43
  return {zone.name: zone.text for zone in self.zones}
43
44
 
44
45
  def to_dict(self) -> dict:
45
- return {
46
+ result = {
46
47
  "document": {
47
48
  "doc_type": self.doc_type,
48
49
  "doc_confidence": round(self.doc_confidence, 4),
@@ -50,6 +51,9 @@ class DocumentResult:
50
51
  "fields": self.fields
51
52
  }
52
53
  }
54
+ if self.resolve_meta:
55
+ result["document"]["resolve_meta"] = self.resolve_meta
56
+ return result
53
57
 
54
58
  def __repr__(self) -> str:
55
59
  return (
File without changes
File without changes
File without changes