docreader-ocr 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- docreader/__init__.py +17 -0
- docreader/classifier/__init__.py +4 -0
- docreader/classifier/base.py +26 -0
- docreader/classifier/mobilenet.py +80 -0
- docreader/config.py +46 -0
- docreader/detector/__init__.py +4 -0
- docreader/detector/base.py +36 -0
- docreader/detector/yolo_obb.py +57 -0
- docreader/hub.py +182 -0
- docreader/ocr/__init__.py +4 -0
- docreader/ocr/base.py +35 -0
- docreader/ocr/easyocr_engine.py +68 -0
- docreader/pipeline.py +242 -0
- docreader/preprocessing/__init__.py +3 -0
- docreader/preprocessing/geometry.py +101 -0
- docreader/schemas.py +50 -0
- docreader/utils.py +38 -0
- docreader_ocr-0.1.2.dist-info/METADATA +33 -0
- docreader_ocr-0.1.2.dist-info/RECORD +21 -0
- docreader_ocr-0.1.2.dist-info/WHEEL +4 -0
- docreader_ocr-0.1.2.dist-info/licenses/LICENSE +21 -0
docreader/__init__.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
"""
|
|
2
|
+
DocReader - библиотека для распознавания текста с документов
|
|
3
|
+
|
|
4
|
+
Использование:
|
|
5
|
+
from docreader import DocReader
|
|
6
|
+
|
|
7
|
+
reader = DocReader(models_dir="./models")
|
|
8
|
+
result = reader.process("passport.jpg")
|
|
9
|
+
print(result.doc_type)
|
|
10
|
+
print(result.fields)
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from docreader.pipeline import DocReader
|
|
14
|
+
from docreader.schemas import DocumentResult, ZoneResult
|
|
15
|
+
|
|
16
|
+
__all__ = ["DocReader", "DocumentResult", "ZoneResult"]
|
|
17
|
+
__version__ = "0.1.0"
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
"""Абстрактный интерфейс классификатора документов."""
|
|
2
|
+
|
|
3
|
+
from abc import ABC, abstractmethod
|
|
4
|
+
import numpy as np
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class BaseClassifier(ABC):
|
|
8
|
+
"""
|
|
9
|
+
Интерфейс для классификатора типа документа.
|
|
10
|
+
|
|
11
|
+
Чтобы подключить свой классификатор, наследуйтесь от этого класса
|
|
12
|
+
и реализуйте метод 'predict'.
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
@abstractmethod
|
|
16
|
+
def predict(self, image: np.ndarray) -> tuple[str, float]:
|
|
17
|
+
"""
|
|
18
|
+
Классифицирует изображение документа.
|
|
19
|
+
|
|
20
|
+
Args:
|
|
21
|
+
image: BGR изображение (numpy array).
|
|
22
|
+
|
|
23
|
+
Returns:
|
|
24
|
+
Кортеж (метка_класса, уверенность).
|
|
25
|
+
"""
|
|
26
|
+
...
|
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
"""Классификатор документов на основе MobileNetV2."""
|
|
2
|
+
|
|
3
|
+
import numpy as np
|
|
4
|
+
import torch
|
|
5
|
+
import torch.nn as nn
|
|
6
|
+
import torch.nn.functional as F
|
|
7
|
+
from torchvision import models, transforms
|
|
8
|
+
|
|
9
|
+
from docreader.classifier.base import BaseClassifier
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class MobileNetClassifier(BaseClassifier):
    """
    Document-type classifier based on MobileNetV2.

    Args:
        weights_path: path to the weights file (.pth).
        class_labels: list of class labels; output index i of the model
            maps to class_labels[i].
        device: device ("cpu" / "cuda").
    """

    # Standard ImageNet preprocessing (shared by all instances).
    _transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225],
        ),
    ])

    def __init__(
        self,
        weights_path: str,
        class_labels: list[str],
        device: str = "cpu",
    ):
        self._class_labels = class_labels
        self._device = device

        num_classes = len(class_labels)
        self._model = self._build_model(num_classes)
        self._load_weights(weights_path)

    def _build_model(self, num_classes: int) -> nn.Module:
        """Build an uninitialized MobileNetV2 with a resized final layer."""
        # `pretrained=` has been deprecated since torchvision 0.13;
        # `weights=None` is the supported way to get an uninitialized net.
        net = models.mobilenet_v2(weights=None)
        net.classifier[1] = nn.Linear(net.last_channel, num_classes)
        return net

    def _load_weights(self, path: str) -> None:
        """Load a state dict, stripping any "net." key prefix, and eval()."""
        # NOTE(review): weights_only=False allows arbitrary pickled objects.
        # The bundled checkpoints are first-party and SHA-256 verified by
        # hub.py, but consider weights_only=True if the checkpoint is a
        # plain tensor state dict — confirm before changing.
        state_dict = torch.load(path, map_location=self._device, weights_only=False)
        # Checkpoints saved from a wrapper module prefix keys with "net.".
        new_state_dict = {
            key.removeprefix("net."): value for key, value in state_dict.items()
        }

        self._model.load_state_dict(new_state_dict)
        self._model.to(self._device)
        self._model.eval()

    @torch.no_grad()
    def predict(self, image: np.ndarray) -> tuple[str, float]:
        """
        Classify a document image.

        Args:
            image: BGR image.

        Returns:
            (class_label, confidence)
        """
        tensor = self._transform(image).unsqueeze(0).to(self._device)
        logits = self._model(tensor)
        probs = F.softmax(logits, dim=1)

        confidence, idx = probs.max(dim=1)
        label = self._class_labels[idx.item()]
        return label, confidence.item()
|
docreader/config.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Конфигурация пайплайна
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from dataclasses import dataclass, field
|
|
6
|
+
|
|
7
|
+
DEFAULT_SKIP_OCR_ZONES = frozenset({"stamp", "gerb"})
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
@dataclass
|
|
11
|
+
class PipelineConfig:
|
|
12
|
+
"""
|
|
13
|
+
Настройки пайплайна распознавания
|
|
14
|
+
"""
|
|
15
|
+
device: str = "auto"
|
|
16
|
+
|
|
17
|
+
# Типы документов и пути к YOLO-моделям (относительно models_dir)
|
|
18
|
+
detector_weights: dict[str, str] = field(default_factory=lambda: {
|
|
19
|
+
"attestat": "attestat.pt",
|
|
20
|
+
"diplom": "diplom.pt",
|
|
21
|
+
"passport": "passport.pt",
|
|
22
|
+
"snils": "snils.pt",
|
|
23
|
+
})
|
|
24
|
+
|
|
25
|
+
# Путь к весам классификатора (относительно models_dir)
|
|
26
|
+
classification_weights: str = "best_doc_classifier.pth"
|
|
27
|
+
|
|
28
|
+
class_labels: list[str] = field(default_factory=lambda: [
|
|
29
|
+
"attestat", "diplom", "passport", "snils", 'other'
|
|
30
|
+
])
|
|
31
|
+
|
|
32
|
+
# EasyOCR
|
|
33
|
+
skip_ocr_zones: frozenset[str] = DEFAULT_SKIP_OCR_ZONES
|
|
34
|
+
ocr_lang: list[str] = field(default_factory=lambda: ["ru"])
|
|
35
|
+
ocr_recog_network: str = "custom_example"
|
|
36
|
+
ocr_download_enabled: bool = False
|
|
37
|
+
|
|
38
|
+
enable_deskew: bool = True # Выравнивание по линиям Хафа
|
|
39
|
+
|
|
40
|
+
return_crops: bool = True # Сохранять кропы зон в результат
|
|
41
|
+
|
|
42
|
+
def resolve_device(self) -> str:
|
|
43
|
+
if self.device != "auto":
|
|
44
|
+
return self.device
|
|
45
|
+
import torch
|
|
46
|
+
return "cuda" if torch.cuda.is_available() else "cpu"
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
"""Абстрактный интерфейс детектора зон документа."""
|
|
2
|
+
|
|
3
|
+
from abc import ABC, abstractmethod
|
|
4
|
+
from dataclasses import dataclass
|
|
5
|
+
|
|
6
|
+
import numpy as np
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
@dataclass
class Detection:
    """One detected zone (oriented bounding box) on a document image."""
    # Class name of the zone as reported by the detector.
    zone_name: str
    # Corner coordinates of the oriented box, shape (4, 2) or flat (8,).
    obb_points: np.ndarray
    # Detector confidence score for this zone.
    confidence: float
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class BaseDetector(ABC):
    """
    Interface for a document field detector.

    To plug in your own detector, inherit and implement `detect`.
    """

    @abstractmethod
    def detect(self, image: np.ndarray, doc_type: str) -> list[Detection]:
        """
        Detect zones (fields) on a document image.

        Args:
            image: BGR image.
            doc_type: document type (used to pick the right model).

        Returns:
            List of detected zones.
        """
        ...
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
"""Детектор зон документа на основе YOLOv8 OBB."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
import numpy as np
|
|
5
|
+
from ultralytics import YOLO
|
|
6
|
+
|
|
7
|
+
from docreader.detector.base import BaseDetector, Detection
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class YoloObbDetector(BaseDetector):
    """
    Document field detector using YOLOv8 with oriented bounding boxes.

    Args:
        models_dir: directory containing the YOLO weights.
        weights_map: mapping {doc_type: weights_file_name}.

    Raises:
        FileNotFoundError: if any referenced weights file is missing.
    """

    def __init__(self, models_dir: str, weights_map: dict[str, str]):
        self._models: dict[str, YOLO] = {}

        for doc_type, filename in weights_map.items():
            path = os.path.join(models_dir, filename)
            if not os.path.isfile(path):
                raise FileNotFoundError(
                    f"YOLO weights not found: {path} (doc_type={doc_type})"
                )
            self._models[doc_type] = YOLO(path)

    @property
    def supported_doc_types(self) -> list[str]:
        """Document types for which a model has been loaded."""
        return list(self._models)

    def detect(self, image: np.ndarray, doc_type: str) -> list[Detection]:
        """Run the per-doc-type YOLO model and wrap its OBB results."""
        try:
            model = self._models[doc_type]
        except KeyError:
            raise ValueError(
                f"No YOLO model for doc_type='{doc_type}'. "
                f"Available: {self.supported_doc_types}"
            ) from None

        prediction = model(image)[0]

        return [
            Detection(
                zone_name=model.names[int(box.cls)],
                obb_points=box.xyxyxyxy.cpu().numpy().flatten(),
                confidence=float(box.conf.cpu()),
            )
            for box in prediction.obb
        ]
|
docreader/hub.py
ADDED
|
@@ -0,0 +1,182 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Автоматическая загрузка и кэширование моделей.
|
|
3
|
+
|
|
4
|
+
Модели скачиваются при первом вызове и сохраняются в:
|
|
5
|
+
~/.cache/docreader/models/
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import os
|
|
9
|
+
import hashlib
|
|
10
|
+
import logging
|
|
11
|
+
import tarfile
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from typing import Optional
|
|
14
|
+
|
|
15
|
+
import requests
|
|
16
|
+
from tqdm import tqdm
|
|
17
|
+
|
|
18
|
+
logger = logging.getLogger(__name__)
|
|
19
|
+
|
|
20
|
+
_BASE_URL = "https://github.com/mishanyacorleone/docreader/releases/download/v0.1.0"

# Registry of downloadable model files. Each entry carries the download
# URL, the file's SHA-256 checksum, an approximate size in MB, and —
# for archives — "extract_to": the cache subdirectory the archive is
# unpacked into after download.
MODEL_REGISTRY: dict[str, dict] = {
    "best_doc_classifier.pth": {
        "url": f"{_BASE_URL}/best_doc_classifier.pth",
        "sha256": "6d56f45bd33f5296f40bbf32c67a46c01914a3ac7a3dcbbf9aa9a0b8402b59c4",
        "size_mb": 8.75,
    },
    "passport.pt": {
        "url": f"{_BASE_URL}/passport.pt",
        "sha256": "bebe46bcd4270442c1e14e9b5a403c9f59212d92ed8181af1326f9f80bc0f0c0",
        "size_mb": 5.55,
    },
    "diplom.pt": {
        "url": f"{_BASE_URL}/diplom.pt",
        "sha256": "f1848733eefa4741ead199cf8226e2fc141b08b01d625912d19926bb7ebc6387",
        "size_mb": 5.71,
    },
    "attestat.pt": {
        "url": f"{_BASE_URL}/attestat.pt",
        "sha256": "9b6eaa5860b0cb0498995c0ab8015a9b85a9a910b429f2bef509e1202232199d",
        "size_mb": 5.72,
    },
    "snils.pt": {
        "url": f"{_BASE_URL}/snils.pt",
        "sha256": "84775a6ff1ababb3f8e31a8aa768717cf9d65d8b84df9c0cd48eb7bdaf680218",
        "size_mb": 5.82,
    },
    "easyocr_custom.tar.gz": {
        "url": f"{_BASE_URL}/easyocr_custom.tar.gz",
        "sha256": "832ce5a7f3a1086d81beb1c991347e3f545a425646bc87f3f576ae06fecd2420",
        "size_mb": 87.1,
        "extract_to": "easyocr"
    }
}
|
|
55
|
+
|
|
56
|
+
def get_cache_dir() -> Path:
    """Return the model cache directory, creating it if needed.

    Honors the DOCREADER_CACHE environment variable; defaults to
    ~/.cache/docreader. Models live in the "models" subdirectory.
    """
    root = os.environ.get("DOCREADER_CACHE", "~/.cache/docreader")
    models_dir = Path(root).expanduser() / "models"
    models_dir.mkdir(parents=True, exist_ok=True)
    return models_dir
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def _sha256_file(path: Path) -> str:
|
|
66
|
+
"""Считает SHA-256 хэш файла."""
|
|
67
|
+
h = hashlib.sha256()
|
|
68
|
+
with open(path, "rb") as f:
|
|
69
|
+
for chunk in iter(lambda: f.read(8192), b""):
|
|
70
|
+
h.update(chunk)
|
|
71
|
+
return h.hexdigest()
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def _download_file(url: str, dest: Path, expected_sha256: Optional[str] = None):
    """Download *url* to *dest* with a progress bar; verify SHA-256 if given.

    Raises:
        ValueError: on checksum mismatch (the corrupt file is deleted first).
        requests.HTTPError: on a non-2xx response.
    """
    logger.info(f"Downloading {url}")

    response = requests.get(url, stream=True, timeout=30)
    response.raise_for_status()

    total = int(response.headers.get("content-length", 0))

    progress = tqdm(total=total, unit="B", unit_scale=True, desc=dest.name)
    with progress, open(dest, "wb") as out:
        for chunk in response.iter_content(chunk_size=8192):
            out.write(chunk)
            progress.update(len(chunk))

    # Integrity check
    if expected_sha256:
        actual = _sha256_file(dest)
        if actual != expected_sha256:
            dest.unlink()
            raise ValueError(
                f"Hash mismatch for {dest.name}: "
                f"expected {expected_sha256[:16]}..., "
                f"got {actual[:16]}..."
            )

    logger.info(f"Saved to {dest}")
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
def _extract_archive(archive_path: Path, extract_to: Path):
    """Extract a .tar.gz archive into *extract_to*, then delete the archive.

    Args:
        archive_path: path to the downloaded .tar.gz file.
        extract_to: destination directory (created if missing).
    """
    logger.info(f"Extracting {archive_path.name} → {extract_to}")
    extract_to.mkdir(parents=True, exist_ok=True)

    with tarfile.open(archive_path, "r:gz") as tar:
        try:
            # The "data" filter rejects absolute paths, "..", and links
            # escaping the destination (path-traversal, CVE-2007-4559).
            tar.extractall(path=extract_to, filter="data")
        except TypeError:
            # Python without extraction-filter support (< 3.10.12 /
            # 3.11.4 / 3.12): fall back to unfiltered extraction.
            tar.extractall(path=extract_to)

    archive_path.unlink()
    logger.info(f"Extracted and cleaned up: {archive_path.name}")
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
def ensure_model(filename: str, cache_dir: Optional[Path] = None) -> Path:
    """
    Ensure a model file is present locally, downloading it if missing.

    Args:
        filename: file name from MODEL_REGISTRY.
        cache_dir: cache directory (defaults to ~/.cache/docreader/models).

    Returns:
        Path to the model file, or to the extracted directory for
        archive entries (those with "extract_to").

    Raises:
        KeyError: if the file is not registered.
        ConnectionError: if the download failed.
        ValueError: if the downloaded file fails its SHA-256 check.
    """
    if filename not in MODEL_REGISTRY:
        # Include the offending name in the message (it was previously
        # a literal placeholder instead of the file name).
        raise KeyError(
            f"Unknown model '{filename}'. "
            f"Available: {list(MODEL_REGISTRY.keys())}"
        )

    cache = cache_dir or get_cache_dir()
    meta = MODEL_REGISTRY[filename]

    # Archive entries are downloaded once and unpacked into a subdirectory.
    if "extract_to" in meta:
        extract_dir = cache / meta["extract_to"]
        if extract_dir.exists() and any(extract_dir.iterdir()):
            logger.debug(f"Already extracted: {extract_dir}")
            return extract_dir

        # Download and unpack
        archive_path = cache / filename
        archive_path.parent.mkdir(parents=True, exist_ok=True)
        _download_file(
            url=meta["url"],
            dest=archive_path,
            expected_sha256=meta.get("sha256")
        )
        _extract_archive(archive_path, extract_dir)

        return extract_dir

    filepath = cache / filename
    filepath.parent.mkdir(parents=True, exist_ok=True)

    if filepath.exists():
        logger.debug(f"Model cached: {filepath}")
        return filepath

    _download_file(
        url=meta["url"],
        dest=filepath,
        expected_sha256=meta.get("sha256"),
    )

    return filepath
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
def ensure_all_models(cache_dir: Optional[Path] = None) -> Path:
    """Download every registered model; return the cache directory used."""
    target = cache_dir or get_cache_dir()
    for name in MODEL_REGISTRY:
        ensure_model(name, target)
    return target
|
docreader/ocr/base.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
"""Абстрактный интерфейс OCR-движка."""
|
|
2
|
+
|
|
3
|
+
from abc import ABC, abstractmethod
|
|
4
|
+
from dataclasses import dataclass
|
|
5
|
+
|
|
6
|
+
import numpy as np
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
@dataclass
class OcrResult:
    """OCR result for a single crop."""
    # Recognized text; empty string when nothing was found.
    text: str
    # Confidence score of the recognition.
    confidence: float
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class BaseOcrEngine(ABC):
    """
    OCR engine interface.

    To plug in Tesseract, PaddleOCR, or anything else —
    inherit and implement `recognize`.
    """

    @abstractmethod
    def recognize(self, image: np.ndarray) -> OcrResult:
        """
        Recognize the text on an image (a zone crop).

        Args:
            image: BGR image containing text.

        Returns:
            OcrResult with the text and confidence.
        """
        ...
|
|
@@ -0,0 +1,68 @@
|
|
|
1
|
+
"""OCR-движок на основе EasyOCR."""
|
|
2
|
+
|
|
3
|
+
from typing import Optional
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
|
|
6
|
+
import numpy as np
|
|
7
|
+
import easyocr
|
|
8
|
+
|
|
9
|
+
from docreader.ocr.base import BaseOcrEngine, OcrResult
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class EasyOcrEngine(BaseOcrEngine):
    """
    OCR via EasyOCR (supports custom recognition models).

    Args:
        lang: language list, e.g. ["ru"].
        gpu: whether to use the GPU.
        model_storage_directory: directory holding the model files.
        user_network_directory: directory with user-defined networks.
        recog_network: name of the recognition network.
        download_enabled: allow EasyOCR to download missing models.
    """

    def __init__(
        self,
        lang: list[str] | None = None,
        gpu: bool = True,
        model_storage_directory: Optional[str] = None,
        user_network_directory: Optional[str] = None,
        recog_network: Optional[str] = None,
        download_enabled: bool = False
    ):
        # Default location for stored models
        if model_storage_directory is None:
            model_storage_directory = str(
                Path.home() / ".cache" / "docreader" / "easyocr_models"
            )
        # Make sure the storage directory exists before Reader touches it.
        Path(model_storage_directory).mkdir(parents=True, exist_ok=True)

        reader_kwargs = {
            "lang_list": lang or ["ru"],
            "gpu": gpu,
            "download_enabled": download_enabled,
            "model_storage_directory": model_storage_directory,
            "verbose": False,
        }
        if user_network_directory:
            reader_kwargs["user_network_directory"] = user_network_directory
        if recog_network:
            reader_kwargs["recog_network"] = recog_network

        self._reader = easyocr.Reader(**reader_kwargs)

    def recognize(self, image: np.ndarray) -> OcrResult:
        """Run OCR on a crop; join all fragments into one text string."""
        fragments = self._reader.readtext(image)

        if not fragments:
            return OcrResult(text="", confidence=0.0)

        texts = [text for _, text, _ in fragments]
        confidences = [conf for _, _, conf in fragments]

        joined = " ".join(texts).strip()
        mean_conf = sum(confidences) / len(confidences) if confidences else 0.0

        return OcrResult(text=joined, confidence=mean_conf)
|
docreader/pipeline.py
ADDED
|
@@ -0,0 +1,242 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Главный пайплайн распознавания документов.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import os
|
|
6
|
+
import logging
|
|
7
|
+
from typing import Optional
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
|
|
10
|
+
import numpy as np
|
|
11
|
+
|
|
12
|
+
from docreader.config import PipelineConfig
|
|
13
|
+
from docreader.schemas import DocumentResult, ZoneResult
|
|
14
|
+
from docreader.utils import load_image
|
|
15
|
+
from docreader.hub import ensure_model, get_cache_dir
|
|
16
|
+
from docreader.preprocessing import deskew_image, crop_obb_region
|
|
17
|
+
|
|
18
|
+
from docreader.classifier.base import BaseClassifier
|
|
19
|
+
from docreader.classifier.mobilenet import MobileNetClassifier
|
|
20
|
+
|
|
21
|
+
from docreader.detector.base import BaseDetector
|
|
22
|
+
from docreader.detector.yolo_obb import YoloObbDetector
|
|
23
|
+
|
|
24
|
+
from docreader.ocr.base import BaseOcrEngine
|
|
25
|
+
from docreader.ocr.easyocr_engine import EasyOcrEngine
|
|
26
|
+
|
|
27
|
+
logger = logging.getLogger(__name__)
|
|
28
|
+
|
|
29
|
+
class DocReader:
    """
    Text recognition for documents.

    Pipeline:
        1. Classify the document type
        2. Deskew (optional)
        3. Detect fields via YOLO OBB
        4. Crop each zone
        5. OCR each crop
        6. Assemble the result

    Examples:
        # Standard usage
        reader = DocReader(models_dir="./models")
        result = reader.process("passport.jpg")
        print(result.fields)

        # With a custom OCR engine
        my_ocr = MyTesseractEngine()
        reader = DocReader(models_dir="./models", ocr_engine=my_ocr)

        # With a custom classifier
        my_cls = MyResNetClassifier(weights="resnet.pth")
        reader = DocReader(models_dir="./models", classifier=my_cls)
    """

    def __init__(
        self,
        models_dir: str | None,
        config: Optional[PipelineConfig] = None,
        classifier: Optional[BaseClassifier] = None,
        detector: Optional[BaseDetector] = None,
        ocr_engine: Optional[BaseOcrEngine] = None
    ):
        """
        Args:
            models_dir: directory with model files; None switches to the
                auto-download cache (~/.cache/docreader/models)
            config: pipeline configuration
            classifier: custom classifier (MobileNetV2 if None)
            detector: custom detector (YOLO OBB if None)
            ocr_engine: custom OCR (EasyOCR if None)
        """
        self._config = config or PipelineConfig()
        self._device = self._config.resolve_device()
        if models_dir is not None:
            self._models_dir = Path(models_dir)
            self._auto_download = False
        else:
            self._models_dir = get_cache_dir()
            self._auto_download = True

        # Single readable line (the fields were previously run together
        # because adjacent f-strings had no separators).
        logger.info(
            f"DocReader init: device={self._device}, "
            f"models_dir={self._models_dir}, "
            f"auto_download={self._auto_download}"
        )

        self._classifier = classifier or self._init_classifier()
        self._detector = detector or self._init_detector()
        self._ocr = ocr_engine or self._init_ocr()

    def _resolve_weights(self, filename: str) -> str:
        """
        Return the path to a weights file, downloading it when
        auto-download is enabled.

        Raises:
            FileNotFoundError: if the file is missing and downloads are off.
        """
        if self._auto_download:
            return str(ensure_model(filename, self._models_dir))
        path = self._models_dir / filename
        if not path.exists():
            raise FileNotFoundError(f"Model not found: {path}")
        return str(path)

    def _init_classifier(self) -> BaseClassifier:
        """Build the default MobileNetV2 document-type classifier."""
        from docreader.classifier.mobilenet import MobileNetClassifier

        weights = self._resolve_weights(self._config.classification_weights)
        return MobileNetClassifier(
            weights_path=weights,
            class_labels=self._config.class_labels,
            device=self._device
        )

    def _init_detector(self) -> BaseDetector:
        """Build the default YOLO OBB detector for every configured doc type."""
        from docreader.detector.yolo_obb import YoloObbDetector

        resolved = {}
        for doc_type, filename in self._config.detector_weights.items():
            # Ensures the file exists locally (downloading if enabled);
            # the detector itself receives the bare file name.
            self._resolve_weights(filename)
            resolved[doc_type] = filename

        return YoloObbDetector(
            models_dir=str(self._models_dir),
            weights_map=resolved
        )

    def _init_ocr(self) -> BaseOcrEngine:
        """Build the default EasyOCR engine with the bundled custom network."""
        from docreader.ocr.easyocr_engine import EasyOcrEngine

        # NOTE(review): this fetches the EasyOCR bundle into models_dir even
        # when auto-download is off for the other weights — confirm intended.
        easyocr_dir = ensure_model("easyocr_custom.tar.gz", self._models_dir)

        # Use the config values (they were previously hard-coded duplicates
        # of the config defaults, so behavior is unchanged by default).
        return EasyOcrEngine(
            lang=self._config.ocr_lang,
            gpu=(self._device != "cpu"),
            model_storage_directory=str(easyocr_dir / "model"),
            user_network_directory=str(easyocr_dir / "user_network"),
            recog_network=self._config.ocr_recog_network,
            download_enabled=self._config.ocr_download_enabled
        )

    # Public API

    def process(
        self,
        source,
        return_crops: Optional[bool] = None,
    ) -> DocumentResult:
        """
        Run the full document recognition pipeline.

        Args:
            source: file path or numpy array (BGR)
            return_crops: whether to keep zone crops in the result
                (falls back to the config value when None)

        Returns:
            DocumentResult
        """
        save_crops = (
            return_crops
            if return_crops is not None
            else self._config.return_crops
        )
        image = load_image(source)

        # 1. Classification
        doc_type, doc_conf = self._classifier.predict(image)
        logger.info(f"Classified as '{doc_type}' (conf={doc_conf:.3f})")

        supported = self._config.detector_weights.keys()
        if doc_type not in supported:
            logger.warning(
                f"Unknown doc_type '{doc_type}', "
                f"supported: {list(supported)}. "
                f"Returning empty result."
            )
            return DocumentResult(
                doc_type=doc_type,
                doc_confidence=doc_conf,
                zones=[]
            )

        # 2. Optional deskew before detection
        if self._config.enable_deskew:
            image = deskew_image(image)

        # 3. Field detection
        detections = self._detector.detect(image, doc_type)
        logger.info(f"Detected {len(detections)} zones")

        # 4-5. Crop + OCR each detected zone
        zones: list[ZoneResult] = []
        for det in detections:
            zone = self._process_zone(image, det, save_crops)
            if zone is not None:
                zones.append(zone)

        return DocumentResult(
            doc_type=doc_type,
            doc_confidence=doc_conf,
            zones=zones
        )

    def process_batch(
        self,
        sources: list,
        return_crops: Optional[bool] = None
    ) -> list[DocumentResult]:
        """
        Process several documents sequentially.

        Args:
            sources: list of file paths or numpy arrays (BGR)
            return_crops: whether to keep zone crops in each result

        Returns:
            One DocumentResult per input, in order.
        """
        return [self.process(src, return_crops) for src in sources]

    # Internal logic

    def _process_zone(self, image, detection, save_crops):
        """
        Process one zone: crop -> OCR -> ZoneResult.

        Returns None when the crop turns out empty (the zone is skipped).
        """
        zone_name = detection.zone_name

        # Zones like stamps carry no readable text: keep the box, skip OCR.
        if zone_name in self._config.skip_ocr_zones:
            return ZoneResult(
                name=zone_name,
                text="",
                confidence=detection.confidence,
                bbox=detection.obb_points.tolist(),
                crop_image=None
            )

        crop = crop_obb_region(image, detection.obb_points)
        if crop is None or crop.size == 0:
            logger.warning(f"Empty crop for zone: '{zone_name}', skipping")
            return None

        ocr_result = self._ocr.recognize(crop)

        return ZoneResult(
            name=zone_name,
            text=ocr_result.text,
            confidence=ocr_result.confidence,
            bbox=detection.obb_points.tolist(),
            crop_image=crop if save_crops else None
        )
|
|
242
|
+
|
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Геометрические преобразования: выравнивание и кроп по OBB.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import math
|
|
6
|
+
import cv2
|
|
7
|
+
import numpy as np
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def deskew_image(image: np.ndarray) -> np.ndarray:
    """
    Deskew a document using the probabilistic Hough transform.

    Rotates the image around its center by the median angle of the
    detected line segments.

    Args:
        image: BGR image.

    Returns:
        The rotated image, or the input unchanged when no lines are found.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 100, 100, apertureSize=3)
    lines = cv2.HoughLinesP(
        edges, 1, math.pi / 180.0, 100, minLineLength=100, maxLineGap=5
    )

    # No lines detected: nothing to align against, return unchanged.
    if lines is None or len(lines) == 0:
        return image

    angles = [
        math.degrees(math.atan2(y2 - y1, x2 - x1))
        for [[x1, y1, x2, y2]] in lines
    ]
    # Median is robust to outlier segments (e.g. vertical rules).
    median_angle = float(np.median(angles))

    h, w = image.shape[:2]
    center = (w // 2, h // 2)
    rotation_matrix = cv2.getRotationMatrix2D(center, median_angle, 1.0)
    return cv2.warpAffine(image, rotation_matrix, (w, h))
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def crop_obb_region(image: np.ndarray, obb_points: np.ndarray) -> np.ndarray | None:
    """
    Cut out the region given by 4 OBB corners and rectify its perspective.

    If the rectified patch is vertical (height > width), it is rotated 90°.

    Args:
        image: source BGR image.
        obb_points: array of 8 coordinates [x1,y1,x2,y2,x3,y3,x4,y4]
            or shape (4, 2).

    Returns:
        The rectified crop, or None when the computed size is degenerate.
    """
    corners = np.array(obb_points, dtype=np.float32).reshape(4, 2)

    # Order as: top-left, top-right, bottom-right, bottom-left
    ordered = _order_points(corners)
    tl, tr, br, bl = ordered

    target_w = max(
        int(np.linalg.norm(br - bl)),
        int(np.linalg.norm(tr - tl)),
    )
    target_h = max(
        int(np.linalg.norm(tr - br)),
        int(np.linalg.norm(tl - bl)),
    )

    if target_w <= 0 or target_h <= 0:
        return None

    destination = np.array(
        [
            [0, 0],
            [target_w - 1, 0],
            [target_w - 1, target_h - 1],
            [0, target_h - 1],
        ],
        dtype=np.float32,
    )

    transform = cv2.getPerspectiveTransform(ordered, destination)
    patch = cv2.warpPerspective(image, transform, (target_w, target_h))

    # Rotate vertical fragments into landscape orientation
    if patch.shape[0] > patch.shape[1]:
        patch = cv2.rotate(patch, cv2.ROTATE_90_COUNTERCLOCKWISE)

    return patch
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def _order_points(pts: np.ndarray) -> np.ndarray:
|
|
93
|
+
"""Упорядочивает 4 точки: TL, TR, BR, BL."""
|
|
94
|
+
rect = np.zeros((4, 2), dtype=np.float32)
|
|
95
|
+
s = pts.sum(axis=1)
|
|
96
|
+
rect[0] = pts[np.argmin(s)] # top-left
|
|
97
|
+
rect[2] = pts[np.argmax(s)] # bottom-right
|
|
98
|
+
diff = np.diff(pts, axis=1).flatten()
|
|
99
|
+
rect[1] = pts[np.argmin(diff)] # top-right
|
|
100
|
+
rect[3] = pts[np.argmax(diff)] # bottom-left
|
|
101
|
+
return rect
|
docreader/schemas.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
from dataclasses import dataclass, field
|
|
2
|
+
from typing import Optional
|
|
3
|
+
|
|
4
|
+
import numpy as np
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
@dataclass
class ZoneResult:
    """Recognition result for a single document field."""

    name: str
    text: str
    confidence: float
    bbox: list[float] = field(default_factory=list)
    crop_image: Optional[np.ndarray] = None

    def to_dict(self) -> dict:
        """Serialize the zone as a plain dict (bbox and crop are omitted)."""
        return {
            "name": self.name,
            "text": self.text,
            "confidence": round(self.confidence, 4),
        }
|
+
@dataclass
class DocumentResult:
    """Processing result for a single document."""

    doc_type: str
    doc_confidence: float
    zones: list[ZoneResult] = field(default_factory=list)

    @property
    def fields(self) -> dict[str, str]:
        """Mapping of zone name -> recognized text."""
        result: dict[str, str] = {}
        for zone in self.zones:
            result[zone.name] = zone.text
        return result

    def to_dict(self) -> dict:
        """Serialize the document, its zones and the flat fields mapping."""
        document = {
            "doc_type": self.doc_type,
            "doc_confidence": round(self.doc_confidence, 4),
            "zones": [zone.to_dict() for zone in self.zones],
            "fields": self.fields,
        }
        return {"document": document}
|
docreader/utils.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Утилиты для загрузки и базовой обработки изображений
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import cv2
|
|
6
|
+
import numpy as np
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def load_image(source) -> np.ndarray:
    """Load an image from one of several source kinds.

    Args:
        source: filesystem path (str or os.PathLike, e.g. pathlib.Path)
            or a numpy array (BGR or RGB, shape (H, W, 3)).

    Returns:
        Image as a BGR numpy array.  Array inputs are returned as a copy,
        so callers may mutate the result without aliasing the input.

    Raises:
        ValueError: if the source type/shape is unsupported or the file
            cannot be read.
    """
    import os  # local import keeps the module's import block unchanged

    # Accept pathlib.Path and other path-like objects in addition to str.
    if isinstance(source, (str, os.PathLike)):
        image = cv2.imread(os.fspath(source))
        if image is None:
            raise ValueError(f"Не удалось загрузить изображение: {source}")
        return image

    if isinstance(source, np.ndarray):
        if source.ndim == 3 and source.shape[2] == 3:
            return source.copy()
        # Report the shape, not the full array contents: interpolating the
        # whole array produced huge, unreadable error messages.
        raise ValueError(f"Неподдерживаемая форма массива: {source.shape}")
    raise ValueError(f"Неподдерживаемый тип источника: {type(source)}")
+
|
|
32
|
+
|
|
33
|
+
def bgr_to_rgb(image: np.ndarray) -> np.ndarray:
    """Convert an image from BGR to RGB channel order."""
    converted = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    return converted
36
|
+
|
|
37
|
+
def rgb_to_bgr(image: np.ndarray) -> np.ndarray:
    """Convert an image from RGB to BGR channel order."""
    converted = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    return converted
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: docreader-ocr
|
|
3
|
+
Version: 0.1.2
|
|
4
|
+
Summary: Document OCR pipeline: classify → detect fields → recognize text
|
|
5
|
+
Project-URL: Homepage, https://github.com/mishanyacorleone/docreader
|
|
6
|
+
Project-URL: Repository, https://github.com/mishanyacorleone/docreader
|
|
7
|
+
Project-URL: Issues, https://github.com/mishanyacorleone/docreader/issues
|
|
8
|
+
Author-email: Mikhail Kardash <mishutqac@mail.com>, Ruslan Abzelilov <ruslanr26@mail.ru>, Ekaterina Karmanova <monitor81@mail.ru>
|
|
9
|
+
License: MIT
|
|
10
|
+
License-File: LICENSE
|
|
11
|
+
Keywords: document,ocr,recognition,yolo
|
|
12
|
+
Classifier: Development Status :: 3 - Alpha
|
|
13
|
+
Classifier: Intended Audience :: Developers
|
|
14
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
15
|
+
Classifier: Programming Language :: Python :: 3
|
|
16
|
+
Classifier: Topic :: Scientific/Engineering :: Image Recognition
|
|
17
|
+
Requires-Python: >=3.9
|
|
18
|
+
Requires-Dist: easyocr>=1.7
|
|
19
|
+
Requires-Dist: numpy>=1.24
|
|
20
|
+
Requires-Dist: opencv-python>=4.8
|
|
21
|
+
Requires-Dist: requests>=2.28
|
|
22
|
+
Requires-Dist: torch>=2.0
|
|
23
|
+
Requires-Dist: torchvision>=0.15
|
|
24
|
+
Requires-Dist: tqdm>=4.65
|
|
25
|
+
Requires-Dist: ultralytics>=8.0
|
|
26
|
+
Provides-Extra: dev
|
|
27
|
+
Requires-Dist: mypy; extra == 'dev'
|
|
28
|
+
Requires-Dist: pytest-cov; extra == 'dev'
|
|
29
|
+
Requires-Dist: pytest>=7.0; extra == 'dev'
|
|
30
|
+
Requires-Dist: ruff; extra == 'dev'
|
|
31
|
+
Description-Content-Type: text/markdown
|
|
32
|
+
|
|
33
|
+
Заглушка
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
docreader/__init__.py,sha256=be9yeILVnnzU_lYW-11JKMJQIkr5LV_nib-AoxUTj54,515
|
|
2
|
+
docreader/config.py,sha256=V-ItUDeZyOuZ_2U9R_OQSYxebyfRFPLQgUCHFLBAxS8,1535
|
|
3
|
+
docreader/hub.py,sha256=ko5DMLA0r8vL0S_A6g1qjjKBa_zMxJcko62NwfRCes4,6005
|
|
4
|
+
docreader/pipeline.py,sha256=eVA0mfimqY7bZ4SnUyagRvxpAw8y7Osbqwg9nXS3dTw,8430
|
|
5
|
+
docreader/schemas.py,sha256=McwEhIkCnX39VLGHkgHeZsTWdNo6AmRH9dscYzK9Ebw,1362
|
|
6
|
+
docreader/utils.py,sha256=z9Fogt7GCTobg-f6MVP0cOQeudKrYDvnTlPpIVYI7_A,1356
|
|
7
|
+
docreader/classifier/__init__.py,sha256=GGgYZLonTMb54gvwvYTyQXdhh4KRc9-E0flOewjO8sE,171
|
|
8
|
+
docreader/classifier/base.py,sha256=atINqf_4EugRT5FdE0_GX-R-8GA4T0pwWpxW2aAmb8U,860
|
|
9
|
+
docreader/classifier/mobilenet.py,sha256=t8Q_icYaidmRsZAA_52AFaq-eMURj1Y89meFOeUQGaQ,2634
|
|
10
|
+
docreader/detector/__init__.py,sha256=eIOSsY4dEaVxkiJ0ce4SmZ961kXhocHe9va4K9597-Q,178
|
|
11
|
+
docreader/detector/base.py,sha256=SyxPXeTfHFxW7ftjCPD3uGgWoryYDMvMvqXgZCslzQc,1114
|
|
12
|
+
docreader/detector/yolo_obb.py,sha256=NjMnSk_96qEZcYDuf8-vu1t-X6EzDK55d3YIS2BCgHg,1949
|
|
13
|
+
docreader/ocr/__init__.py,sha256=LFLTtNZp0HjV3Am-jeoHzFqmZ6HC1XW3uUGJJ9jeB3A,172
|
|
14
|
+
docreader/ocr/base.py,sha256=uFcO3nSg7z0vQBeHy8A_zKVwSz8Mkurs6AAq8qCGB04,932
|
|
15
|
+
docreader/ocr/easyocr_engine.py,sha256=sjBk_5UZkayf58Jo80PIyv-Fw7Tf1djYuk9LkWHMyiY,2383
|
|
16
|
+
docreader/preprocessing/__init__.py,sha256=59yoWf2Q23nKRT5UXoSIkkdh0WXRSpEwdE1lgdyEHcY,123
|
|
17
|
+
docreader/preprocessing/geometry.py,sha256=c-GIgf_dpicYk4YiVcPIBo3nTyU-ZAZ1wJe7VEXX0hA,3252
|
|
18
|
+
docreader_ocr-0.1.2.dist-info/METADATA,sha256=Eja4nWkSZit8OBtw2esThFP8StFsdQxRLwt6PWitCfk,1299
|
|
19
|
+
docreader_ocr-0.1.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
|
|
20
|
+
docreader_ocr-0.1.2.dist-info/licenses/LICENSE,sha256=kts17v8HVwDuR6bfFvhtVc-UyGij5ZOrp3MkOLru-oA,1130
|
|
21
|
+
docreader_ocr-0.1.2.dist-info/RECORD,,
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Mikhail Kardash, Ruslan Abzelilov, Ekaterina Karmanova
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|