onnxtr 0.1.2__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- onnxtr/io/elements.py +17 -4
- onnxtr/io/pdf.py +6 -3
- onnxtr/models/__init__.py +1 -0
- onnxtr/models/_utils.py +57 -20
- onnxtr/models/builder.py +24 -9
- onnxtr/models/classification/models/mobilenet.py +25 -7
- onnxtr/models/classification/predictor/base.py +1 -0
- onnxtr/models/classification/zoo.py +22 -7
- onnxtr/models/detection/_utils/__init__.py +1 -0
- onnxtr/models/detection/_utils/base.py +66 -0
- onnxtr/models/detection/models/differentiable_binarization.py +41 -11
- onnxtr/models/detection/models/fast.py +37 -9
- onnxtr/models/detection/models/linknet.py +39 -9
- onnxtr/models/detection/postprocessor/base.py +4 -3
- onnxtr/models/detection/predictor/base.py +15 -1
- onnxtr/models/detection/zoo.py +16 -3
- onnxtr/models/engine.py +75 -9
- onnxtr/models/predictor/base.py +69 -42
- onnxtr/models/predictor/predictor.py +22 -15
- onnxtr/models/recognition/models/crnn.py +39 -9
- onnxtr/models/recognition/models/master.py +19 -5
- onnxtr/models/recognition/models/parseq.py +20 -5
- onnxtr/models/recognition/models/sar.py +19 -5
- onnxtr/models/recognition/models/vitstr.py +31 -9
- onnxtr/models/recognition/zoo.py +12 -6
- onnxtr/models/zoo.py +22 -0
- onnxtr/py.typed +0 -0
- onnxtr/utils/geometry.py +33 -12
- onnxtr/version.py +1 -1
- {onnxtr-0.1.2.dist-info → onnxtr-0.3.0.dist-info}/METADATA +81 -16
- {onnxtr-0.1.2.dist-info → onnxtr-0.3.0.dist-info}/RECORD +35 -32
- {onnxtr-0.1.2.dist-info → onnxtr-0.3.0.dist-info}/WHEEL +1 -1
- {onnxtr-0.1.2.dist-info → onnxtr-0.3.0.dist-info}/top_level.txt +0 -1
- {onnxtr-0.1.2.dist-info → onnxtr-0.3.0.dist-info}/LICENSE +0 -0
- {onnxtr-0.1.2.dist-info → onnxtr-0.3.0.dist-info}/zip-safe +0 -0

onnxtr/models/recognition/models/vitstr.py
CHANGED

@@ -11,7 +11,7 @@ from scipy.special import softmax
 
 from onnxtr.utils import VOCABS
 
-from ...engine import Engine
+from ...engine import Engine, EngineConfig
 from ..core import RecognitionPostProcessor
 
 __all__ = ["ViTSTR", "vitstr_small", "vitstr_base"]
@@ -23,6 +23,7 @@ default_cfgs: Dict[str, Dict[str, Any]] = {
         "input_shape": (3, 32, 128),
         "vocab": VOCABS["french"],
         "url": "https://github.com/felixdittrich92/OnnxTR/releases/download/v0.0.1/vitstr_small-3ff9c500.onnx",
+        "url_8_bit": "https://github.com/felixdittrich92/OnnxTR/releases/download/v0.1.2/vitstr_small_dynamic_8_bit-bec6c796.onnx",
     },
     "vitstr_base": {
         "mean": (0.694, 0.695, 0.693),
@@ -30,6 +31,7 @@ default_cfgs: Dict[str, Dict[str, Any]] = {
         "input_shape": (3, 32, 128),
         "vocab": VOCABS["french"],
         "url": "https://github.com/felixdittrich92/OnnxTR/releases/download/v0.0.1/vitstr_base-ff62f5be.onnx",
+        "url_8_bit": "https://github.com/felixdittrich92/OnnxTR/releases/download/v0.1.2/vitstr_base_dynamic_8_bit-976c7cd6.onnx",
     },
 }
 
@@ -41,6 +43,7 @@ class ViTSTR(Engine):
     ----
         model_path: path to onnx model file
         vocab: vocabulary used for encoding
+        engine_cfg: configuration for the inference engine
         cfg: dictionary containing information about the model
         **kwargs: additional arguments to be passed to `Engine`
     """
@@ -49,10 +52,11 @@ class ViTSTR(Engine):
         self,
         model_path: str,
         vocab: str,
+        engine_cfg: EngineConfig = EngineConfig(),
         cfg: Optional[Dict[str, Any]] = None,
         **kwargs: Any,
     ) -> None:
-        super().__init__(url=model_path, **kwargs)
+        super().__init__(url=model_path, engine_cfg=engine_cfg, **kwargs)
         self.vocab = vocab
         self.cfg = cfg
 
@@ -109,6 +113,8 @@ class ViTSTRPostProcessor(RecognitionPostProcessor):
 def _vitstr(
     arch: str,
     model_path: str,
+    load_in_8_bit: bool = False,
+    engine_cfg: EngineConfig = EngineConfig(),
     **kwargs: Any,
 ) -> ViTSTR:
     # Patch the config
@@ -117,12 +123,19 @@ def _vitstr(
     _cfg["input_shape"] = kwargs.get("input_shape", _cfg["input_shape"])
 
     kwargs["vocab"] = _cfg["vocab"]
+    # Patch the url
+    model_path = default_cfgs[arch]["url_8_bit"] if load_in_8_bit and "http" in model_path else model_path
 
     # Build the model
-    return ViTSTR(model_path, cfg=_cfg, **kwargs)
+    return ViTSTR(model_path, cfg=_cfg, engine_cfg=engine_cfg, **kwargs)
 
 
-def vitstr_small(model_path: str = default_cfgs["vitstr_small"]["url"], **kwargs: Any) -> ViTSTR:
+def vitstr_small(
+    model_path: str = default_cfgs["vitstr_small"]["url"],
+    load_in_8_bit: bool = False,
+    engine_cfg: EngineConfig = EngineConfig(),
+    **kwargs: Any,
+) -> ViTSTR:
     """ViTSTR-Small as described in `"Vision Transformer for Fast and Efficient Scene Text Recognition"
     <https://arxiv.org/pdf/2105.08582.pdf>`_.
 
@@ -135,16 +148,23 @@ def vitstr_small(model_path: str = default_cfgs["vitstr_small"]["url"], **kwargs
     Args:
     ----
         model_path: path to onnx model file, defaults to url in default_cfgs
-        **kwargs: keyword arguments of the ViTSTR architecture
+        load_in_8_bit: whether to load the 8-bit quantized model, defaults to False
+        engine_cfg: configuration for the inference engine
+        **kwargs: keyword arguments of the ViTSTR architecture
 
     Returns:
     -------
         text recognition architecture
     """
-    return _vitstr("vitstr_small", model_path, **kwargs)
+    return _vitstr("vitstr_small", model_path, load_in_8_bit, engine_cfg, **kwargs)
 
 
-def vitstr_base(model_path: str = default_cfgs["vitstr_base"]["url"], **kwargs: Any) -> ViTSTR:
+def vitstr_base(
+    model_path: str = default_cfgs["vitstr_base"]["url"],
+    load_in_8_bit: bool = False,
+    engine_cfg: EngineConfig = EngineConfig(),
+    **kwargs: Any,
+) -> ViTSTR:
     """ViTSTR-Base as described in `"Vision Transformer for Fast and Efficient Scene Text Recognition"
     <https://arxiv.org/pdf/2105.08582.pdf>`_.
 
@@ -157,10 +177,12 @@ def vitstr_base(model_path: str = default_cfgs["vitstr_base"]["url"], **kwargs:
     Args:
     ----
         model_path: path to onnx model file, defaults to url in default_cfgs
-        **kwargs: keyword arguments of the ViTSTR architecture
+        load_in_8_bit: whether to load the 8-bit quantized model, defaults to False
+        engine_cfg: configuration for the inference engine
+        **kwargs: keyword arguments of the ViTSTR architecture
 
     Returns:
     -------
         text recognition architecture
     """
-    return _vitstr("vitstr_base", model_path, **kwargs)
+    return _vitstr("vitstr_base", model_path, load_in_8_bit, engine_cfg, **kwargs)
onnxtr/models/recognition/zoo.py
CHANGED

@@ -5,9 +5,9 @@
 
 from typing import Any, List
 
-from onnxtr.models.preprocessor import PreProcessor
-
 from .. import recognition
+from ..engine import EngineConfig
+from ..preprocessor import PreProcessor
 from .predictor import RecognitionPredictor
 
 __all__ = ["recognition_predictor"]
@@ -25,12 +25,14 @@ ARCHS: List[str] = [
 ]
 
 
-def _predictor(arch: Any, **kwargs: Any) -> RecognitionPredictor:
+def _predictor(
+    arch: Any, load_in_8_bit: bool = False, engine_cfg: EngineConfig = EngineConfig(), **kwargs: Any
+) -> RecognitionPredictor:
     if isinstance(arch, str):
         if arch not in ARCHS:
             raise ValueError(f"unknown architecture '{arch}'")
 
-        _model = recognition.__dict__[arch]()
+        _model = recognition.__dict__[arch](load_in_8_bit=load_in_8_bit, engine_cfg=engine_cfg)
     else:
         if not isinstance(
             arch, (recognition.CRNN, recognition.SAR, recognition.MASTER, recognition.ViTSTR, recognition.PARSeq)
@@ -47,7 +49,9 @@ def _predictor(arch: Any, **kwargs: Any) -> RecognitionPredictor:
     return predictor
 
 
-def recognition_predictor(arch: Any = "crnn_vgg16_bn", **kwargs: Any) -> RecognitionPredictor:
+def recognition_predictor(
+    arch: Any = "crnn_vgg16_bn", load_in_8_bit: bool = False, engine_cfg: EngineConfig = EngineConfig(), **kwargs: Any
+) -> RecognitionPredictor:
     """Text recognition architecture.
 
     Example::
@@ -60,10 +64,12 @@ def recognition_predictor(arch: Any = "crnn_vgg16_bn", **kwargs: Any) -> Recogni
     Args:
     ----
         arch: name of the architecture or model itself to use (e.g. 'crnn_vgg16_bn')
+        load_in_8_bit: whether to load the 8-bit quantized model, defaults to False
+        engine_cfg: configuration of inference engine
         **kwargs: optional parameters to be passed to the architecture
 
     Returns:
     -------
         Recognition predictor
     """
-    return _predictor(arch, **kwargs)
+    return _predictor(arch, load_in_8_bit, engine_cfg, **kwargs)
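
The same two knobs now thread through the recognition zoo. A short sketch of the widened signature, assuming word crops are passed as a list of numpy arrays (the predictor call interface itself is unchanged):

```python
import numpy as np

from onnxtr.models import EngineConfig
from onnxtr.models.recognition.zoo import recognition_predictor

# String archs forward load_in_8_bit / engine_cfg to the model constructor
reco = recognition_predictor("vitstr_small", load_in_8_bit=True, engine_cfg=EngineConfig())

# A dummy word crop (H, W, C); real crops come from the detection stage
crops = [np.zeros((32, 128, 3), dtype=np.uint8)]
words = reco(crops)
```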
onnxtr/models/zoo.py
CHANGED

@@ -6,6 +6,7 @@
 from typing import Any
 
 from .detection.zoo import detection_predictor
+from .engine import EngineConfig
 from .predictor import OCRPredictor
 from .recognition.zoo import recognition_predictor
 
@@ -23,6 +24,10 @@ def _predictor(
     detect_orientation: bool = False,
     straighten_pages: bool = False,
     detect_language: bool = False,
+    load_in_8_bit: bool = False,
+    det_engine_cfg: EngineConfig = EngineConfig(),
+    reco_engine_cfg: EngineConfig = EngineConfig(),
+    clf_engine_cfg: EngineConfig = EngineConfig(),
     **kwargs,
 ) -> OCRPredictor:
     # Detection
@@ -32,12 +37,16 @@ def _predictor(
         assume_straight_pages=assume_straight_pages,
         preserve_aspect_ratio=preserve_aspect_ratio,
         symmetric_pad=symmetric_pad,
+        load_in_8_bit=load_in_8_bit,
+        engine_cfg=det_engine_cfg,
     )
 
     # Recognition
     reco_predictor = recognition_predictor(
         reco_arch,
         batch_size=reco_bs,
+        load_in_8_bit=load_in_8_bit,
+        engine_cfg=reco_engine_cfg,
     )
 
     return OCRPredictor(
@@ -49,6 +58,7 @@ def _predictor(
         detect_orientation=detect_orientation,
         straighten_pages=straighten_pages,
         detect_language=detect_language,
+        clf_engine_cfg=clf_engine_cfg,
         **kwargs,
     )
 
@@ -63,6 +73,10 @@ def ocr_predictor(
     detect_orientation: bool = False,
     straighten_pages: bool = False,
     detect_language: bool = False,
+    load_in_8_bit: bool = False,
+    det_engine_cfg: EngineConfig = EngineConfig(),
+    reco_engine_cfg: EngineConfig = EngineConfig(),
+    clf_engine_cfg: EngineConfig = EngineConfig(),
     **kwargs: Any,
 ) -> OCRPredictor:
     """End-to-end OCR architecture using one model for localization, and another for text recognition.
@@ -94,6 +108,10 @@ def ocr_predictor(
             Doing so will improve performances for documents with page-uniform rotations.
         detect_language: if True, the language prediction will be added to the predictions for each
             page. Doing so will slightly deteriorate the overall latency.
+        load_in_8_bit: whether to load the 8-bit quantized model, defaults to False
+        det_engine_cfg: configuration of the detection engine
+        reco_engine_cfg: configuration of the recognition engine
+        clf_engine_cfg: configuration of the orientation classification engine
         kwargs: keyword args of `OCRPredictor`
 
     Returns:
@@ -110,5 +128,9 @@ def ocr_predictor(
         detect_orientation=detect_orientation,
         straighten_pages=straighten_pages,
         detect_language=detect_language,
+        load_in_8_bit=load_in_8_bit,
+        det_engine_cfg=det_engine_cfg,
+        reco_engine_cfg=reco_engine_cfg,
+        clf_engine_cfg=clf_engine_cfg,
         **kwargs,
     )
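
Net effect on the public API: one `load_in_8_bit` flag shared by the detection and recognition models, plus a separate `EngineConfig` per component. A minimal sketch (the full, annotated example appears further down in the METADATA diff):

```python
from onnxtr.io import DocumentFile
from onnxtr.models import EngineConfig, ocr_predictor

predictor = ocr_predictor(
    det_arch="db_resnet50",         # 8-bit variants exist for DB/LinkNet, but not for FAST
    reco_arch="crnn_vgg16_bn",
    load_in_8_bit=True,             # applied to both detection and recognition
    det_engine_cfg=EngineConfig(),  # per-component ONNX Runtime session settings
    reco_engine_cfg=EngineConfig(),
    clf_engine_cfg=EngineConfig(),  # orientation classification models
)

doc = DocumentFile.from_pdf("path/to/your/doc.pdf")
result = predictor(doc)
```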
onnxtr/py.typed
ADDED
File without changes
onnxtr/utils/geometry.py
CHANGED

@@ -26,6 +26,7 @@ __all__ = [
     "extract_crops",
     "extract_rcrops",
     "shape_translate",
+    "detach_scores",
 ]
 
 
@@ -58,6 +59,26 @@ def polygon_to_bbox(polygon: Polygon4P) -> BoundingBox:
     return (min(x), min(y)), (max(x), max(y))
 
 
+def detach_scores(boxes: List[np.ndarray]) -> Tuple[List[np.ndarray], List[np.ndarray]]:
+    """Detach the objectness scores from box predictions
+    Args:
+    ----
+        boxes: list of arrays with boxes of shape (N, 5) or (N, 5, 2)
+    Returns:
+    -------
+        a tuple of two lists: the first one contains the boxes without the objectness scores,
+        the second one contains the objectness scores
+    """
+
+    def _detach(boxes: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
+        if boxes.ndim == 2:
+            return boxes[:, :-1], boxes[:, -1]
+        return boxes[:, :-1], boxes[:, -1, -1]
+
+    loc_preds, obj_scores = zip(*(_detach(box) for box in boxes))
+    return list(loc_preds), list(obj_scores)
+
+
 def shape_translate(data: np.ndarray, format: str) -> np.ndarray:
     """Translate the shape of the input data to the desired format
 
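
The new helper splits uniformly over both box formats: for straight boxes the last column is the score, for rotated boxes the last "point" carries it. A quick sketch with made-up values:

```python
import numpy as np

from onnxtr.utils.geometry import detach_scores

# Two straight boxes in (N, 5) layout: xmin, ymin, xmax, ymax, score
straight = np.array([
    [0.1, 0.1, 0.4, 0.2, 0.9],
    [0.5, 0.5, 0.9, 0.6, 0.8],
])

loc_preds, obj_scores = detach_scores([straight])
print(loc_preds[0].shape)  # (2, 4) -> scores stripped
print(obj_scores[0])       # [0.9 0.8]
```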
@@ -106,18 +127,18 @@ def resolve_enclosing_bbox(bboxes: Union[List[BoundingBox], np.ndarray]) -> Unio
     ----
         bboxes: boxes in one of the following formats:
 
-            - an array of boxes: (*, 5), where boxes have this shape:
-            (xmin, ymin, xmax, ymax, score)
+            - an array of boxes: (*, 4), where boxes have this shape:
+            (xmin, ymin, xmax, ymax)
 
             - a list of BoundingBox
 
     Returns:
     -------
-        a (1, 5) array (enclosing boxarray), or a BoundingBox
+        a (1, 4) array (enclosing boxarray), or a BoundingBox
     """
     if isinstance(bboxes, np.ndarray):
-        xmin, ymin, xmax, ymax, score = np.split(bboxes, 5, axis=1)
-        return np.array([xmin.min(), ymin.min(), xmax.max(), ymax.max(), score.mean()])
+        xmin, ymin, xmax, ymax = np.split(bboxes, 4, axis=1)
+        return np.array([xmin.min(), ymin.min(), xmax.max(), ymax.max()])
     else:
         x, y = zip(*[point for box in bboxes for point in box])
         return (min(x), min(y)), (max(x), max(y))
@@ -130,21 +151,21 @@ def resolve_enclosing_rbbox(rbboxes: List[np.ndarray], intermed_size: int = 1024
     ----
         rbboxes: boxes in one of the following formats:
 
-            - an array of boxes: (*, 5, 2), where boxes have this shape:
-            (x1, y1), (x2, y2), (x3, y3), (x4, y4), (score, score)
+            - an array of boxes: (*, 4, 2), where boxes have this shape:
+            (x1, y1), (x2, y2), (x3, y3), (x4, y4)
 
             - a list of BoundingBox
         intermed_size: size of the intermediate image
 
     Returns:
     -------
-        a (5, 2) array (enclosing rotated box)
+        a (4, 2) array (enclosing rotated box)
     """
     cloud: np.ndarray = np.concatenate(rbboxes, axis=0)
     # Convert to absolute for minAreaRect
     cloud *= intermed_size
     rect = cv2.minAreaRect(cloud.astype(np.int32))
-    return cv2.boxPoints(rect) / intermed_size  # type: ignore[operator]
+    return cv2.boxPoints(rect) / intermed_size  # type: ignore[return-value]
 
 
 def rotate_abs_points(points: np.ndarray, angle: float = 0.0) -> np.ndarray:
@@ -274,7 +295,7 @@ def rotate_boxes(
 
     Args:
     ----
-        loc_preds: (N, 5) or (N, 5, 2) array of RELATIVE boxes
+        loc_preds: (N, 4) or (N, 4, 2) array of RELATIVE boxes
         angle: angle between -90 and +90 degrees
         orig_shape: shape of the origin image
         min_angle: minimum angle to rotate boxes
@@ -362,7 +383,7 @@ def rotate_image(
             # Pad height
             else:
                 h_pad, w_pad = int(rot_img.shape[1] * image.shape[0] / image.shape[1] - rot_img.shape[0]), 0
-            rot_img = np.pad(rot_img, ((h_pad // 2, h_pad - h_pad // 2), (w_pad // 2, w_pad - w_pad // 2), (0, 0)))
+            rot_img = np.pad(rot_img, ((h_pad // 2, h_pad - h_pad // 2), (w_pad // 2, w_pad - w_pad // 2), (0, 0)))  # type: ignore[assignment]
         if preserve_origin_shape:
             # rescale
             rot_img = cv2.resize(rot_img, image.shape[:-1][::-1], interpolation=cv2.INTER_LINEAR)
@@ -495,4 +516,4 @@ def extract_rcrops(
         )
         for idx in range(_boxes.shape[0])
     ]
-    return crops
+    return crops  # type: ignore[return-value]
onnxtr/version.py
CHANGED

@@ -1 +1 @@
-__version__ = 'v0.1.2'
+__version__ = 'v0.3.0'

{onnxtr-0.1.2.dist-info → onnxtr-0.3.0.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: onnxtr
-Version: 0.1.2
+Version: 0.3.0
 Summary: Onnx Text Recognition (OnnxTR): docTR Onnx-Wrapper for high-performance OCR on documents.
 Author-email: Felix Dittrich <felixdittrich92@gmail.com>
 Maintainer: Felix Dittrich
@@ -228,7 +228,7 @@ License-File: LICENSE
 Requires-Dist: numpy <2.0.0,>=1.16.0
 Requires-Dist: scipy <2.0.0,>=1.4.0
 Requires-Dist: opencv-python <5.0.0,>=4.5.0
-Requires-Dist: pypdfium2 <5.0.0,>=4.0.0
+Requires-Dist: pypdfium2 <5.0.0,>=4.11.0
 Requires-Dist: pyclipper <2.0.0,>=1.2.0
 Requires-Dist: shapely <3.0.0,>=1.6.0
 Requires-Dist: rapidfuzz <4.0.0,>=3.0.0
@@ -275,7 +275,7 @@ Requires-Dist: mplcursors >=0.3 ; extra == 'viz'
 [](https://codecov.io/gh/felixdittrich92/OnnxTR)
 [](https://app.codacy.com/gh/felixdittrich92/OnnxTR/dashboard?utm_source=gh&utm_medium=referral&utm_content=&utm_campaign=Badge_grade)
 [](https://www.codefactor.io/repository/github/felixdittrich92/onnxtr)
-[](https://pypi.org/project/OnnxTR/)
+[](https://pypi.org/project/OnnxTR/)
 
 > :warning: Please note that this is a wrapper around the [doctr](https://github.com/mindee/doctr) library to provide a Onnx pipeline for docTR. For feature requests, which are not directly related to the Onnx pipeline, please refer to the base project.
 
@@ -284,8 +284,9 @@ Requires-Dist: mplcursors >=0.3 ; extra == 'viz'
 What you can expect from this repository:
 
 - efficient ways to parse textual information (localize and identify each word) from your documents
-- a Onnx pipeline for docTR, a wrapper around the [doctr](https://github.com/mindee/doctr) library
+- a Onnx pipeline for docTR, a wrapper around the [doctr](https://github.com/mindee/doctr) library - no PyTorch or TensorFlow dependencies
 - more lightweight package with faster inference latency and less required resources
+- 8-Bit quantized models for faster inference on CPU
 
 
 
@@ -335,11 +336,11 @@ multi_img_doc = DocumentFile.from_images(["path/to/page1.jpg", "path/to/page2.jp
 
 ### Putting it together
 
-Let's use the default model for an example:
+Let's use the default `ocr_predictor` model for an example:
 
 ```python
 from onnxtr.io import DocumentFile
-from onnxtr.models import ocr_predictor
+from onnxtr.models import ocr_predictor, EngineConfig
 
 model = ocr_predictor(
     det_arch='fast_base',  # detection architecture
@@ -356,8 +357,15 @@ model = ocr_predictor(
     detect_language=False, # set to `True` if the language of the pages should be detected (default: False)
     # DocumentBuilder specific parameters
     resolve_lines=True,  # whether words should be automatically grouped into lines (default: True)
-    resolve_blocks=True,  # whether lines should be automatically grouped into blocks (default: True)
+    resolve_blocks=False,  # whether lines should be automatically grouped into blocks (default: False)
     paragraph_break=0.035, # relative length of the minimum space separating paragraphs (default: 0.035)
+    # OnnxTR specific parameters
+    # NOTE: 8-Bit quantized models are not available for FAST detection models and can in general lead to poorer accuracy
+    load_in_8_bit=False, # set to `True` to load 8-bit quantized models instead of the full precision ones (default: False)
+    # Advanced engine configuration options
+    det_engine_cfg=EngineConfig(), # detection model engine configuration (default: internal predefined configuration)
+    reco_engine_cfg=EngineConfig(), # recognition model engine configuration (default: internal predefined configuration)
+    clf_engine_cfg=EngineConfig(), # classification (orientation) model engine configuration (default: internal predefined configuration)
 )
 # PDF
 doc = DocumentFile.from_pdf("path/to/your/doc.pdf")
@@ -395,6 +403,39 @@ for output in xml_output:
 
 ```
 
+<details>
+<summary>Advanced engine configuration options</summary>
+
+You can also define advanced engine configurations for the models / predictors:
+
+```python
+from onnxruntime import SessionOptions
+
+from onnxtr.models import ocr_predictor, EngineConfig
+
+general_options = SessionOptions()  # For configuration options see: https://onnxruntime.ai/docs/api/python/api_summary.html#sessionoptions
+general_options.enable_cpu_mem_arena = False
+
+# NOTE: The following would force execution on the GPU only - if no GPU is available, it will raise an error
+# List of strings e.g. ["CUDAExecutionProvider", "CPUExecutionProvider"] or a list of tuples with the provider and its options e.g.
+# [("CUDAExecutionProvider", {"device_id": 0}), ("CPUExecutionProvider", {"arena_extend_strategy": "kSameAsRequested"})]
+providers = [("CUDAExecutionProvider", {"device_id": 0})]  # For available providers see: https://onnxruntime.ai/docs/execution-providers/
+
+engine_config = EngineConfig(
+    session_options=general_options,
+    providers=providers
+)
+# We use the default predictor with the custom engine configuration
+# NOTE: You can define different engine configurations for detection, recognition and classification depending on your needs
+predictor = ocr_predictor(
+    det_engine_cfg=engine_config,
+    reco_engine_cfg=engine_config,
+    clf_engine_cfg=engine_config
+)
+```
+
+</details>
+
 ## Loading custom exported models
 
 You can also load docTR custom exported models:
@@ -438,9 +479,9 @@ predictor.list_archs()
         'linknet_resnet18',
         'linknet_resnet34',
         'linknet_resnet50',
-        'fast_tiny',
-        'fast_small',
-        'fast_base'
+        'fast_tiny',  # No 8-bit support
+        'fast_small',  # No 8-bit support
+        'fast_base'  # No 8-bit support
     ],
     'recognition archs':
         [
@@ -469,14 +510,38 @@ NOTE:
 
 ### Benchmarks
 
-The benchmarks was measured on a `i7-14700K Intel CPU`.
+The CPU benchmarks were measured on a `i7-14700K Intel CPU`.
+
+The GPU benchmarks were measured on a `RTX 4080 Nvidia GPU`.
+
+Benchmarking performed on the FUNSD dataset and CORD dataset.
+
+docTR / OnnxTR models used for the benchmarks are `fast_base` (full precision) | `db_resnet50` (8-bit variant) for detection and `crnn_vgg16_bn` for recognition.
+
+The smallest combination in OnnxTR (docTR) of `db_mobilenet_v3_large` and `crnn_mobilenet_v3_small` takes as comparison `~0.17s / Page` on the FUNSD dataset and `~0.12s / Page` on the CORD dataset in **full precision**.
+
+- CPU benchmarks:
+
+|Library                          |FUNSD (199 pages)              |CORD (900 pages)               |
+|---------------------------------|-------------------------------|-------------------------------|
+|docTR (CPU) - v0.8.1             | ~1.29s / Page                 | ~0.60s / Page                 |
+|**OnnxTR (CPU)** - v0.1.2        | ~0.57s / Page                 | **~0.25s / Page**             |
+|**OnnxTR (CPU) 8-bit** - v0.1.2  | **~0.38s / Page**             | **~0.14s / Page**             |
+|EasyOCR (CPU) - v1.7.1           | ~1.96s / Page                 | ~1.75s / Page                 |
+|**PyTesseract (CPU)** - v0.3.10  | **~0.50s / Page**             | ~0.52s / Page                 |
+|Surya (line) (CPU) - v0.4.4      | ~48.76s / Page                | ~35.49s / Page                |
+|PaddleOCR (CPU) - no cls - v2.7.3| ~1.27s / Page                 | ~0.38s / Page                 |
 
-
+- GPU benchmarks:
 
-
-
-
-
+|Library                              |FUNSD (199 pages)              |CORD (900 pages)               |
+|-------------------------------------|-------------------------------|-------------------------------|
+|docTR (GPU) - v0.8.1                 | ~0.07s / Page                 | ~0.05s / Page                 |
+|**docTR (GPU) float16** - v0.8.1     | **~0.06s / Page**             | **~0.03s / Page**             |
+|OnnxTR (GPU) - v0.1.2                | **~0.06s / Page**             | ~0.04s / Page                 |
+|EasyOCR (GPU) - v1.7.1               | ~0.31s / Page                 | ~0.19s / Page                 |
+|Surya (GPU) float16 - v0.4.4         | ~3.70s / Page                 | ~2.81s / Page                 |
+|**PaddleOCR (GPU) - no cls - v2.7.3**| ~0.08s / Page                 | **~0.03s / Page**             |
 
 ## Citation
 

{onnxtr-0.1.2.dist-info → onnxtr-0.3.0.dist-info}/RECORD
CHANGED

@@ -1,52 +1,55 @@
 onnxtr/__init__.py,sha256=h7Wc2tuHLsaoCk5xNpEFEK-g11A6SJA7nAasA76TQ_Y,100
 onnxtr/file_utils.py,sha256=WjUKalEdR53aoeIY4e-ihy3r7J_C9qFxL40JHGPfutc,1107
-onnxtr/version.py,sha256=
+onnxtr/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+onnxtr/version.py,sha256=3WzdRDDiKxM8JAvNhW3PVopgIZrHCvYuR4insIGe4bU,23
 onnxtr/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 onnxtr/contrib/artefacts.py,sha256=tdmfhvfXVRYEH7uj4_hqf2cuUGoTieyNK8bXsD3zHwo,5383
 onnxtr/contrib/base.py,sha256=PoCKtOIgj7u4xl-V0eBVh-QmVeTyk_eEggFHQ8R34AI,3445
 onnxtr/io/__init__.py,sha256=kS7tKGFvzxOCWBOun-Y8n9CsziwRKNynjwpZEUUI03M,106
-onnxtr/io/elements.py,sha256=
+onnxtr/io/elements.py,sha256=h-IxpFqXrvg-fOhpnOqpGFLdG-lR-xYYIxk3chy_MN8,17769
 onnxtr/io/html.py,sha256=Em_7PjZ56SugJ9bjjcWLCMVe5ee6uUMKeZovNxJFAXw,737
 onnxtr/io/image.py,sha256=4tLTh2bGdA0ohh3a6mV6xD0KqNOtIVi5lJ06XSmeyMI,1759
-onnxtr/io/pdf.py,sha256=
+onnxtr/io/pdf.py,sha256=tD0klmxI-gkMXp56f_ZXWyPHLsUBKa_xlhNTtGV6tpU,1367
 onnxtr/io/reader.py,sha256=BA7DPhW-Gkmce_ZfzrOl4H3pSXVy2JBeQEuY3pWrBFg,2852
-onnxtr/models/__init__.py,sha256=
-onnxtr/models/_utils.py,sha256=
-onnxtr/models/builder.py,sha256=
-onnxtr/models/engine.py,sha256=
-onnxtr/models/zoo.py,sha256=
+onnxtr/models/__init__.py,sha256=Rg-5P2e622q-5ScfxVE3G8GXa51HUPS7b0jkvdukFzM,134
+onnxtr/models/_utils.py,sha256=KncsNcoWqbsxFwduce2STuGHLhv63nXEHv7CMuh6wYA,6606
+onnxtr/models/builder.py,sha256=Bzg-XHZc5k16Ti2XeV9hm4POTHofe581Azq1a3d1O6E,14296
+onnxtr/models/engine.py,sha256=SOK-KTNWMozIjErWQAY56iB2eXyRD44Q08TdL9YOVAY,4717
+onnxtr/models/zoo.py,sha256=MJIT3OZ4kyj2xBfQdCVxl2uBdiLCnnv8czPtHbZl5e4,5343
 onnxtr/models/classification/__init__.py,sha256=h1bZs55iLJBMATtzS4ntTKwfD6OGXBiiqGv_hEnOFnE,41
-onnxtr/models/classification/zoo.py,sha256=
+onnxtr/models/classification/zoo.py,sha256=1oaKfW646IVa-MmLqGi58BtBWdHdu4hI8r79wVdLQ2o,3426
 onnxtr/models/classification/models/__init__.py,sha256=rohbM6ZQslfYchi7feZwwh-sX3XXRUhgtEJQeurAytQ,24
-onnxtr/models/classification/models/mobilenet.py,sha256=
+onnxtr/models/classification/models/mobilenet.py,sha256=vTBHhA1okhnCgn36qKlM2eDCm4ftFZDH8Bk2VpkWm4U,4880
 onnxtr/models/classification/predictor/__init__.py,sha256=ERmmOxz_9mUkIuccNbzUa5Y6gVLLVDdyc4cCxbCCUbY,20
-onnxtr/models/classification/predictor/base.py,sha256=
+onnxtr/models/classification/predictor/base.py,sha256=Xfaj2XlaJuQ2R81XqF5RB0Wcvzd4wh7Z6j1ifn2niFc,2097
 onnxtr/models/detection/__init__.py,sha256=h1bZs55iLJBMATtzS4ntTKwfD6OGXBiiqGv_hEnOFnE,41
 onnxtr/models/detection/core.py,sha256=ZmVDHLJ1l4LQ8rFSKc7enXDkGcOWrcQv4H0SJWyLsag,3584
-onnxtr/models/detection/zoo.py,sha256=
+onnxtr/models/detection/zoo.py,sha256=dpxLC7jMNZyl3a-o4dSCwsMnqtgoRwxy4psZ8WPC6cE,2725
+onnxtr/models/detection/_utils/__init__.py,sha256=oPkIYbySSbLsOk02wVPNO9bUuywC47YjaenfyTwfOsw,20
+onnxtr/models/detection/_utils/base.py,sha256=fOWnvBKluWKTNXSBKg3U6ckzYuF7onEKQ4AvheuTJQk,2346
 onnxtr/models/detection/models/__init__.py,sha256=6Ea6knYrVCR2jAmPlsVWmCdHe-c6lSRETSAuZGfhx8I,85
-onnxtr/models/detection/models/differentiable_binarization.py,sha256=
-onnxtr/models/detection/models/fast.py,sha256=
-onnxtr/models/detection/models/linknet.py,sha256=
+onnxtr/models/detection/models/differentiable_binarization.py,sha256=o6Y0iDRHxArLqBE-EKz3Ru9l6L7sqHmHkNny60-gV4Q,6734
+onnxtr/models/detection/models/fast.py,sha256=YUnbKLIZdeMd-lfFyWEtRbxpiXsRBizLb0VpcruJD-U,6293
+onnxtr/models/detection/models/linknet.py,sha256=aXOZ6ieczvAoJQcVuVpJZVXqfEIL4OHr5NqQ5nEI2QY,6771
 onnxtr/models/detection/postprocessor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-onnxtr/models/detection/postprocessor/base.py,sha256=
+onnxtr/models/detection/postprocessor/base.py,sha256=FIhSNktNLQjGWup3xEMaOCjKQmRvtt0h8M9IFQk_5jM,5823
 onnxtr/models/detection/predictor/__init__.py,sha256=ERmmOxz_9mUkIuccNbzUa5Y6gVLLVDdyc4cCxbCCUbY,20
-onnxtr/models/detection/predictor/base.py,sha256=
+onnxtr/models/detection/predictor/base.py,sha256=bt8M6I14tWC9DYjrFrqg-AU5u670_uPpuC7LmcegcCQ,2328
 onnxtr/models/predictor/__init__.py,sha256=XL25XkRkgyK7mldF-CWhg2MMakSdP5vLpDLwL59hphk,25
-onnxtr/models/predictor/base.py,sha256=
-onnxtr/models/predictor/predictor.py,sha256=
+onnxtr/models/predictor/base.py,sha256=wROvnIvMQb_SPPX8m8_RmBSqZqIDlH7Vfo81D8teQQA,8860
+onnxtr/models/predictor/predictor.py,sha256=kmU6hj89k1QvFpljr3JEWneT7X5RQLcUNn3Ecbb1jm8,6237
 onnxtr/models/preprocessor/__init__.py,sha256=ERmmOxz_9mUkIuccNbzUa5Y6gVLLVDdyc4cCxbCCUbY,20
 onnxtr/models/preprocessor/base.py,sha256=f0t0rMCzvuxwgq7jlKvcVWyjeDOx7yCLUw52quEaETM,3990
 onnxtr/models/recognition/__init__.py,sha256=h1bZs55iLJBMATtzS4ntTKwfD6OGXBiiqGv_hEnOFnE,41
 onnxtr/models/recognition/core.py,sha256=0Q1dVXqRcDUr_ycT5tpoSH9-zuDF58GtnmxWpUS8Ibo,739
 onnxtr/models/recognition/utils.py,sha256=04abbjx-_OuF5iEANWIAOK3tQQl1tExPmBQx4IG04Lc,3569
-onnxtr/models/recognition/zoo.py,sha256=
+onnxtr/models/recognition/zoo.py,sha256=F0hiymT8Tfv115u_34PvmD8rpXw1fPinYno1DE9a8bo,2511
 onnxtr/models/recognition/models/__init__.py,sha256=IXfiuzzkft8O1CpBZWYTpFw19y49mt5rJ_iGSdaWiU0,105
-onnxtr/models/recognition/models/crnn.py,sha256=
-onnxtr/models/recognition/models/master.py,sha256=
-onnxtr/models/recognition/models/parseq.py,sha256=
-onnxtr/models/recognition/models/sar.py,sha256=
-onnxtr/models/recognition/models/vitstr.py,sha256=
+onnxtr/models/recognition/models/crnn.py,sha256=Ki2DeIQahvIJterFs2RYf-y21LFmFVuhmoem3-nVlXQ,8963
+onnxtr/models/recognition/models/master.py,sha256=VgPwyCpVv6UmTDaeeeGWWgcKPKeEq6Osif-Tq97xmj8,4777
+onnxtr/models/recognition/models/parseq.py,sha256=Ig0Tu31KgVEVWOX630VhEV2hoi5QtABxBrTsgiguK74,4577
+onnxtr/models/recognition/models/sar.py,sha256=OTyXC5_0-DPghHG9zY4ZCnFqAIf-3eBlWoRQOTfjZTc,4588
+onnxtr/models/recognition/models/vitstr.py,sha256=xED7mK1b2d3dUJkLjiFn1JQKe_CU0JE7fhPnEVilT7s,6054
 onnxtr/models/recognition/predictor/__init__.py,sha256=ERmmOxz_9mUkIuccNbzUa5Y6gVLLVDdyc4cCxbCCUbY,20
 onnxtr/models/recognition/predictor/_utils.py,sha256=ZNm5I7ibiWfTlz302uiifCkUOu65YWa-oUBUMPrrUuQ,3406
 onnxtr/models/recognition/predictor/base.py,sha256=YvqSNEM3rCEttxl6hsC9zl1R97N9zO2WZfD5_-nfkR0,2483
@@ -56,15 +59,15 @@ onnxtr/utils/__init__.py,sha256=pESRJKtcQyjRxiMgZPhtPYeLbCj-YSGyMVRHTbcMONU,94
 onnxtr/utils/common_types.py,sha256=eC_NyIwbo9qVF33LiNPqHKfyabWq9mYEKD9gAloo5UU,601
 onnxtr/utils/data.py,sha256=Dh0mgeHJhyPwmm63J90uDVmIYbrp63hh1_SnYLnpgJI,4354
 onnxtr/utils/fonts.py,sha256=OiOHFwkjN4L7QBrzMi7Ex7qj_KcTEJ1sHEJWSfiGNZU,1281
-onnxtr/utils/geometry.py,sha256=
+onnxtr/utils/geometry.py,sha256=u9ei6WW8Yd29rtwnrDYercAY-tWkOLkzBd5Oi6NNyDI,17774
 onnxtr/utils/multithreading.py,sha256=30T7AylM3rb52ZEI3Pk1pfB0VYraTbc7yO2vNODVVFY,2011
 onnxtr/utils/reconstitution.py,sha256=Hx1_ddLevKLzuxXc19UelPdsGlAwqi4f6vRSYKHDUB4,2617
 onnxtr/utils/repr.py,sha256=kfbjGL6KymGT8spo2UL4FJXZ0XRwa7CO7Y1dTVR8dIk,2129
 onnxtr/utils/visualization.py,sha256=CX09qvDnNIw3BFW5F3jM4R9OcpLWAeZyoDyTAOGRvls,9925
 onnxtr/utils/vocabs.py,sha256=SCQ4XQjbHSxunj1tg2iHRiPfE8OaTAMhcJbKq5BNvFs,3138
-onnxtr-0.1.2.dist-info/LICENSE,sha256=
-onnxtr-0.1.2.dist-info/METADATA,sha256=
-onnxtr-0.1.2.dist-info/WHEEL,sha256=
-onnxtr-0.1.2.dist-info/top_level.txt,sha256=
-onnxtr-0.1.2.dist-info/zip-safe,sha256=
-onnxtr-0.1.2.dist-info/RECORD,,
+onnxtr-0.3.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+onnxtr-0.3.0.dist-info/METADATA,sha256=0cPAKQr-w-WHimev0v9mtys9NetS_oYHZHTslgcSNu4,29756
+onnxtr-0.3.0.dist-info/WHEEL,sha256=mguMlWGMX-VHnMpKOjjQidIo1ssRlCFu4a4mBpz1s2M,91
+onnxtr-0.3.0.dist-info/top_level.txt,sha256=r_MSUTpspp4pWEEWvly-s7ZkfCg1KwrK6-kBlXkWKU8,7
+onnxtr-0.3.0.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+onnxtr-0.3.0.dist-info/RECORD,,

{onnxtr-0.1.2.dist-info → onnxtr-0.3.0.dist-info}/LICENSE
File without changes

{onnxtr-0.1.2.dist-info → onnxtr-0.3.0.dist-info}/zip-safe
File without changes