doctra 0.3.3__py3-none-any.whl → 0.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- doctra/__init__.py +4 -0
- doctra/cli/main.py +170 -9
- doctra/cli/utils.py +2 -3
- doctra/engines/image_restoration/__init__.py +10 -0
- doctra/engines/image_restoration/docres_engine.py +561 -0
- doctra/engines/vlm/outlines_types.py +13 -9
- doctra/engines/vlm/service.py +4 -2
- doctra/exporters/excel_writer.py +89 -0
- doctra/parsers/enhanced_pdf_parser.py +374 -0
- doctra/parsers/structured_pdf_parser.py +6 -0
- doctra/parsers/table_chart_extractor.py +6 -0
- doctra/third_party/docres/data/MBD/MBD.py +110 -0
- doctra/third_party/docres/data/MBD/MBD_utils.py +291 -0
- doctra/third_party/docres/data/MBD/infer.py +151 -0
- doctra/third_party/docres/data/MBD/model/deep_lab_model/aspp.py +95 -0
- doctra/third_party/docres/data/MBD/model/deep_lab_model/backbone/__init__.py +13 -0
- doctra/third_party/docres/data/MBD/model/deep_lab_model/backbone/drn.py +402 -0
- doctra/third_party/docres/data/MBD/model/deep_lab_model/backbone/mobilenet.py +151 -0
- doctra/third_party/docres/data/MBD/model/deep_lab_model/backbone/resnet.py +170 -0
- doctra/third_party/docres/data/MBD/model/deep_lab_model/backbone/xception.py +288 -0
- doctra/third_party/docres/data/MBD/model/deep_lab_model/decoder.py +59 -0
- doctra/third_party/docres/data/MBD/model/deep_lab_model/deeplab.py +81 -0
- doctra/third_party/docres/data/MBD/model/deep_lab_model/sync_batchnorm/__init__.py +12 -0
- doctra/third_party/docres/data/MBD/model/deep_lab_model/sync_batchnorm/batchnorm.py +282 -0
- doctra/third_party/docres/data/MBD/model/deep_lab_model/sync_batchnorm/comm.py +129 -0
- doctra/third_party/docres/data/MBD/model/deep_lab_model/sync_batchnorm/replicate.py +88 -0
- doctra/third_party/docres/data/MBD/model/deep_lab_model/sync_batchnorm/unittest.py +29 -0
- doctra/third_party/docres/data/preprocess/crop_merge_image.py +142 -0
- doctra/third_party/docres/inference.py +370 -0
- doctra/third_party/docres/models/restormer_arch.py +308 -0
- doctra/third_party/docres/utils.py +464 -0
- doctra/ui/app.py +8 -14
- doctra/utils/structured_utils.py +5 -2
- doctra/version.py +1 -1
- {doctra-0.3.3.dist-info → doctra-0.4.1.dist-info}/METADATA +1 -1
- doctra-0.4.1.dist-info/RECORD +67 -0
- doctra-0.3.3.dist-info/RECORD +0 -44
- {doctra-0.3.3.dist-info → doctra-0.4.1.dist-info}/WHEEL +0 -0
- {doctra-0.3.3.dist-info → doctra-0.4.1.dist-info}/licenses/LICENSE +0 -0
- {doctra-0.3.3.dist-info → doctra-0.4.1.dist-info}/top_level.txt +0 -0
doctra/third_party/docres/utils.py
ADDED
@@ -0,0 +1,464 @@
+from collections import OrderedDict
+import os
+import numpy as np
+import torch
+import torch.nn.functional as F
+import os
+from skimage.filters import threshold_sauvola
+import cv2
+
+def second2hours(seconds):
+    h = seconds//3600
+    seconds %= 3600
+    m = seconds//60
+    seconds %= 60
+
+    hms = '{:d} H : {:d} Min'.format(int(h),int(m))
+    return hms
+
+
+def dict2string(loss_dict):
+    loss_string = ''
+    for key, value in loss_dict.items():
+        loss_string += key+' {:.4f}, '.format(value)
+    return loss_string[:-2]
+def mkdir(dir):
+    if not os.path.exists(dir):
+        os.makedirs(dir)
+
+def convert_state_dict(state_dict):
+    """Converts a state dict saved from a dataParallel module to normal
+    module state_dict inplace
+    :param state_dict is the loaded DataParallel model_state
+
+    """
+    new_state_dict = OrderedDict()
+    for k, v in state_dict.items():
+        name = k[7:]  # remove `module.`
+        new_state_dict[name] = v
+    return new_state_dict
+
+
+def get_lr(optimizer):
+    for param_group in optimizer.param_groups:
+        return float(param_group['lr'])
+
+
+def torch2cvimg(tensor,min=0,max=1):
+    '''
+    input:
+        tensor -> torch.tensor BxCxHxW C can be 1,3
+    return
+        im -> ndarray uint8 HxWxC
+    '''
+    im_list = []
+    for i in range(tensor.shape[0]):
+        im = tensor.detach().cpu().data.numpy()[i]
+        im = im.transpose(1,2,0)
+        im = np.clip(im,min,max)
+        im = ((im-min)/(max-min)*255).astype(np.uint8)
+        im_list.append(im)
+    return im_list
+def cvimg2torch(img,min=0,max=1):
+    '''
+    input:
+        im -> ndarray uint8 HxWxC
+    return
+        tensor -> torch.tensor BxCxHxW
+    '''
+    img = img.astype(float) / 255.0
+    img = img.transpose(2, 0, 1) # NHWC -> NCHW
+    img = np.expand_dims(img, 0)
+    img = torch.from_numpy(img).float()
+    return img
+
+
+def setup_seed(seed):
+    # np.random.seed(seed)
+    # random.seed(seed)
+    # torch.manual_seed(seed) #cpu
+    # torch.cuda.manual_seed_all(seed) #并行gpu
+    torch.backends.cudnn.deterministic = True #cpu/gpu结果一致
+    # torch.backends.cudnn.benchmark = False #训练集变化不大时使训练加速
+
+def SauvolaModBinarization(image,n1=51,n2=51,k1=0.3,k2=0.3,default=True):
+    '''
+    Binarization using Sauvola's algorithm
+    @name : SauvolaModBinarization
+    parameters
+    @param image (numpy array of shape (3/1) of type np.uint8): color or gray scale image
+    optional parameters
+    @param n1 (int) : window size for running sauvola during the first pass
+    @param n2 (int): window size for running sauvola during the second pass
+    @param k1 (float): k value corresponding to sauvola during the first pass
+    @param k2 (float): k value corresponding to sauvola during the second pass
+    @param default (bool) : bollean variable to set the above parameter as default.
+    @param default is set to True : thus default values of the above optional parameters (n1,n2,k1,k2) are set to
+        n1 = 5 % of min(image height, image width)
+        n2 = 10 % of min(image height, image width)
+        k1 = 0.5
+        k2 = 0.5
+    Returns
+    @return A binary image of same size as @param image
+
+    @cite https://drive.google.com/file/d/1D3CyI5vtodPJeZaD2UV5wdcaIMtkBbdZ/view?usp=sharing
+    '''
+
+    if(default):
+        n1 = int(0.05*min(image.shape[0],image.shape[1]))
+        if (n1%2==0):
+            n1 = n1+1
+        n2 = int(0.1*min(image.shape[0],image.shape[1]))
+        if (n2%2==0):
+            n2 = n2+1
+        k1 = 0.5
+        k2 = 0.5
+    if(image.ndim==3):
+        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+    else:
+        gray = np.copy(image)
+    T1 = threshold_sauvola(gray, window_size=n1,k=k1)
+    max_val = np.amax(gray)
+    min_val = np.amin(gray)
+    C = np.copy(T1)
+    C = C.astype(np.float32)
+    C[gray > T1] = (gray[gray > T1] - T1[gray > T1])/(max_val - T1[gray > T1])
+    C[gray <= T1] = 0
+    C = C * 255.0
+    new_in = np.copy(C.astype(np.uint8))
+    T2 = threshold_sauvola(new_in, window_size=n2,k=k2)
+    binary = np.copy(gray)
+    binary[new_in <= T2] = 0
+    binary[new_in > T2] = 255
+    return binary,T2
+
+
+def getBasecoord(h,w):
+    base_coord0 = np.tile(np.arange(h).reshape(h,1),(1,w)).astype(np.float32)
+    base_coord1 = np.tile(np.arange(w).reshape(1,w),(h,1)).astype(np.float32)
+    base_coord = np.concatenate((np.expand_dims(base_coord1,-1),np.expand_dims(base_coord0,-1)),-1)
+    return base_coord
+
+
+
+
+
+
+import numpy as np
+from scipy import ndimage as ndi
+
+# lookup tables for bwmorph_thin
+
+G123_LUT = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1,
+                     0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                     0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0,
+                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0,
+                     1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,
+                     0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                     0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                     0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0,
+                     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1,
+                     0, 0, 0], dtype=np.bool)
+
+G123P_LUT = np.array([0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0,
+                      0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,
+                      1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                      0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0,
+                      0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,
+                      0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                      0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0,
+                      1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1,
+                      0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+                      0, 0, 0], dtype=np.bool)
+
+def bwmorph(image, n_iter=None):
+    """
+    Perform morphological thinning of a binary image
+
+    Parameters
+    ----------
+    image : binary (M, N) ndarray
+        The image to be thinned.
+
+    n_iter : int, number of iterations, optional
+        Regardless of the value of this parameter, the thinned image
+        is returned immediately if an iteration produces no change.
+        If this parameter is specified it thus sets an upper bound on
+        the number of iterations performed.
+
+    Returns
+    -------
+    out : ndarray of bools
+        Thinned image.
+
+    See also
+    --------
+    skeletonize
+
+    Notes
+    -----
+    This algorithm [1]_ works by making multiple passes over the image,
+    removing pixels matching a set of criteria designed to thin
+    connected regions while preserving eight-connected components and
+    2 x 2 squares [2]_. In each of the two sub-iterations the algorithm
+    correlates the intermediate skeleton image with a neighborhood mask,
+    then looks up each neighborhood in a lookup table indicating whether
+    the central pixel should be deleted in that sub-iteration.
+
+    References
+    ----------
+    .. [1] Z. Guo and R. W. Hall, "Parallel thinning with
+       two-subiteration algorithms," Comm. ACM, vol. 32, no. 3,
+       pp. 359-373, 1989.
+    .. [2] Lam, L., Seong-Whan Lee, and Ching Y. Suen, "Thinning
+       Methodologies-A Comprehensive Survey," IEEE Transactions on
+       Pattern Analysis and Machine Intelligence, Vol 14, No. 9,
+       September 1992, p. 879
+
+    Examples
+    --------
+    >>> square = np.zeros((7, 7), dtype=np.uint8)
+    >>> square[1:-1, 2:-2] = 1
+    >>> square[0,1] = 1
+    >>> square
+    array([[0, 1, 0, 0, 0, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 1, 1, 1, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
+    >>> skel = bwmorph_thin(square)
+    >>> skel.astype(np.uint8)
+    array([[0, 1, 0, 0, 0, 0, 0],
+           [0, 0, 1, 0, 0, 0, 0],
+           [0, 0, 0, 1, 0, 0, 0],
+           [0, 0, 0, 1, 0, 0, 0],
+           [0, 0, 0, 1, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0],
+           [0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
+    """
+    # check parameters
+    if n_iter is None:
+        n = -1
+    elif n_iter <= 0:
+        raise ValueError('n_iter must be > 0')
+    else:
+        n = n_iter
+
+    # check that we have a 2d binary image, and convert it
+    # to uint8
+    skel = np.array(image).astype(np.uint8)
+
+    if skel.ndim != 2:
+        raise ValueError('2D array required')
+    if not np.all(np.in1d(image.flat,(0,1))):
+        raise ValueError('Image contains values other than 0 and 1')
+
+    # neighborhood mask
+    mask = np.array([[ 8, 4, 2],
+                     [16, 0, 1],
+                     [32, 64,128]],dtype=np.uint8)
+
+    # iterate either 1) indefinitely or 2) up to iteration limit
+    while n != 0:
+        before = np.sum(skel) # count points before thinning
+
+        # for each subiteration
+        for lut in [G123_LUT, G123P_LUT]:
+            # correlate image with neighborhood mask
+            N = ndi.correlate(skel, mask, mode='constant')
+            # take deletion decision from this subiteration's LUT
+            D = np.take(lut, N)
+            # perform deletion
+            skel[D] = 0
+
+        after = np.sum(skel) # coint points after thinning
+
+        if before == after:
+            # iteration had no effect: finish
+            break
+
+        # count down to iteration limit (or endlessly negative)
+        n -= 1
+
+    return skel.astype(np.bool)
+
+"""
+# here's how to make the LUTs
+def nabe(n):
+    return np.array([n>>i&1 for i in range(0,9)]).astype(np.bool)
+def hood(n):
+    return np.take(nabe(n), np.array([[3, 2, 1],
+                                      [4, 8, 0],
+                                      [5, 6, 7]]))
+def G1(n):
+    s = 0
+    bits = nabe(n)
+    for i in (0,2,4,6):
+        if not(bits[i]) and (bits[i+1] or bits[(i+2) % 8]):
+            s += 1
+    return s==1
+
+g1_lut = np.array([G1(n) for n in range(256)])
+def G2(n):
+    n1, n2 = 0, 0
+    bits = nabe(n)
+    for k in (1,3,5,7):
+        if bits[k] or bits[k-1]:
+            n1 += 1
+        if bits[k] or bits[(k+1) % 8]:
+            n2 += 1
+    return min(n1,n2) in [2,3]
+g2_lut = np.array([G2(n) for n in range(256)])
+g12_lut = g1_lut & g2_lut
+def G3(n):
+    bits = nabe(n)
+    return not((bits[1] or bits[2] or not(bits[7])) and bits[0])
+def G3p(n):
+    bits = nabe(n)
+    return not((bits[5] or bits[6] or not(bits[3])) and bits[4])
+g3_lut = np.array([G3(n) for n in range(256)])
+g3p_lut = np.array([G3p(n) for n in range(256)])
+g123_lut = g12_lut & g3_lut
+g123p_lut = g12_lut & g3p_lut
+"""
+
+"""
+author : Peb Ruswono Aryan
+
+metric for evaluating binarization algorithms
+implemented :
+
+ * F-Measure
+ * pseudo F-Measure (as in H-DIBCO 2010 & 2012)
+ * Peak Signal to Noise Ratio (PSNR)
+ * Negative Rate Measure (NRM)
+ * Misclassification Penaltiy Measure (MPM)
+ * Distance Reciprocal Distortion (DRD)
+
+usage:
+ python metric.py test-image.png ground-truth-image.png
+"""
+
+
+def drd_fn(im, im_gt):
+    height, width = im.shape
+    neg = np.zeros(im.shape)
+    neg[im_gt!=im] = 1
+    y, x = np.unravel_index(np.flatnonzero(neg), im.shape)
+
+    n = 2
+    m = n*2+1
+    W = np.zeros((m,m), dtype=np.uint8)
+    W[n,n] = 1.
+    W = cv2.distanceTransform(1-W, cv2.DIST_L2, cv2.DIST_MASK_PRECISE)
+    W[n,n] = 1.
+    W = 1./W
+    W[n,n] = 0.
+    W /= W.sum()
+
+    nubn = 0.
+    block_size = 8
+    for y1 in range(0, height, block_size):
+        for x1 in range(0, width, block_size):
+            y2 = min(y1+block_size-1,height-1)
+            x2 = min(x1+block_size-1,width-1)
+            block_dim = (x2-x1+1)*(y1-y1+1)
+            block = 1-im_gt[y1:y2, x1:x2]
+            block_sum = np.sum(block)
+            if block_sum>0 and block_sum<block_dim:
+                nubn += 1
+
+    drd_sum= 0.
+    tmp = np.zeros(W.shape)
+    for i in range(min(1,len(y))):
+        tmp[:,:] = 0
+
+        x1 = max(0, x[i]-n)
+        y1 = max(0, y[i]-n)
+        x2 = min(width-1, x[i]+n)
+        y2 = min(height-1, y[i]+n)
+
+        yy1 = y1-y[i]+n
+        yy2 = y2-y[i]+n
+        xx1 = x1-x[i]+n
+        xx2 = x2-x[i]+n
+
+        tmp[yy1:yy2+1,xx1:xx2+1] = np.abs(im[y[i],x[i]]-im_gt[y1:y2+1,x1:x2+1])
+        tmp *= W
+
+        drd_sum += np.sum(tmp)
+    return drd_sum/nubn
+
+def bin_metric(im,im_gt):
+    height, width = im.shape
+    npixel = height*width
+
+    im[im>0] = 1
+    gt_mask = im_gt==0
+    im_gt[im_gt>0] = 1
+
+    sk = bwmorph(1-im_gt)
+    im_sk = np.ones(im_gt.shape)
+    im_sk[sk] = 0
+
+    kernel = np.ones((3,3), dtype=np.uint8)
+    im_dil = cv2.erode(im_gt, kernel)
+    im_gtb = im_gt-im_dil
+    im_gtbd = cv2.distanceTransform(1-im_gtb, cv2.DIST_L2, 3)
+
+    nd = im_gtbd.sum()
+
+    ptp = np.zeros(im_gt.shape)
+    ptp[(im==0) & (im_sk==0)] = 1
+    numptp = ptp.sum()
+
+    tp = np.zeros(im_gt.shape)
+    tp[(im==0) & (im_gt==0)] = 1
+    numtp = tp.sum()
+
+    tn = np.zeros(im_gt.shape)
+    tn[(im==1) & (im_gt==1)] = 1
+    numtn = tn.sum()
+
+    fp = np.zeros(im_gt.shape)
+    fp[(im==0) & (im_gt==1)] = 1
+    numfp = fp.sum()
+
+    fn = np.zeros(im_gt.shape)
+    fn[(im==1) & (im_gt==0)] = 1
+    numfn = fn.sum()
+
+    precision = numtp / (numtp + numfp)
+    recall = numtp / (numtp + numfn)
+    precall = numptp / np.sum(1-im_sk)
+    fmeasure = (2*recall*precision)/(recall+precision)
+    pfmeasure = (2*precall*precision)/(precall+precision)
+
+    mse = (numfp+numfn)/npixel
+    psnr = 10.*np.log10(1./mse)
+
+    nrfn = numfn / (numfn + numtp)
+    nrfp = numfp / (numfp + numtn)
+    nrm = (nrfn + nrfp)/2
+
+    im_dn = im_gtbd.copy()
+    im_dn[fn==0] = 0
+    dn = np.sum(im_dn)
+    mpfn = dn / nd
+
+    im_dp = im_gtbd.copy()
+    im_dp[fp==0] = 0
+    dp = np.sum(im_dp)
+    mpfp = dp / nd
+
+    mpm = (mpfp + mpfn) / 2
+    drd = drd_fn(im, im_gt)
+
+    return fmeasure, pfmeasure,psnr,nrm, mpm,drd
+    # print("F-measure\t: {0}\npF-measure\t: {1}\nPSNR\t\t: {2}\nNRM\t\t: {3}\nMPM\t\t: {4}\nDRD\t\t: {5}".format(fmeasure, pfmeasure, psnr, nrm, mpm, drd))
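For orientation, the vendored utils.py above is mostly self-contained helper code: two-pass Sauvola binarization, morphological thinning, and binarization metrics. A minimal usage sketch follows; it assumes the module is importable as doctra.third_party.docres.utils (the wheel ships the file at that path, but no __init__.py for the third_party packages appears in the RECORD, so the import may need adjusting) and that a local page.png exists. Note the file uses the removed np.bool alias, so it only imports cleanly on older NumPy releases.

# Hypothetical usage sketch of the vendored DocRes helpers shown above (not an official doctra API).
import cv2
from doctra.third_party.docres import utils  # assumed import path; see note above

image = cv2.imread("page.png")                        # BGR uint8, HxWx3 (made-up input file)
binary, T2 = utils.SauvolaModBinarization(image)      # two-pass Sauvola; returns 0/255 map and threshold
thinned = utils.bwmorph((binary == 0).astype(int))    # thin the ink (0) pixels; expects a 0/1 image
tensor = utils.cvimg2torch(image)                     # HxWxC uint8 -> 1xCxHxW float tensor in [0, 1]
print(binary.shape, thinned.dtype, tensor.shape)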
doctra/ui/app.py
CHANGED
@@ -2,6 +2,11 @@ import os
 import shutil
 import tempfile
 import re
+import traceback
+import pandas as pd
+import html as _html
+import base64
+import json
 from pathlib import Path
 from typing import Optional, Tuple, List, Dict, Any

@@ -9,6 +14,7 @@ import gradio as gr

 from doctra.parsers.structured_pdf_parser import StructuredPDFParser
 from doctra.parsers.table_chart_extractor import ChartTablePDFParser
+from doctra.utils.pdf_io import render_pdf_to_images


 def _gather_outputs(out_dir: Path, allowed_kinds: Optional[List[str]] = None, zip_filename: Optional[str] = None, is_structured_parsing: bool = False) -> Tuple[List[tuple[str, str]], List[str], str]:
@@ -100,7 +106,6 @@ def _parse_markdown_by_pages(md_content: str) -> List[Dict[str, Any]]:
     Parse markdown content and organize it by pages.
     Returns a list of page dictionaries with content, tables, charts, and figures.
     """
-    import re

     pages = []
     current_page = None
@@ -209,7 +214,6 @@ def run_full_parse(
     try:
         parser.parse(str(input_pdf))
     except Exception as e:
-        import traceback
         traceback.print_exc()
         # Safely encode error message for return value
         try:
@@ -325,8 +329,6 @@ def run_extract(
     if excel_filename:
         excel_path = out_dir / excel_filename
         if excel_path.exists():
-            import pandas as pd
-            import html as _html

             # Read Excel file and create HTML tables
             xl_file = pd.ExcelFile(excel_path)
@@ -489,7 +491,6 @@ def build_demo() -> gr.Blocks:

     def parse_markdown_by_pages(md_content: str):
         """Parse markdown content and organize it by pages."""
-        import re

         pages = []
         current_page = None
@@ -548,7 +549,6 @@ def build_demo() -> gr.Blocks:
            return "Page not found", None

        # Build HTML with inline base64 images, render markdown tables, and preserve paragraphs/line breaks
-        import html as _html, base64, re as _re
        base_dir = None
        try:
            stem = Path(pdf_path).stem if pdf_path else ""
@@ -589,7 +589,7 @@ def build_demo() -> gr.Blocks:
            stripped = line.strip()
            if stripped.startswith('!['):
                flush_paragraph()
-                match = _re.match(r'!\[([^\]]+)\]\(([^)]+)\)', stripped)
+                match = re.match(r'!\[([^\]]+)\]\(([^)]+)\)', stripped)
                if match and base_dir is not None:
                    caption = match.group(1)
                    rel_path = match.group(2).replace('\\\\', '/').replace('\\', '/').lstrip('/')
@@ -646,7 +646,6 @@ def build_demo() -> gr.Blocks:
        # Ensure page images are prepared
        try:
            if pdf_path and not page_images:
-                from doctra.utils.pdf_io import render_pdf_to_images
                tmp_img_dir = Path(tempfile.mkdtemp(prefix="doctra_pages_"))
                pil_pages = render_pdf_to_images(pdf_path)
                saved_paths: List[str] = []
@@ -726,7 +725,6 @@ def build_demo() -> gr.Blocks:
        for page in pages_data:
            for line in page['content']:
                if line.strip().startswith('!['):
-                    import re
                    match = re.match(r'!\[([^\]]+)\]\(([^)]+)\)', line.strip())
                    if match:
                        caption = match.group(1)
@@ -745,7 +743,6 @@ def build_demo() -> gr.Blocks:
        saved_paths: List[str] = []
        try:
            if input_pdf_path:
-                from doctra.utils.pdf_io import render_pdf_to_images
                tmp_img_dir = Path(tempfile.mkdtemp(prefix="doctra_pages_"))
                pil_pages = render_pdf_to_images(input_pdf_path)
                for idx, (im, _, _) in enumerate(pil_pages, start=1):
@@ -759,7 +756,6 @@ def build_demo() -> gr.Blocks:

        # Build initial HTML with inline images and proper blocks for first page
        if pages_data:
-            import html as _html, base64, re as _re
            base_dir = None
            try:
                stem = Path(input_pdf_path).stem if input_pdf_path else ""
@@ -771,7 +767,7 @@ def build_demo() -> gr.Blocks:
            for raw_line in pages_data[0]['content']:
                line = raw_line.strip()
                if line.startswith('!['):
-                    match = _re.match(r'!\[([^\]]+)\]\(([^)]+)\)', line)
+                    match = re.match(r'!\[([^\]]+)\]\(([^)]+)\)', line)
                    if match and base_dir is not None:
                        caption = match.group(1)
                        rel_path = match.group(2).replace('\\\\', '/').replace('\\', '/').lstrip('/')
@@ -874,7 +870,6 @@ def build_demo() -> gr.Blocks:
        if not mapping.exists():
            return gr.Dropdown(choices=[], value=None, visible=False)

-        import json
        data = json.loads(mapping.read_text(encoding="utf-8"))
        choices = []

@@ -902,7 +897,6 @@ def build_demo() -> gr.Blocks:
        if not mapping.exists():
            return "", None

-        import json, html as _html
        data = json.loads(mapping.read_text(encoding="utf-8"))

        for entry in data:
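The app.py hunks above mostly hoist function-local imports to module level; the one behavioral touch point is the Markdown image regex, which now uses the module-level re instead of the removed local alias. A small standalone illustration of what that regex captures (the path and caption here are made up):

import re

line = "![Figure 1 caption](images/figures/page_001_fig_01.png)"  # hypothetical markdown line
match = re.match(r'!\[([^\]]+)\]\(([^)]+)\)', line.strip())
if match:
    caption, rel_path = match.group(1), match.group(2)
    print(caption)   # -> Figure 1 caption
    print(rel_path)  # -> images/figures/page_001_fig_01.png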
doctra/utils/structured_utils.py
CHANGED
@@ -14,7 +14,7 @@ def to_structured_dict(obj: Any) -> Optional[Dict[str, Any]]:
     - JSON string
     - dict
     - Pydantic BaseModel (v1 .dict() or v2 .model_dump())
-    Returns a normalized dict with keys: title, headers, rows — or None.
+    Returns a normalized dict with keys: title, description, headers, rows, page, type — or None.
     """
     if obj is None:
         return None
@@ -36,10 +36,13 @@ def to_structured_dict(obj: Any) -> Optional[Dict[str, Any]]:

     if isinstance(obj, dict):
         title = obj.get("title") or "Untitled"
+        description = obj.get("description") or ""
         headers = obj.get("headers") or []
         rows = obj.get("rows") or []
+        page = obj.get("page", "Unknown")
+        item_type = obj.get("type", "Table")
         if not isinstance(headers, list) or not isinstance(rows, list):
             return None
-        return {"title": title, "headers": headers, "rows": rows}
+        return {"title": title, "description": description, "headers": headers, "rows": rows, "page": page, "type": item_type}

     return None
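Per the diff above, to_structured_dict now carries description, page, and type through to the normalized dict (defaulting to "", "Unknown", and "Table" when absent). A small illustrative call, with made-up input values:

from doctra.utils.structured_utils import to_structured_dict

item = {
    "title": "Quarterly revenue",        # hypothetical example payload
    "description": "Revenue by region",
    "headers": ["Region", "Q1", "Q2"],
    "rows": [["EMEA", "1.2", "1.4"]],
    "page": 3,
    "type": "Table",
}
print(to_structured_dict(item))
# -> {'title': 'Quarterly revenue', 'description': 'Revenue by region',
#     'headers': ['Region', 'Q1', 'Q2'], 'rows': [['EMEA', '1.2', '1.4']],
#     'page': 3, 'type': 'Table'}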
doctra/version.py
CHANGED
@@ -1,2 +1,2 @@
 """Version information for Doctra."""
-__version__ = '0.3.3'
+__version__ = '0.4.1'
doctra-0.4.1.dist-info/RECORD
ADDED
@@ -0,0 +1,67 @@
+doctra/__init__.py,sha256=rNLCyODOpaPb_TTP6qmQnuWZJW9JPXrxg1IfKnvb1No,773
+doctra/version.py,sha256=gJX4jQdS3czcKE2h1k17fJPgWzxHyGH2oFP9nW9cTLw,62
+doctra/cli/__init__.py,sha256=4PTujjYRShOOUlZ7PwuWckShPWLC4v4CYIhJpzgyv1k,911
+doctra/cli/main.py,sha256=_gvG8bm-Mn1tIEw6eJUgqz9dYEo9klXGiJDJzjqgPyo,43503
+doctra/cli/utils.py,sha256=w3Bxyzczcbl_cs1Cea8C3ehv7dkGl_wecprYZXrcGhk,11772
+doctra/engines/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+doctra/engines/image_restoration/__init__.py,sha256=vzcN6Rw7_U-5jIK2pdo2NlgqdLdXDShigrOGM7QLNEE,263
+doctra/engines/image_restoration/docres_engine.py,sha256=n9Pr0R7dbu_UHv51urGv_wC6ZYW-43bmXxiyTCOEOMo,21612
+doctra/engines/layout/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+doctra/engines/layout/layout_models.py,sha256=vuTzjWd3FD-SkFPngktmUVhOJ6Xvff6ufwFEq796PQs,3162
+doctra/engines/layout/paddle_layout.py,sha256=P2-Gk8wHpWoA5Jpmo_3OLI59zWq3HeAOBOUKKVdXu8I,6792
+doctra/engines/ocr/__init__.py,sha256=h6bFiveGXdI59fsKzCqOXki3C74DCndEmvloOtMqnR0,133
+doctra/engines/ocr/api.py,sha256=YOBKDLExXpvSiOsc_TDJasaMPxzdVx1llQCtYlsruWo,1280
+doctra/engines/ocr/path_resolver.py,sha256=2_7Nsekt3dCDU3oVsgdr62iMrlAhbGNfYwgh4G7S3pA,1492
+doctra/engines/ocr/pytesseract_engine.py,sha256=Imz2uwju6himkBiS8CH7DLxBRe-LtmMYZiOdb_6PoQw,2911
+doctra/engines/vlm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+doctra/engines/vlm/outlines_types.py,sha256=fQK6ru7XiXHaa8JPpaTTBaTk_zQ93ZyhFp4SyAnUdVU,1337
+doctra/engines/vlm/provider.py,sha256=aE8Eo1U-8XqAimakNlT0-T4etIyCV8rZ3DwxdqbFeTc,3131
+doctra/engines/vlm/service.py,sha256=nygxMe7uTq6Bv70ycBPL59F2a0ESp1Hix4j833p6rUM,4343
+doctra/exporters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+doctra/exporters/excel_writer.py,sha256=rwyqlH73P7z413BELovQY_pS6IMkkqHEho6mbPrJ2Sk,11857
+doctra/exporters/html_writer.py,sha256=OlW24Eg5bZcjldRHtd3GDD7RrajuRXj43EJpXIJkYf8,38810
+doctra/exporters/image_saver.py,sha256=zsPoQ0CwoE643ui4iZMdXk96kv5mU8L_zC2JfF22N1A,1639
+doctra/exporters/markdown_table.py,sha256=4_OJIwG_WoIPYBzJx1njy_3tNVdkK6QKSP-P9r-b0zw,2030
+doctra/exporters/markdown_writer.py,sha256=L7EjF2MB8jYX7XkZ3a3NeeEC8gnb0qzRPTzIN9tdfuw,1027
+doctra/parsers/__init__.py,sha256=8M6LVzcWGpuTIK_1SMXML3ll7zK1CTHXGI5qXvqdm-A,206
+doctra/parsers/enhanced_pdf_parser.py,sha256=NBBopYdSIHWd_O96J0qR3DqZvbAt3CfK1hwUkXu8540,18377
+doctra/parsers/layout_order.py,sha256=W6b-T11H907RZ2FaZwNvnYhmvH11rpUzxC5yLkdf28k,640
+doctra/parsers/structured_pdf_parser.py,sha256=AU6yLW2kpd8bxZjelmm73L4CVBysnVAdKxwPkTV1Fzk,19602
+doctra/parsers/table_chart_extractor.py,sha256=ePmk9m9n-mvkqOvxpWC42ElxbnKMmDnq-e6SWiNqgzA,14195
+doctra/third_party/docres/inference.py,sha256=krD5EQDiqki-5uTMqqHYivhL38sfSOhYgaihI751070,13576
+doctra/third_party/docres/utils.py,sha256=N0ZVmOTB3wsinFlYu5hT84C4_MhWGdc98T8LTG-S9dA,14566
+doctra/third_party/docres/data/MBD/MBD.py,sha256=-d6cVQX1FVcGmQ_yJ5l-PQ3xKmkmveQQBytZ38pEGfY,4653
+doctra/third_party/docres/data/MBD/MBD_utils.py,sha256=z8La4F-yglk6fh8KraBUom8QXJLseYden7OBmFtoO7g,11783
+doctra/third_party/docres/data/MBD/infer.py,sha256=xm9ZCnLJLV5TxJjAH-ith7pSFI18J_8CEUpcYCFDLu8,6205
+doctra/third_party/docres/data/MBD/model/deep_lab_model/aspp.py,sha256=IdVcmhl88N6vFe2bnr-G4CEtbUxLSfhKa9T-Wy60otM,3708
+doctra/third_party/docres/data/MBD/model/deep_lab_model/decoder.py,sha256=5eQ9FT-J1Bd-CmA2dYXvVo_PjrpEy-RmSjESLpNReBA,2435
+doctra/third_party/docres/data/MBD/model/deep_lab_model/deeplab.py,sha256=u8pNRvuBzZoDhlr8IsxPzjN2-sMBN3ZnyBFydS9aGj8,3111
+doctra/third_party/docres/data/MBD/model/deep_lab_model/backbone/__init__.py,sha256=TsynqBTJgmodrSTDqlKlbYdnYkJJd19lsDrpR-3XMno,539
+doctra/third_party/docres/data/MBD/model/deep_lab_model/backbone/drn.py,sha256=M3kHjU4stdY7B-urpkmEmw-aTJ1dleOGXNOqGv2JrxU,15034
+doctra/third_party/docres/data/MBD/model/deep_lab_model/backbone/mobilenet.py,sha256=yBqu1LLs7M_5NIztnC8alB42E2xT59pvziIFhYNrMkY,5553
+doctra/third_party/docres/data/MBD/model/deep_lab_model/backbone/resnet.py,sha256=W53_VRAlrkY743s_Kpt9MLP3cFVJ0zYVW2J7EBhUQW4,6704
+doctra/third_party/docres/data/MBD/model/deep_lab_model/backbone/xception.py,sha256=HupHNVeeWNsiX8tWy8lQ0ZPUtrf_AaO7rwf_1xV_k0I,11854
+doctra/third_party/docres/data/MBD/model/deep_lab_model/sync_batchnorm/__init__.py,sha256=LHo9Qoia89mwgGImciJALSiMjdzpFHWLwXEi52S6aRg,458
+doctra/third_party/docres/data/MBD/model/deep_lab_model/sync_batchnorm/batchnorm.py,sha256=62Ku4DciMwETTxyjFFPxDB8-UJl53Vi3MCtb82lbfkA,13213
+doctra/third_party/docres/data/MBD/model/deep_lab_model/sync_batchnorm/comm.py,sha256=bGjH2iEqQXOfXmuzm3zzGuB1Ocui6N79Nd8rVGgLqag,4569
+doctra/third_party/docres/data/MBD/model/deep_lab_model/sync_batchnorm/replicate.py,sha256=idizPejwQvHo6W38VoBMcBF3kb7_zlrRvPBqg2Pi-Ko,3305
+doctra/third_party/docres/data/MBD/model/deep_lab_model/sync_batchnorm/unittest.py,sha256=Ub3j1dz25V09d6wL5lpwR4NNQ4cktOe4mzzpOP-jLd4,863
+doctra/third_party/docres/data/preprocess/crop_merge_image.py,sha256=f2NANY92s6IQ1hl1MAXfftFPIyIrj24O4TONjg7SXEc,4747
+doctra/third_party/docres/models/restormer_arch.py,sha256=BSwv_odCcp4HUZj3gv21e4IzFRBiyk8FjKAO8kF4YS8,12510
+doctra/ui/__init__.py,sha256=XzOOKeGSBnUREuDQiCIWds1asFSa2nypFQTJXwclROA,85
+doctra/ui/app.py,sha256=iFSAVZacL7iHB1SHhcUzperJGNQVWqUhvOYdlgjjt50,43623
+doctra/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+doctra/utils/bbox.py,sha256=R2-95p0KiWvet3TH27TQVvCar7WJg6z0u3L21iEDF-A,674
+doctra/utils/constants.py,sha256=ZWOvNDrvETbQ_pxHiX7vUW4J5Oj8_qnov0QacUOBizI,189
+doctra/utils/file_ops.py,sha256=3IS0EQncs6Kaj27fcg2zxQX3xRSvtItIsyKGLYgeOgw,815
+doctra/utils/io_utils.py,sha256=L1bWV4-ybs2j_3ZEN7GfQVgdC73JKVECVnpwKbP0dy0,219
+doctra/utils/ocr_utils.py,sha256=Doa1uYBg3kRgRYd2aPq9fICHgHfrM_efdhZfI7jl6OM,780
+doctra/utils/pdf_io.py,sha256=c8EY47Z1iqVtlLFHS_n0qGuXJ5ERFaMUd84ivXV0b9E,706
+doctra/utils/progress.py,sha256=IKQ_YErWSEd4hddYMUiCORy0_kW4TOYJM891HUEq2_E,11901
+doctra/utils/quiet.py,sha256=5XPS-1CtJ0sVk6qgSQctdhr_wR8mP1xoJLoUbmkXROA,387
+doctra/utils/structured_utils.py,sha256=vU84dsD8wIlTyMsA9hitorGH-eroQiVuWEpBTQBUT24,1478
+doctra-0.4.1.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+doctra-0.4.1.dist-info/METADATA,sha256=wXduiq7VJS5vf-TXdxpYFCKGfPyGYr5jGK0mwH3OjUw,28298
+doctra-0.4.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+doctra-0.4.1.dist-info/top_level.txt,sha256=jI7E8jHci2gP9y0GYaWxlg9jG0O5n3FjHJJPLXDXMds,7
+doctra-0.4.1.dist-info/RECORD,,
|