magic-pdf 0.5.9__py3-none-any.whl → 0.5.10__py3-none-any.whl
This diff compares publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
- magic_pdf/cli/magicpdf.py +9 -10
- magic_pdf/libs/version.py +1 -1
- magic_pdf/para/para_split_v2.py +43 -28
- magic_pdf/pdf_parse_by_ocr.py +18 -219
- magic_pdf/pdf_parse_by_txt.py +10 -401
- magic_pdf/pre_proc/equations_replace.py +1 -1
- magic_pdf/user_api.py +3 -41
- {magic_pdf-0.5.9.dist-info → magic_pdf-0.5.10.dist-info}/METADATA +44 -7
- {magic_pdf-0.5.9.dist-info → magic_pdf-0.5.10.dist-info}/RECORD +13 -15
- magic_pdf/pdf_parse_by_ocr_v2.py +0 -17
- magic_pdf/pdf_parse_by_txt_v2.py +0 -56
- {magic_pdf-0.5.9.dist-info → magic_pdf-0.5.10.dist-info}/LICENSE.md +0 -0
- {magic_pdf-0.5.9.dist-info → magic_pdf-0.5.10.dist-info}/WHEEL +0 -0
- {magic_pdf-0.5.9.dist-info → magic_pdf-0.5.10.dist-info}/entry_points.txt +0 -0
- {magic_pdf-0.5.9.dist-info → magic_pdf-0.5.10.dist-info}/top_level.txt +0 -0
magic_pdf/cli/magicpdf.py
CHANGED
@@ -17,8 +17,8 @@
 
 
 Examples:
-    python magicpdf.py --json s3://llm-pdf-text/scihub/xxxx.json?bytes=0,81350
-    python magicpdf.py --pdf /home/llm/Downloads/xxxx.pdf --model /home/llm/Downloads/xxxx.json or python magicpdf.py --pdf /home/llm/Downloads/xxxx.pdf
+    python magicpdf.py json-command --json s3://llm-pdf-text/scihub/xxxx.json?bytes=0,81350
+    python magicpdf.py pdf-command --pdf /home/llm/Downloads/xxxx.pdf --model /home/llm/Downloads/xxxx.json or python magicpdf.py --pdf /home/llm/Downloads/xxxx.pdf
 """
 
 import os
@@ -45,6 +45,7 @@ from magic_pdf.rw.S3ReaderWriter import S3ReaderWriter
 from magic_pdf.rw.DiskReaderWriter import DiskReaderWriter
 from magic_pdf.rw.AbsReaderWriter import AbsReaderWriter
 import csv
+import copy
 
 parse_pdf_methods = click.Choice(["ocr", "txt", "auto"])
 
@@ -81,10 +82,11 @@ def do_parse(
     f_dump_orig_pdf=True,
     f_dump_content_list=True,
 ):
+    orig_model_list = copy.deepcopy(model_list)
 
     local_image_dir, local_md_dir = prepare_env(pdf_file_name, parse_method)
     image_writer, md_writer = DiskReaderWriter(local_image_dir), DiskReaderWriter(local_md_dir)
-    image_dir = (os.path.basename(local_image_dir)
+    image_dir = str(os.path.basename(local_image_dir))
 
     if parse_method == "auto":
         jso_useful_key = {"_pdf_type": "", "model_list": model_list}
@@ -110,10 +112,7 @@ def do_parse(
     if f_draw_span_bbox:
         draw_span_bbox(pdf_info, pdf_bytes, local_md_dir)
 
-
-    # [pdf_file_name, pipe.pdf_mid_data['not_common_character_rate'], pipe.pdf_mid_data['not_printable_rate']])
-
-    md_content = pipe.pipe_mk_markdown(str(image_dir), drop_mode=DropMode.NONE)
+    md_content = pipe.pipe_mk_markdown(image_dir, drop_mode=DropMode.NONE)
     if f_dump_md:
         """Write the markdown."""
         md_writer.write(
@@ -133,7 +132,7 @@ def do_parse(
     if f_dump_model_json:
         """Write the model json."""
         md_writer.write(
-            content=json_parse.dumps(
+            content=json_parse.dumps(orig_model_list, ensure_ascii=False, indent=4),
             path=f"{pdf_file_name}_model.json",
             mode=AbsReaderWriter.MODE_TXT,
         )
@@ -146,7 +145,7 @@ def do_parse(
             mode=AbsReaderWriter.MODE_BIN,
         )
 
-    content_list = pipe.pipe_mk_uni_format(
+    content_list = pipe.pipe_mk_uni_format(image_dir, drop_mode=DropMode.NONE)
     if f_dump_content_list:
         """Write the content_list."""
         md_writer.write(
@@ -281,7 +280,7 @@ def pdf_command(pdf, model, method):
     model_path = pdf.replace(".pdf", ".json")
     if not os.path.exists(model_path):
         logger.warning(
-            f"not found json {model_path} existed
+            f"not found json {model_path} existed"
        )
        # No local model data: pass an empty list first; when the empty list is detected internally, the built-in paddle analysis is invoked
        model_json = "[]"
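Note: the new `copy.deepcopy(model_list)` in `do_parse` snapshots the model output before the pipeline runs, so the `{pdf_file_name}_model.json` dump reflects what the model actually produced rather than the pipeline's mutated working copy. A minimal sketch of the pattern; `run_pipeline` is a hypothetical stand-in for the pipe steps:

```python
import copy
import json


def dump_raw_model_output(model_list, run_pipeline):
    # Snapshot first: the pipeline mutates model_list in place.
    orig_model_list = copy.deepcopy(model_list)
    result = run_pipeline(model_list)
    # Serialize the untouched snapshot, not the mutated working copy.
    content = json.dumps(orig_model_list, ensure_ascii=False, indent=4)
    return result, content
```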
magic_pdf/libs/version.py
CHANGED
@@ -1 +1 @@
-__version__ = "0.5.9"
+__version__ = "0.5.10"
magic_pdf/para/para_split_v2.py
CHANGED
@@ -11,6 +11,7 @@ LINE_STOP_FLAG = ['.', '!', '?', '。', '!', '?', ":", ":", ")", ")", ";
 INLINE_EQUATION = ContentType.InlineEquation
 INTERLINE_EQUATION = ContentType.InterlineEquation
 TEXT = ContentType.Text
+debug_able = False
 
 
 def __get_span_text(span):
@@ -22,6 +23,7 @@ def __get_span_text(span):
 
 
 def __detect_list_lines(lines, new_layout_bboxes, lang):
+    global debug_able
     """
    Detect whether the lines contain a list, and split the list lines out.
    Such paragraphs start flush left with a capital letter or digit, followed by several indented lines whose leading characters include lowercase letters.
@@ -41,12 +43,14 @@ def __detect_list_lines(lines, new_layout_bboxes, lang):
             if lst[i] == 1:
                 ones_in_this_interval.append(i)
             i += 1
-        if len(ones_in_this_interval) > 1 or (
+        if len(ones_in_this_interval) > 1 or (
+                start < len(lst) - 1 and ones_in_this_interval and lst[start + 1] in [2, 3]):
             indices.append((start, i - 1))
             ones_indices.append(ones_in_this_interval)
         else:
             i += 1
     return indices, ones_indices
+
     def find_repeating_patterns(lst):
         indices = []
         ones_indices = []
@@ -132,7 +136,8 @@ def __detect_list_lines(lines, new_layout_bboxes, lang):
 
     list_indice, list_start_idx = find_repeating_patterns2(line_fea_encode)
     if len(list_indice) > 0:
-        logger.info(f"发现了列表,列表行数:{list_indice}, {list_start_idx}")
+        if debug_able:
+            logger.info(f"发现了列表,列表行数:{list_indice}, {list_start_idx}")
 
     # TODO: check whether the indented lines inside this list are left-aligned.
     segments = []
@@ -140,13 +145,16 @@ def __detect_list_lines(lines, new_layout_bboxes, lang):
         for i in range(start, end + 1):
             if i > 0:
                 if line_fea_encode[i] == 4:
-                    logger.info(f"列表行的第{i}行不是顶格的")
+                    if debug_able:
+                        logger.info(f"列表行的第{i}行不是顶格的")
                     break
         else:
-            logger.info(f"列表行的第{start}到第{end}行是列表")
+            if debug_able:
+                logger.info(f"列表行的第{start}到第{end}行是列表")
 
     return split_indices(total_lines, list_indice), list_start_idx
 
+
 def cluster_line_x(lines: list) -> dict:
     """
     Cluster the x0 of the bboxes of all lines within a block.
@@ -170,6 +178,7 @@ def cluster_line_x(lines: list) -> dict:
         min_x0 = x0_new_val
     return x0_2_new_val, min_x0
 
+
 def if_match_reference_list(text: str) -> bool:
     pattern = re.compile(r'^\d+\..*')
     if pattern.match(text):
@@ -190,7 +199,8 @@ def __valign_lines(blocks, layout_bboxes):
     new_layout_bboxes = []
 
     for layout_box in layout_bboxes:
-        blocks_in_layoutbox = [b for b in blocks if
+        blocks_in_layoutbox = [b for b in blocks if
+                               b["type"] == BlockType.Text and is_in_layout(b['bbox'], layout_box['layout_bbox'])]
         if len(blocks_in_layoutbox) == 0 or len(blocks_in_layoutbox[0]["lines"]) == 0:
             new_layout_bboxes.append(layout_box['layout_bbox'])
             continue
@@ -253,7 +263,8 @@ def __align_text_in_layout(blocks, layout_bboxes):
     """
     for layout in layout_bboxes:
         lb = layout['layout_bbox']
-        blocks_in_layoutbox = [block for block in blocks if
+        blocks_in_layoutbox = [block for block in blocks if
+                               block["type"] == BlockType.Text and is_in_layout(block['bbox'], lb)]
         if len(blocks_in_layoutbox) == 0:
             continue
 
@@ -383,7 +394,6 @@ def __split_para_lines(lines: list, text_blocks: list) -> list:
             continue
         text_lines.append(line)
 
-
     for block in text_blocks:
         block_bbox = block["bbox"]
         para = []
@@ -394,14 +404,12 @@ def __split_para_lines(lines: list, text_blocks: list) -> list:
         if len(para) > 0:
             text_paras.append(para)
     paras = other_paras.extend(text_paras)
-    paras_sorted = sorted(paras, key
+    paras_sorted = sorted(paras, key=lambda x: x[0]["bbox"][1])
     return paras_sorted
 
 
-
-
-
 def __connect_list_inter_layout(blocks_group, new_layout_bbox, layout_list_info, page_num, lang):
+    global debug_able
     """
    If the last paragraph of the previous layout is a list and the first paragraph of the next layout is also a list, connect them. TODO: lists and paragraphs are not distinguished yet, so this method is not implemented for now.
    Use layout_list_info to decide whether something is a list. If the first paragraph of the next layout is not a list, check whether several of its lines share the same indentation.
@@ -410,7 +418,7 @@ def __connect_list_inter_layout(blocks_group, new_layout_bbox, layout_list_info,
         return blocks_group, [False, False]
 
     for i in range(1, len(blocks_group)):
-        if len(blocks_group[i]) == 0 or len(blocks_group[i-1]) == 0:
+        if len(blocks_group[i]) == 0 or len(blocks_group[i - 1]) == 0:
             continue
         pre_layout_list_info = layout_list_info[i - 1]
         next_layout_list_info = layout_list_info[i]
@@ -418,8 +426,10 @@ def __connect_list_inter_layout(blocks_group, new_layout_bbox, layout_list_info,
         next_paras = blocks_group[i]
         next_first_para = next_paras[0]
 
-        if pre_layout_list_info[1] and not next_layout_list_info[0] and next_first_para[
-            "type"] == BlockType.Text:  # the previous layout ends a list and the next starts a non-list; check for matching indentation
+        if pre_layout_list_info[1] and not next_layout_list_info[0] and next_first_para[
+                "type"] == BlockType.Text:  # the previous layout ends a list and the next starts a non-list; check for matching indentation
+            if debug_able:
+                logger.info(f"连接page {page_num} 内的list")
             # look in layout_paras[i] for consecutive leading lines with the same indentation
             may_list_lines = []
             lines = next_first_para.get("lines", [])
@@ -450,7 +460,8 @@ def __connect_list_inter_page(pre_page_paras, next_page_paras, pre_page_layout_b
     if pre_page_paras[-1][-1]["type"] != BlockType.Text or next_page_paras[0][0]["type"] != BlockType.Text:
         return False
     if pre_page_list_info[1] and not next_page_list_info[0]:  # the previous page ends a list and the next starts a non-list; check for matching indentation
-        logger.info(f"连接page {page_num} 内的list")
+        if debug_able:
+            logger.info(f"连接page {page_num} 内的list")
         # look in layout_paras[i] for consecutive leading lines with the same indentation
         may_list_lines = []
         next_page_first_para = next_page_paras[0][0]
@@ -503,7 +514,7 @@ def __connect_para_inter_layoutbox(blocks_group, new_layout_bbox):
         try:
             if len(blocks_group[i]) == 0:
                 continue
-            if len(blocks_group[i-1]) == 0:  # TODO: consider the connection problem,
+            if len(blocks_group[i - 1]) == 0:  # TODO: consider the connection problem,
                 connected_layout_blocks.append(blocks_group[i])
                 continue
             # only text-type paragraphs need cross-layout merging
@@ -534,13 +545,14 @@ def __connect_para_inter_layoutbox(blocks_group, new_layout_bbox):
 
         pre_last_line_text = pre_last_line_text.strip()
         next_first_line_text = next_first_line_text.strip()
-        if pre_last_line['bbox'][2] == pre_x2_max and pre_last_line_text and pre_last_line_text[-1] not in LINE_STOP_FLAG and \
+        if pre_last_line['bbox'][2] == pre_x2_max and pre_last_line_text and pre_last_line_text[
+            -1] not in LINE_STOP_FLAG and \
                 next_first_line['bbox'][0] == next_x0_min:  # the previous line fills the whole row with no terminal punctuation, and the next line has no leading whitespace
             """The merge condition holds: connect the previous layout's paragraph with the next layout's paragraph."""
             #connected_layout_paras[-1][-1].extend(layout_paras[i][0])
             connected_layout_blocks[-1][-1]["lines"].extend(blocks_group[i][0]["lines"])
             #layout_paras[i].pop(0)  # delete the next layout's first paragraph, since it was merged into the previous layout's last paragraph
-            blocks_group[i][0]["lines"] = []
+            blocks_group[i][0]["lines"] = []  # clear the lines of the next layout's first paragraph, since they were merged into the previous layout's last paragraph
             blocks_group[i][0][LINES_DELETED] = True
             # if len(layout_paras[i]) == 0:
             #     layout_paras.pop(i)
@@ -638,7 +650,8 @@ def find_consecutive_true_regions(input_array):
     return regions
 
 
-def __connect_middle_align_text(page_paras, new_layout_bbox, page_num, lang, debug_mode=False):
+def __connect_middle_align_text(page_paras, new_layout_bbox, page_num, lang):
+    global debug_able
     """
    Find consecutive center-aligned single-line text; if the consecutive lines share the same height, merge them into one paragraph.
    A line counts as centered when:
@@ -660,11 +673,12 @@ def __connect_middle_align_text(page_paras, new_layout_bbox, page_num, lang, debug_mode=False):
     for start, end in consecutive_single_line_indices:
         #start += index_offset
         #end += index_offset
-        line_hi = np.array([block["lines"][0]['bbox'][3] - block["lines"][0]['bbox'][1] for block in layout_para[start:end + 1]])
+        line_hi = np.array([block["lines"][0]['bbox'][3] - block["lines"][0]['bbox'][1] for block in
+                            layout_para[start:end + 1]])
         first_line_text = ''.join([__get_span_text(span) for span in layout_para[start]["lines"][0]['spans']])
         if "Table" in first_line_text or "Figure" in first_line_text:
             pass
-        if debug_mode:
+        if debug_able:
             logger.info(line_hi.std())
 
         if line_hi.std() < 2:
@@ -677,10 +691,10 @@ def __connect_middle_align_text(page_paras, new_layout_bbox, page_num, lang, debug_mode=False):
                 and not all([x1 == layout_box[2] for x1 in all_right_x1]):
             merge_para = [block["lines"][0] for block in layout_para[start:end + 1]]
             para_text = ''.join([__get_span_text(span) for line in merge_para for span in line['spans']])
-            if debug_mode:
+            if debug_able:
                 logger.info(para_text)
             layout_para[start]["lines"] = merge_para
-            for i_para in range(start+1, end+1):
+            for i_para in range(start + 1, end + 1):
                 layout_para[i_para]["lines"] = []
                 layout_para[i_para][LINES_DELETED] = True
             #layout_para[start:end + 1] = [merge_para]
@@ -713,14 +727,15 @@ def __do_split_page(blocks, layout_bboxes, new_layout_bbox, page_num, lang):
     blocks_group = __group_line_by_layout(blocks, layout_bboxes)  # split paragraphs within blocks
     layout_list_info = __split_para_in_layoutbox(blocks_group, new_layout_bbox, lang)  # split paragraphs within layouts
     blocks_group, page_list_info = __connect_list_inter_layout(blocks_group, new_layout_bbox, layout_list_info,
-                                                               page_num, lang)  # connect list paragraphs across layouts
+                                                                page_num, lang)  # connect list paragraphs across layouts
     connected_layout_blocks = __connect_para_inter_layoutbox(blocks_group, new_layout_bbox)  # connect paragraphs across layouts
 
     return connected_layout_blocks, page_list_info
 
 
-
 def para_split(pdf_info_dict, debug_mode, lang="en"):
+    global debug_able
+    debug_able = debug_mode
     new_layout_of_pages = []  # array of arrays; each element holds one page's layouts
     all_page_list_info = []  # record whether each page starts and ends with a list
     for page_num, page in pdf_info_dict.items():
@@ -744,14 +759,14 @@ def para_split(pdf_info_dict, debug_mode, lang="en"):
 
         is_conn = __connect_para_inter_page(pre_page_paras, next_page_paras, pre_page_layout_bbox,
                                             next_page_layout_bbox, page_num, lang)
-        if debug_mode:
+        if debug_able:
             if is_conn:
                 logger.info(f"连接了第{page_num - 1}页和第{page_num}页的段落")
 
         is_list_conn = __connect_list_inter_page(pre_page_paras, next_page_paras, pre_page_layout_bbox,
                                                  next_page_layout_bbox, all_page_list_info[page_num - 1],
                                                  all_page_list_info[page_num], page_num, lang)
-        if debug_mode:
+        if debug_able:
             if is_list_conn:
                 logger.info(f"连接了第{page_num - 1}页和第{page_num}页的列表段落")
 
@@ -762,7 +777,7 @@ def para_split(pdf_info_dict, debug_mode, lang="en"):
     for page_num, page in enumerate(pdf_info_dict.values()):
         page_paras = page['para_blocks']
         new_layout_bbox = new_layout_of_pages[page_num]
-        __connect_middle_align_text(page_paras, new_layout_bbox, page_num, lang, debug_mode=debug_mode)
+        __connect_middle_align_text(page_paras, new_layout_bbox, page_num, lang)
         __merge_signle_list_text(page_paras, new_layout_bbox, page_num, lang)
 
     # flatten layouts
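Note: the recurring change in this file swaps per-call `debug_mode` parameters for a module-level `debug_able` flag that `para_split` sets once and the helpers read through `global`. A minimal sketch of the pattern, with a hypothetical `_split_one_page` helper standing in for the real ones:

```python
from loguru import logger

debug_able = False  # module-level switch, set once per para_split call


def para_split(pdf_info_dict, debug_mode, lang="en"):
    global debug_able
    debug_able = debug_mode  # propagate the caller's flag to every helper
    _split_one_page(pdf_info_dict, lang)


def _split_one_page(pdf_info_dict, lang):
    # Helpers no longer thread debug_mode through their signatures;
    # they consult the module-level flag instead.
    if debug_able:
        logger.info(f"splitting {len(pdf_info_dict)} pages")
```

The trade-off is shared state: signatures get simpler, but the flag is process-wide rather than per call.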
magic_pdf/pdf_parse_by_ocr.py
CHANGED
@@ -1,219 +1,18 @@
-import
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-from magic_pdf.pre_proc.cut_image import ocr_cut_image_and_table
-from magic_pdf.pre_proc.ocr_detect_layout import layout_detect
-from magic_pdf.pre_proc.ocr_dict_merge import (
-    merge_spans_to_line_by_layout, merge_lines_to_block,
-)
-from magic_pdf.pre_proc.ocr_span_list_modify import remove_spans_by_bboxes, remove_overlaps_min_spans, \
-    adjust_bbox_for_standalone_block, modify_y_axis, modify_inline_equation, get_qa_need_list, \
-    remove_spans_by_bboxes_dict
-from magic_pdf.pre_proc.remove_bbox_overlap import remove_overlap_between_bbox_for_span
-
-
-def parse_pdf_by_ocr(
-    pdf_bytes,
-    pdf_model_output,
-    imageWriter,
-    start_page_id=0,
-    end_page_id=None,
-    debug_mode=False,
-):
-    pdf_bytes_md5 = compute_md5(pdf_bytes)
-
-    pdf_docs = fitz.open("pdf", pdf_bytes)
-    # initialize an empty pdf_info_dict
-    pdf_info_dict = {}
-
-    start_time = time.time()
-
-    end_page_id = end_page_id if end_page_id else len(pdf_docs) - 1
-    for page_id in range(start_page_id, end_page_id + 1):
-
-        # get the page object for the current page
-        page = pdf_docs[page_id]
-        # get the current page's width and height
-        page_w = page.rect.width
-        page_h = page.rect.height
-
-        if debug_mode:
-            time_now = time.time()
-            logger.info(
-                f"page_id: {page_id}, last_page_cost_time: {get_delta_time(start_time)}"
-            )
-            start_time = time_now
-
-        # get the model output for the current page
-        ocr_page_info = get_docx_model_output(
-            pdf_model_output, page_id
-        )
-
-        """Get each page's page-number, header, and footer bboxes from the json."""
-        page_no_bboxes = parse_pageNos(page_id, page, ocr_page_info)
-        header_bboxes = parse_headers(page_id, page, ocr_page_info)
-        footer_bboxes = parse_footers(page_id, page, ocr_page_info)
-        footnote_bboxes = parse_footnotes_by_model(page_id, page, ocr_page_info, debug_mode=debug_mode)
-
-        # build the dict of bboxes that need removing
-        need_remove_spans_bboxes_dict = {
-            DropTag.PAGE_NUMBER: page_no_bboxes,
-            DropTag.HEADER: header_bboxes,
-            DropTag.FOOTER: footer_bboxes,
-            DropTag.FOOTNOTE: footnote_bboxes,
-        }
-
-        layout_dets = ocr_page_info["layout_dets"]
-        spans = []
-
-        # compute the scale ratio between model coordinates and pymupdf coordinates
-        horizontal_scale_ratio, vertical_scale_ratio = get_scale_ratio(
-            ocr_page_info, page
-        )
-
-        for layout_det in layout_dets:
-            category_id = layout_det["category_id"]
-            allow_category_id_list = [1, 7, 13, 14, 15]
-            if category_id in allow_category_id_list:
-                x0, y0, _, _, x1, y1, _, _ = layout_det["poly"]
-                bbox = [
-                    int(x0 / horizontal_scale_ratio),
-                    int(y0 / vertical_scale_ratio),
-                    int(x1 / horizontal_scale_ratio),
-                    int(y1 / vertical_scale_ratio),
-                ]
-                # drop spans whose height or width is 0
-                if bbox[2] - bbox[0] == 0 or bbox[3] - bbox[1] == 0:
-                    continue
-                """To be removed:"""
-                # 3: 'header',  # page header
-                # 4: 'page number',  # page number
-                # 5: 'footnote',  # footnote
-                # 6: 'footer',  # page footer
-                """Joined as spans:"""
-                # 1: 'image',  # image
-                # 7: 'table',  # table
-                # 13: 'inline_equation',  # inline equation
-                # 14: 'interline_equation',  # interline (display) equation
-                # 15: 'text',  # OCR-recognized text
-                """Layout info:"""
-                # 11: 'full column',  # single column
-                # 12: 'sub column',  # multiple columns
-                span = {
-                    "bbox": bbox,
-                }
-                if category_id == 1:
-                    span["type"] = ContentType.Image
-
-                elif category_id == 7:
-                    span["type"] = ContentType.Table
-
-                elif category_id == 13:
-                    span["content"] = layout_det["latex"]
-                    span["type"] = ContentType.InlineEquation
-                elif category_id == 14:
-                    span["content"] = layout_det["latex"]
-                    span["type"] = ContentType.InterlineEquation
-                elif category_id == 15:
-                    span["content"] = layout_det["text"]
-                    span["type"] = ContentType.Text
-                # print(span)
-                spans.append(span)
-            else:
-                continue
-
-        '''Drop the smaller of any overlapping spans.'''
-        spans, dropped_spans_by_span_overlap = remove_overlaps_min_spans(spans)
-
-        '''
-        Remove the bboxes listed in remove_span_block_bboxes
-        and record the drop metadata.
-        '''
-        spans, dropped_spans_by_removed_bboxes = remove_spans_by_bboxes_dict(spans, need_remove_spans_bboxes_dict)
-
-        '''Crop screenshots of images and tables.'''
-        spans = ocr_cut_image_and_table(spans, page, page_id, pdf_bytes_md5, imageWriter)
-
-        '''Adjust inline equations: match their height to the text on the same line (prefer the left side, then the right).'''
-        displayed_list = []
-        text_inline_lines = []
-        modify_y_axis(spans, displayed_list, text_inline_lines)
-
-        '''Convert interline equations misrecognized by the model into inline equations.'''
-        spans = modify_inline_equation(spans, displayed_list, text_inline_lines)
-
-        '''De-overlap adjoining bboxes.'''
-        spans = remove_overlap_between_bbox_for_span(spans)
-        '''
-        Extra handling for type=["interline_equation", "image", "table"]:
-        if there is text to the left, adjust the span's y0 so it is not above the text's y0.
-        '''
-        spans = adjust_bbox_for_standalone_block(spans)
-
-        '''Parse layout info from ocr_page_info (sorted in natural reading order, fixing overlapping and interleaved bad cases).'''
-        layout_bboxes, layout_tree = layout_detect(ocr_page_info['subfield_dets'], page, ocr_page_info)
-
-        '''Merge spans into lines (within a layout, top to bottom, left to right).'''
-        lines, dropped_spans_by_layout = merge_spans_to_line_by_layout(spans, layout_bboxes)
-
-        '''Merge lines into blocks.'''
-        blocks = merge_lines_to_block(lines)
-
-        '''Collect the lists that QA needs externalized.'''
-        images, tables, interline_equations, inline_equations = get_qa_need_list(blocks)
-
-        '''Merge the dropped span lists.'''
-        dropped_spans = []
-        dropped_spans.extend(dropped_spans_by_span_overlap)
-        dropped_spans.extend(dropped_spans_by_removed_bboxes)
-        dropped_spans.extend(dropped_spans_by_layout)
-
-        dropped_text_block = []
-        dropped_image_block = []
-        dropped_table_block = []
-        dropped_equation_block = []
-        for span in dropped_spans:
-            # classify the dropped spans
-            if span['type'] == ContentType.Text:
-                dropped_text_block.append(span)
-            elif span['type'] == ContentType.Image:
-                dropped_image_block.append(span)
-            elif span['type'] == ContentType.Table:
-                dropped_table_block.append(span)
-            elif span['type'] in [ContentType.InlineEquation, ContentType.InterlineEquation]:
-                dropped_equation_block.append(span)
-
-        '''Build pdf_info_dict.'''
-        page_info = ocr_construct_page_component(blocks, layout_bboxes, page_id, page_w, page_h, layout_tree,
-                                                 images, tables, interline_equations, inline_equations,
-                                                 dropped_text_block, dropped_image_block, dropped_table_block,
-                                                 dropped_equation_block,
-                                                 need_remove_spans_bboxes_dict)
-        pdf_info_dict[f"page_{page_id}"] = page_info
-
-    """Paragraph splitting."""
-
-    para_split(pdf_info_dict, debug_mode=debug_mode)
-
-    """Convert dict to list."""
-    pdf_info_list = dict_to_list(pdf_info_dict)
-    new_pdf_info_dict = {
-        "pdf_info": pdf_info_list,
-    }
-
-    return new_pdf_info_dict
+from magic_pdf.pdf_parse_union_core import pdf_parse_union
+
+
+def parse_pdf_by_ocr(pdf_bytes,
+                     model_list,
+                     imageWriter,
+                     start_page_id=0,
+                     end_page_id=None,
+                     debug_mode=False,
+                     ):
+    return pdf_parse_union(pdf_bytes,
+                           model_list,
+                           imageWriter,
+                           "ocr",
+                           start_page_id=start_page_id,
+                           end_page_id=end_page_id,
+                           debug_mode=debug_mode,
+                           )
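Note: the 219-line page pipeline above now lives in `pdf_parse_union_core.py`, and `parse_pdf_by_ocr` is a thin wrapper that selects the `"ocr"` mode. A hedged usage sketch; the file paths are made-up placeholders, and `model_list` is the per-page model output normally loaded from the companion JSON:

```python
import json

from magic_pdf.pdf_parse_by_ocr import parse_pdf_by_ocr
from magic_pdf.rw.DiskReaderWriter import DiskReaderWriter

# Hypothetical inputs: a local PDF and its model-output JSON.
with open("doc.pdf", "rb") as f:
    pdf_bytes = f.read()
with open("doc.json") as f:
    model_list = json.load(f)

image_writer = DiskReaderWriter("/tmp/doc_images")  # cropped images land here
result = parse_pdf_by_ocr(pdf_bytes, model_list, image_writer, debug_mode=False)
```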
magic_pdf/pdf_parse_by_txt.py
CHANGED
@@ -1,410 +1,19 @@
-import
-
-# from anyio import Path
-
-from magic_pdf.libs.commons import fitz, get_delta_time, get_img_s3_client, get_docx_model_output
-import json
-import os
-import math
-from loguru import logger
-from magic_pdf.layout.bbox_sort import (
-    prepare_bboxes_for_layout_split,
-)
-from magic_pdf.layout.layout_sort import LAYOUT_UNPROC, get_bboxes_layout, get_columns_cnt_of_layout, sort_text_block
-from magic_pdf.libs.convert_utils import dict_to_list
-from magic_pdf.libs.drop_reason import DropReason
-from magic_pdf.libs.hash_utils import compute_md5
-from magic_pdf.libs.markdown_utils import escape_special_markdown_char
-from magic_pdf.libs.safe_filename import sanitize_filename
-from magic_pdf.libs.vis_utils import draw_bbox_on_page, draw_layout_bbox_on_page
-from magic_pdf.pre_proc.cut_image import txt_save_images_by_bboxes
-from magic_pdf.pre_proc.detect_images import parse_images
-from magic_pdf.pre_proc.detect_tables import parse_tables  # get table bboxes
-from magic_pdf.pre_proc.detect_equation import parse_equations  # get equation bboxes
-from magic_pdf.pre_proc.detect_header import parse_headers  # get header bboxes
-from magic_pdf.pre_proc.detect_page_number import parse_pageNos  # get pageNo bboxes
-from magic_pdf.pre_proc.detect_footnote import parse_footnotes_by_model, parse_footnotes_by_rule  # get footnote bboxes
-from magic_pdf.pre_proc.detect_footer_by_model import parse_footers  # get footer bboxes
-
-from magic_pdf.post_proc.detect_para import (
-    ParaProcessPipeline,
-    TitleDetectionException,
-    TitleLevelException,
-    ParaSplitException,
-    ParaMergeException,
-    DenseSingleLineBlockException,
-)
-from magic_pdf.pre_proc.main_text_font import get_main_text_font
-from magic_pdf.pre_proc.remove_colored_strip_bbox import remove_colored_strip_textblock
-from magic_pdf.pre_proc.remove_footer_header import remove_headder_footer_one_page
-
-'''
-from para.para_pipeline import ParaProcessPipeline
-from para.exceptions import (
-    TitleDetectionException,
-    TitleLevelException,
-    ParaSplitException,
-    ParaMergeException,
-    DenseSingleLineBlockException,
-)
-'''
-
-from magic_pdf.post_proc.remove_footnote import merge_footnote_blocks, remove_footnote_blocks
-from magic_pdf.pre_proc.citationmarker_remove import remove_citation_marker
-from magic_pdf.pre_proc.equations_replace import combine_chars_to_pymudict, remove_chars_in_text_blocks, replace_equations_in_textblock
-from magic_pdf.pre_proc.pdf_pre_filter import pdf_filter
-from magic_pdf.pre_proc.detect_footer_header_by_statistics import drop_footer_header
-from magic_pdf.pre_proc.construct_page_dict import construct_page_component
-from magic_pdf.pre_proc.fix_image import combine_images, fix_image_vertical, fix_seperated_image, include_img_title
-from magic_pdf.post_proc.pdf_post_filter import pdf_post_filter
-from magic_pdf.pre_proc.remove_rotate_bbox import get_side_boundry, remove_rotate_side_textblock, remove_side_blank_block
-from magic_pdf.pre_proc.resolve_bbox_conflict import check_text_block_horizontal_overlap, resolve_bbox_overlap_conflict
-from magic_pdf.pre_proc.fix_table import fix_table_text_block, fix_tables, include_table_title
-from magic_pdf.pre_proc.solve_line_alien import solve_inline_too_large_interval
-
-denseSingleLineBlockException_msg = DenseSingleLineBlockException().message
-titleDetectionException_msg = TitleDetectionException().message
-titleLevelException_msg = TitleLevelException().message
-paraSplitException_msg = ParaSplitException().message
-paraMergeException_msg = ParaMergeException().message
-
-
+from magic_pdf.pdf_parse_union_core import pdf_parse_union
 
 
 def parse_pdf_by_txt(
     pdf_bytes,
-    pdf_model_output,
+    model_list,
     imageWriter,
     start_page_id=0,
     end_page_id=None,
     debug_mode=False,
 ):
-
-
-
-
-
-
-
-
-
-    end_page_id = end_page_id if end_page_id else len(pdf_docs) - 1
-    for page_id in range(start_page_id, end_page_id + 1):
-        page = pdf_docs[page_id]
-        page_width = page.rect.width
-        page_height = page.rect.height
-
-        if debug_mode:
-            time_now = time.time()
-            logger.info(f"page_id: {page_id}, last_page_cost_time: {get_delta_time(start_time)}")
-            start_time = time_now
-        """
-        # Filter out pdfs with more than 1500 non-junk images on a single page via a rule:
-        # count the distinct-id images per page; if the page exceeds 1500, return need_drop directly.
-        """
-        page_imgs = page.get_images()
-
-        # drop the dependency on junkimg to simplify the logic
-        if len(page_imgs) > 1500:  # skip directly if the current page has more than 1500 images
-            logger.warning(f"page_id: {page_id}, img_counts: {len(page_imgs)}, drop this pdf")
-            result = {"_need_drop": True, "_drop_reason": DropReason.HIGH_COMPUTATIONAL_lOAD_BY_IMGS}
-            if not debug_mode:
-                return result
-
-        """
-        ==================================================================================================================================
-        First get the basic block data: decompose the pdf into bboxes for images, tables, equations, and text.
-        """
-        # parse the pdf's raw text blocks
-        text_raw_blocks = page.get_text(
-            "dict",
-            flags=fitz.TEXTFLAGS_TEXT,
-        )["blocks"]
-        model_output_json = get_docx_model_output(pdf_model_output, page_id)
-
-        # parse images
-        image_bboxes = parse_images(page_id, page, model_output_json)
-        image_bboxes = fix_image_vertical(image_bboxes, text_raw_blocks)  # fix image positions
-        image_bboxes = fix_seperated_image(image_bboxes)  # merge images with coincident edges
-        image_bboxes = include_img_title(text_raw_blocks, image_bboxes)  # look above and below each image for a title, matched by rules (English-only rules for now)
-        """At this point image_bboxes may contain two side-by-side images, each with its own subtitle, plus a shared caption below (like Figxxx); both image bboxes then contain the shared caption, and such images need to be merged."""
-        image_bboxes = combine_images(image_bboxes)  # merge images
-
-        # parse tables and nudge table_bboxes so that the text around tables is not cut off
-        table_bboxes = parse_tables(page_id, page, model_output_json)
-        table_bboxes = fix_tables(page, table_bboxes, include_table_title=True, scan_line_num=2)  # fix
-        table_bboxes = fix_table_text_block(text_raw_blocks, table_bboxes)  # fix the relation to text blocks: some tables do not fully contain the pymupdf text blocks inside them, so one round of fixes is needed
-        #debug_show_bbox(pdf_docs, page_id, table_bboxes, [], [b['bbox'] for b in text_raw_blocks], join_path(save_path, book_name, f"{book_name}_debug.pdf"), 7)
-        table_bboxes = include_table_title(text_raw_blocks, table_bboxes)  # look above and below each table for a title, matched by rules (English-only rules for now)
-
-        # parse equations
-        equations_inline_bboxes, equations_interline_bboxes = parse_equations(page_id, page, model_output_json)
-
-        """
-        ==================================================================================================================================
-        Enter pre-processing stage 1
-        -------------------
-        # # parse titles
-        # title_bboxs = parse_titles(page_id, page, model_output_json)
-        # # evaluate whether the layout is regular and simple
-        # isSimpleLayout_flag, fullColumn_cnt, subColumn_cnt, curPage_loss = evaluate_pdf_layout(page_id, page, model_output_json)
-        The pre-processing itself starts below
-        """
-
-        """Remove each page's page numbers, headers, and footers"""
-        page_no_bboxs = parse_pageNos(page_id, page, model_output_json)
-        header_bboxs = parse_headers(page_id, page, model_output_json)
-        footer_bboxs = parse_footers(page_id, page, model_output_json)
-        image_bboxes, table_bboxes, remain_text_blocks, removed_hdr_foot_txt_block, removed_hdr_foot_img_block, removed_hdr_foot_table = remove_headder_footer_one_page(text_raw_blocks, image_bboxes, table_bboxes, header_bboxs, footer_bboxs, page_no_bboxs, page_width, page_height)
-
-        """Remove text blocks inside long colored strips in the upper half of the page"""
-        remain_text_blocks, removed_colored_narrow_strip_background_text_block = remove_colored_strip_textblock(remain_text_blocks, page)
-
-        #debug_show_bbox(pdf_docs, page_id, footnote_bboxes_by_model, [b['bbox'] for b in remain_text_blocks], header_bboxs, join_path(save_path, book_name, f"{book_name}_debug.pdf"), 7)
-
-        """Remove rotated text: watermarks and vertically laid-out text"""
-        remain_text_blocks, removed_non_horz_text_block = remove_rotate_side_textblock(
-            remain_text_blocks, page_width, page_height
-        )  # remove watermarks and non-horizontal text
-        remain_text_blocks, removed_empty_side_block = remove_side_blank_block(remain_text_blocks, page_width, page_height)  # delete completely blank textblocks that may be left around the page edges; their origin is unknown
-
-        """Remove text blocks sitting on images or tables, and split out stacked images so they do not join the layout computation"""
-        (
-            image_bboxes,
-            table_bboxes,
-            equations_interline_bboxes,
-            equations_inline_bboxes,
-            remain_text_blocks,
-            text_block_on_image_removed,
-            images_overlap_backup,
-            interline_eq_temp_text_block
-        ) = resolve_bbox_overlap_conflict(
-            image_bboxes, table_bboxes, equations_interline_bboxes, equations_inline_bboxes, remain_text_blocks
-        )
-
-        # """Remove footnotes from text and images"""
-        # # footnotes detected by the model
-        # footnote_bboxes_by_model = parse_footnotes_by_model(page_id, page, model_output_json, md_bookname_save_path,
-        #                                                     debug_mode=debug_mode)
-        # # footnotes detected by rules
-        # footnote_bboxes_by_rule = parse_footnotes_by_rule(remain_text_blocks, page_height, page_id)
-        """
-        ==================================================================================================================================
-        """
-
-        # crop images, tables, and equations; save the crops to storage and return the image paths as content
-        image_info, image_backup_info, table_info, inline_eq_info, interline_eq_info = txt_save_images_by_bboxes(
-            page_id,
-            page,
-            pdf_bytes_md5,
-            image_bboxes,
-            images_overlap_backup,
-            table_bboxes,
-            equations_inline_bboxes,
-            equations_interline_bboxes,
-            imageWriter
-        )  # only table and image crops are needed
-
-        """The equation-replacement stage starts below"""
-        char_level_text_blocks = page.get_text("rawdict", flags=fitz.TEXTFLAGS_TEXT)['blocks']
-        remain_text_blocks = combine_chars_to_pymudict(remain_text_blocks, char_level_text_blocks)  # merge chars
-        remain_text_blocks = replace_equations_in_textblock(remain_text_blocks, inline_eq_info, interline_eq_info)
-        remain_text_blocks = remove_citation_marker(remain_text_blocks)  # strip citation markers after equation replacement so equations replace cleanly; the downside is markers may be mistaken for equations, so each order has pros and cons
-        remain_text_blocks = remove_chars_in_text_blocks(remain_text_blocks)  # shrink the intermediate data size
-        #debug_show_bbox(pdf_docs, page_id, [b['bbox'] for b in inline_eq_info], [b['bbox'] for b in interline_eq_info], [], join_path(save_path, book_name, f"{book_name}_debug.pdf"), 3)
-
-        """Remove footnotes from text and images (try stripping citation markers before footnotes)"""
-        # footnotes detected by the model
-        footnote_bboxes_by_model = parse_footnotes_by_model(page_id, page, model_output_json, debug_mode=debug_mode)
-        # footnotes detected by rules
-        footnote_bboxes_by_rule = parse_footnotes_by_rule(remain_text_blocks, page_height, page_id, main_text_font)
-        """Run the pdf filter to drop unreasonable pdfs"""
-        is_good_pdf, err = pdf_filter(page, remain_text_blocks, table_bboxes, image_bboxes)
-        if not is_good_pdf:
-            logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {err}")
-            if not debug_mode:
-                return err
-
-        """
-        ==================================================================================================================================
-        Layout splitting and filtering
-        """
-        """Before splitting, check whether any bboxes overlap horizontally; if so, assume this pdf cannot be handled well for now, since such overlap is most likely caused by interline equations or tables that were not recognized correctly """
-
-        is_text_block_horz_overlap = check_text_block_horizontal_overlap(remain_text_blocks, header_bboxs, footer_bboxs)
-
-        if is_text_block_horz_overlap:
-            # debug_show_bbox(pdf_docs, page_id, [b['bbox'] for b in remain_text_blocks], [], [], join_path(save_path, book_name, f"{book_name}_debug.pdf"), 0)
-            logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {DropReason.TEXT_BLCOK_HOR_OVERLAP}")
-            result = {"_need_drop": True, "_drop_reason": DropReason.TEXT_BLCOK_HOR_OVERLAP}
-            if not debug_mode:
-                return result
-
-        """Normalize everything into one data structure for computing the layout"""
-        page_y0 = 0 if len(header_bboxs) == 0 else max([b[3] for b in header_bboxs])
-        page_y1 = page_height if len(footer_bboxs) == 0 else min([b[1] for b in footer_bboxs])
-        left_x, right_x = get_side_boundry(removed_non_horz_text_block, page_width, page_height)
-        page_boundry = [math.floor(left_x), page_y0 + 1, math.ceil(right_x), page_y1 - 1]
-        # returns an array whose elements are [x0, y0, x1, y1, block_content, idx_x, idx_y]; idx_x and idx_y start as None. For images and equations block_content is the image path; for paragraphs it is the paragraph content
-
-        all_bboxes = prepare_bboxes_for_layout_split(
-            image_info, image_backup_info, table_info, inline_eq_info, interline_eq_info, remain_text_blocks, page_boundry, page)
-        #debug_show_bbox(pdf_docs, page_id, [], [], all_bboxes, join_path(save_path, book_name, f"{book_name}_debug.pdf"), 1)
-        """page_y0 and page_y1 filter out headers and footers so they are not counted inside any layout"""
-        layout_bboxes, layout_tree = get_bboxes_layout(all_bboxes, page_boundry, page_id)
-
-        if len(remain_text_blocks)>0 and len(all_bboxes)>0 and len(layout_bboxes)==0:
-            logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {DropReason.CAN_NOT_DETECT_PAGE_LAYOUT}")
-            result = {"_need_drop": True, "_drop_reason": DropReason.CAN_NOT_DETECT_PAGE_LAYOUT}
-            if not debug_mode:
-                return result
-
-        """Drop complicated layouts and layouts with more than 2 columns below"""
-        if any([lay["layout_label"] == LAYOUT_UNPROC for lay in layout_bboxes]):  # complicated layout
-            logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {DropReason.COMPLICATED_LAYOUT}")
-            result = {"_need_drop": True, "_drop_reason": DropReason.COMPLICATED_LAYOUT}
-            if not debug_mode:
-                return result
-
-        layout_column_width = get_columns_cnt_of_layout(layout_tree)
-        if layout_column_width > 2:  # drop pdfs whose layout has more than 2 columns
-            logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {DropReason.TOO_MANY_LAYOUT_COLUMNS}")
-            result = {
-                "_need_drop": True,
-                "_drop_reason": DropReason.TOO_MANY_LAYOUT_COLUMNS,
-                "extra_info": {"column_cnt": layout_column_width},
-            }
-            if not debug_mode:
-                return result
-
-
-        """
-        ==================================================================================================================================
-        Build the data structures needed downstream
-        """
-        remain_text_blocks = remain_text_blocks + interline_eq_temp_text_block  # put back the interline equations temporarily removed during layout computation, so they are not lost during equation replacement
-        removed_text_blocks = []
-        removed_text_blocks.extend(removed_hdr_foot_txt_block)
-        # removed_text_blocks.extend(removed_footnote_text_block)
-        removed_text_blocks.extend(text_block_on_image_removed)
-        removed_text_blocks.extend(removed_non_horz_text_block)
-        removed_text_blocks.extend(removed_colored_narrow_strip_background_text_block)
-
-        removed_images = []
-        # removed_images.extend(footnote_imgs)
-        removed_images.extend(removed_hdr_foot_img_block)
-
-        images_backup = []
-        images_backup.extend(image_backup_info)
-        remain_text_blocks = escape_special_markdown_char(remain_text_blocks)  # escape the text inside spans
-        sorted_text_remain_text_block = sort_text_block(remain_text_blocks, layout_bboxes)
-
-        footnote_bboxes_tmp = []
-        footnote_bboxes_tmp.extend(footnote_bboxes_by_model)
-        footnote_bboxes_tmp.extend(footnote_bboxes_by_rule)
-
-
-        page_info = construct_page_component(
-            page_id,
-            image_info,
-            table_info,
-            sorted_text_remain_text_block,
-            layout_bboxes,
-            inline_eq_info,
-            interline_eq_info,
-            page.get_text("dict", flags=fitz.TEXTFLAGS_TEXT)["blocks"],
-            removed_text_blocks=removed_text_blocks,
-            removed_image_blocks=removed_images,
-            images_backup=images_backup,
-            droped_table_block=[],
-            table_backup=[],
-            layout_tree=layout_tree,
-            page_w=page.rect.width,
-            page_h=page.rect.height,
-            footnote_bboxes_tmp=footnote_bboxes_tmp
-        )
-        pdf_info_dict[f"page_{page_id}"] = page_info
-
-    # end page for
-
-    '''Time the post-processing stage'''
-    start_time = time.time()
-
-    """
-    ==================================================================================================================================
-    Remove headers and footers; this needs some statistics, so it runs last.
-    Headers and footers are removed mainly from text and image boxes located around the page edges.
-    The function below modifies pdf_info_dict directly, deleting header/footer content from text blocks and images and recording what was deleted.
-    """
-    # remove headers and footers
-    header, footer = drop_footer_header(pdf_info_dict)
-
-    """Within each layout, merge a footnote with all textbboxes below it"""
-
-    for page_key, page_info in pdf_info_dict.items():
-        page_info = merge_footnote_blocks(page_info, main_text_font)
-        page_info = remove_footnote_blocks(page_info)
-        pdf_info_dict[page_key] = page_info
-
-    """Run the pdf post-filter to drop unreasonable pdfs"""
-
-    i = 0
-    for page_info in pdf_info_dict.values():
-        is_good_pdf, err = pdf_post_filter(page_info)
-        if not is_good_pdf:
-            logger.warning(f"page_id: {i}, drop this pdf: {pdf_bytes_md5}, reason: {err}")
-            if not debug_mode:
-                return err
-        i += 1
-
-    if debug_mode:
-        # log the post-processing time
-        logger.info(f"post_processing_time: {get_delta_time(start_time)}")
-
-    """
-    ==================================================================================================================================
-    Paragraph processing, stage 2
-    """
-
-    # handle overly large gaps between characters within a line
-    pdf_info_dict = solve_inline_too_large_interval(pdf_info_dict)
-
-    start_time = time.time()
-
-    para_process_pipeline = ParaProcessPipeline()
-
-    def _deal_with_text_exception(error_info):
-        logger.warning(f"page_id: {page_id}, drop this pdf: {pdf_bytes_md5}, reason: {error_info}")
-        if error_info == denseSingleLineBlockException_msg:
-            logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.DENSE_SINGLE_LINE_BLOCK}")
-            result = {"_need_drop": True, "_drop_reason": DropReason.DENSE_SINGLE_LINE_BLOCK}
-            return result
-        if error_info == titleDetectionException_msg:
-            logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.TITLE_DETECTION_FAILED}")
-            result = {"_need_drop": True, "_drop_reason": DropReason.TITLE_DETECTION_FAILED}
-            return result
-        elif error_info == titleLevelException_msg:
-            logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.TITLE_LEVEL_FAILED}")
-            result = {"_need_drop": True, "_drop_reason": DropReason.TITLE_LEVEL_FAILED}
-            return result
-        elif error_info == paraSplitException_msg:
-            logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.PARA_SPLIT_FAILED}")
-            result = {"_need_drop": True, "_drop_reason": DropReason.PARA_SPLIT_FAILED}
-            return result
-        elif error_info == paraMergeException_msg:
-            logger.warning(f"Drop this pdf: {pdf_bytes_md5}, reason: {DropReason.PARA_MERGE_FAILED}")
-            result = {"_need_drop": True, "_drop_reason": DropReason.PARA_MERGE_FAILED}
-            return result
-
-    pdf_info_dict, error_info = para_process_pipeline.para_process_pipeline(pdf_info_dict)
-    if error_info is not None:
-        return _deal_with_text_exception(error_info)
-
-
-    """Convert dict to list"""
-    pdf_info_list = dict_to_list(pdf_info_dict)
-    new_pdf_info_dict = {
-        "pdf_info": pdf_info_list,
-    }
-
-    return new_pdf_info_dict
+    return pdf_parse_union(pdf_bytes,
+                           model_list,
+                           imageWriter,
+                           "txt",
+                           start_page_id=start_page_id,
+                           end_page_id=end_page_id,
+                           debug_mode=debug_mode,
+                           )
magic_pdf/pre_proc/equations_replace.py
CHANGED
@@ -299,7 +299,7 @@ def replace_line_v2(eqinfo, line):
             [b["bbox"][2] for b in delete_chars]
         )
     else:
-        logger.debug(f"行内公式替换没有发生,尝试下一行匹配, eqinfo={eqinfo}")
+        # logger.debug(f"行内公式替换没有发生,尝试下一行匹配, eqinfo={eqinfo}")
         return False
 
     # delete the spans lying between x0 and x1
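Note: `parse_pdf_by_txt` and `parse_pdf_by_ocr` now reduce to the same call into `pdf_parse_union`, differing only in the mode string. The core lives in `pdf_parse_union_core.py` and is not part of this diff; the sketch below only illustrates the dispatcher shape such a core plausibly has, not the actual implementation:

```python
def pdf_parse_union(pdf_bytes, model_list, image_writer, parse_mode,
                    start_page_id=0, end_page_id=None, debug_mode=False):
    # One shared per-page pipeline; only span extraction depends on the mode.
    if parse_mode == "txt":
        pass  # take spans from the PDF's own text layer
    elif parse_mode == "ocr":
        pass  # take spans from the OCR model output
    else:
        raise Exception(f"parse_mode must be 'txt' or 'ocr', got {parse_mode!r}")
```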
magic_pdf/user_api.py
CHANGED
@@ -18,8 +18,8 @@ from loguru import logger
 from magic_pdf.libs.version import __version__
 from magic_pdf.model.doc_analyze_by_custom_model import doc_analyze
 from magic_pdf.rw import AbsReaderWriter
-from magic_pdf.pdf_parse_by_ocr_v2 import parse_pdf_by_ocr
-from magic_pdf.pdf_parse_by_txt_v2 import parse_pdf_by_txt
+from magic_pdf.pdf_parse_by_ocr import parse_pdf_by_ocr
+from magic_pdf.pdf_parse_by_txt import parse_pdf_by_txt
 
 PARSE_TYPE_TXT = "txt"
 PARSE_TYPE_OCR = "ocr"
@@ -86,45 +86,7 @@ def parse_union_pdf(pdf_bytes: bytes, pdf_models: list, imageWriter: AbsReaderWr
         return None
 
     pdf_info_dict = parse_pdf(parse_pdf_by_txt)
-
-    # for page_dict in pdf_info_dict['pdf_info']:
-    #     for para_block in page_dict['para_blocks']:
-    #         if para_block['type'] in ['title', 'text']:
-    #             for line in para_block['lines']:
-    #                 for span in line['spans']:
-    #                     text_all += span['content']
-
-    # def calculate_not_common_character_rate(text):
-    #     garbage_regex = re.compile(r'[^\u4e00-\u9fa5\u0030-\u0039\u0041-\u005a\u0061-\u007a\u3000-\u303f\uff00-\uffef]')
-    #     # count the garbled characters
-    #     garbage_count = len(garbage_regex.findall(text))
-    #     total = len(text)
-    #     if total == 0:
-    #         return 0  # avoid division by zero
-    #     return garbage_count / total
-    #
-    # def calculate_not_printable_rate(text):
-    #     printable_text = ""
-    #     for c in text:
-    #         if c.isprintable():
-    #             printable_text += c
-    #     printable_total = len(printable_text)
-    #     total = len(text)
-    #     if total == 0:
-    #         return 0  # avoid division by zero
-    #     return (total - printable_total) / total
-    #
-    # not_common_character_rate = calculate_not_common_character_rate(text_all)
-    # not_printable_rate = calculate_not_printable_rate(text_all)
-    # pdf_info_dict["_not_common_character_rate"] = not_common_character_rate
-    # pdf_info_dict["_not_printable_rate"] = not_printable_rate
-    # logger.info(f"not_common_character_rate: {not_common_character_rate}, not_printable_rate: {not_printable_rate}")
-    '''The new logic uses pdfminer to detect garbled pdfs; it is accurate, avoids false positives, and already runs before the parsing flow'''
-    # not_common_character_rate may misfire on low-resource languages; not_printable_rate is friendlier to them
-    if (pdf_info_dict is None
-            or pdf_info_dict.get("_need_drop", False)
-            # or not_printable_rate > 0.02  # judging from some normal pdfs, this value never exceeds 0.01, so the threshold is set to 0.02
-    ):
+    if pdf_info_dict is None or pdf_info_dict.get("_need_drop", False):
         logger.warning(f"parse_pdf_by_txt drop or error, switch to parse_pdf_by_ocr")
         if input_model_is_empty:
             pdf_models = doc_analyze(pdf_bytes, ocr=True)
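Note: `parse_union_pdf` keeps its two-stage strategy: parse via the text layer first, and fall back to OCR only when the result is missing or flagged `_need_drop`. A minimal sketch of that control flow; the callables are placeholders for the diff's `parse_pdf(...)` and `doc_analyze(...)` calls:

```python
def parse_with_fallback(parse_txt, parse_ocr, reanalyze_with_ocr):
    # Cheap path first: the PDF's own text layer.
    pdf_info_dict = parse_txt()
    if pdf_info_dict is None or pdf_info_dict.get("_need_drop", False):
        # txt parsing failed or was dropped: redo model analysis with OCR, then reparse.
        reanalyze_with_ocr()
        pdf_info_dict = parse_ocr()
    return pdf_info_dict
```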
{magic_pdf-0.5.9.dist-info → magic_pdf-0.5.10.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: magic-pdf
-Version: 0.5.9
+Version: 0.5.10
 Summary: A practical tool for converting PDF to Markdown
 Home-page: https://github.com/magicpdf/Magic-PDF
 Requires-Python: >=3.9
@@ -72,20 +72,57 @@ Key features include:
 
 ### Usage Instructions
 
-1.
-
+#### 1. Install Magic-PDF
 ```bash
-pip install magic-pdf
-or
-pip install magic-pdf[gpu]  # Install the GPU version
+pip install magic-pdf
 ```
 
-2.
+#### 2. Usage via Command Line
 
+###### simple
+```bash
+cp magic-pdf.template.json to ~/magic-pdf.json
+magic-pdf pdf-command --pdf "pdf_path" --model "model_json_path"
+```
+###### more
 ```bash
 magic-pdf --help
 ```
 
+#### 3. Usage via Api
+
+###### Local
+```python
+image_writer = DiskReaderWriter(local_image_dir)
+image_dir = str(os.path.basename(local_image_dir))
+jso_useful_key = {"_pdf_type": "", "model_list": model_json}
+pipe = UNIPipe(pdf_bytes, jso_useful_key, image_writer)
+pipe.pipe_classify()
+pipe.pipe_parse()
+md_content = pipe.pipe_mk_markdown(image_dir, drop_mode="none")
+```
+
+###### Object Storage
+```python
+s3pdf_cli = S3ReaderWriter(pdf_ak, pdf_sk, pdf_endpoint)
+image_dir = "s3://img_bucket/"
+s3image_cli = S3ReaderWriter(img_ak, img_sk, img_endpoint, parent_path=image_dir)
+pdf_bytes = s3pdf_cli.read(s3_pdf_path, mode=s3pdf_cli.MODE_BIN)
+jso_useful_key = {"_pdf_type": "", "model_list": model_json}
+pipe = UNIPipe(pdf_bytes, jso_useful_key, s3image_cli)
+pipe.pipe_classify()
+pipe.pipe_parse()
+md_content = pipe.pipe_mk_markdown(image_dir, drop_mode="none")
+```
+
+Demo can be referred to [demo.py](https://github.com/magicpdf/Magic-PDF/blob/master/demo/demo.py)
+
+## All Thanks To Our Contributors
+
+<a href="https://github.com/magicpdf/Magic-PDF/graphs/contributors">
+  <img src="https://contrib.rocks/image?repo=magicpdf/Magic-PDF" />
+</a>
+
 ## License Information
 
 See [LICENSE.md](https://github.com/magicpdf/Magic-PDF/blob/master/LICENSE.md) for details.
{magic_pdf-0.5.9.dist-info → magic_pdf-0.5.10.dist-info}/RECORD
CHANGED
@@ -1,13 +1,11 @@
 magic_pdf/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-magic_pdf/pdf_parse_by_ocr.py,sha256=
-magic_pdf/pdf_parse_by_ocr_v2.py,sha256=
-magic_pdf/pdf_parse_by_txt.py,sha256=5_kdfvDkv_XwDove2AW7SopGysYLJ1-tsOQy2yuII1Y,21932
-magic_pdf/pdf_parse_by_txt_v2.py,sha256=mGadyYamoCNGNsKOQM1uXQR65zMUKyL24yURGHADmVs,1908
+magic_pdf/pdf_parse_by_ocr.py,sha256=IWnSWt1Z-d35xRqspzdLR2iUtma_SAu4W7K4kEk8SHc,638
+magic_pdf/pdf_parse_by_txt.py,sha256=KUSH7Gh83CZmdyWw59pqDskwyJ2Kg-jU-9fnQGJQEs4,537
 magic_pdf/pdf_parse_for_train.py,sha256=Oby61DMjJ716Jj_ri7lwXfv2Chus0pbBR2RPXrmBW08,28661
 magic_pdf/pdf_parse_union_core.py,sha256=a67iQuEfuslAEF-wQplGZKXUuz5mT3HiCyvuR52E6Gw,10584
-magic_pdf/user_api.py,sha256=
+magic_pdf/user_api.py,sha256=CVQH-VSiZpz0bSkyMT4czk1epZriIPSJsLsPbluPa9Q,3054
 magic_pdf/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-magic_pdf/cli/magicpdf.py,sha256=
+magic_pdf/cli/magicpdf.py,sha256=FF6flO6wUcKG9Qx_FG6-xhHfmQzQWLjwrkMa5kowDgs,10937
 magic_pdf/dict2md/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 magic_pdf/dict2md/mkcontent.py,sha256=rWUY-2opd0jeowEUEVOV_uWcKum1Q7ng4nOoT6-ka_s,17459
 magic_pdf/dict2md/ocr_mkcontent.py,sha256=RyxebPtvFfNce_HCa-_YGxwFx_srzL-BfMKc85V9JG0,15442
@@ -46,7 +44,7 @@ magic_pdf/libs/pdf_check.py,sha256=MAe8wzwT0qvPf_I72wEZG7k1g4haNHS7oUtLqkB5rlE,2
 magic_pdf/libs/pdf_image_tools.py,sha256=CAd01giTKr_UJz1_QtDOARG9G9z69GFpzRZwcWSfLtE,1282
 magic_pdf/libs/safe_filename.py,sha256=ckwcM_eqoysTb5id8czp-tXq2G9da0-l3pshZDCHQtE,236
 magic_pdf/libs/textbase.py,sha256=SC1Frhz3Fb7V7n2SFRBsl7Bmg0JZdlvZskq0lfW1vIk,732
-magic_pdf/libs/version.py,sha256=
+magic_pdf/libs/version.py,sha256=1nlPInsRzDbcDPveZ3ghSJ6v6KveN9n6gnj-twW4DkI,23
 magic_pdf/libs/vis_utils.py,sha256=hTOTEakKV0pGMbk0tbRkVI_tku7A3dGc96ynObZ4kwI,10207
 magic_pdf/model/360_layout_analysis.py,sha256=GbchKPJRVcrxvwNXMnR4vt8lOLPauTWMl-43ayyhX7U,221
 magic_pdf/model/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -64,7 +62,7 @@ magic_pdf/para/exceptions.py,sha256=kpjGxrSZ-drNmoKlmuQ0asTjI8cKKKWsdDDBoDHQP9M,
 magic_pdf/para/layout_match_processor.py,sha256=yr4FEO7GJ502udShqGRqIJQ_FQxoa0aG_mhmWd8nLwI,1554
 magic_pdf/para/para_pipeline.py,sha256=zLaCHI9jLi1UPzh0lHP44mUjpKVTHS0gE_5YrkjVqEY,11796
 magic_pdf/para/para_split.py,sha256=-UJM2jREW_2h3ZlJAU7dRD8bK3CMGKuhJrfgqv3Auvk,31310
-magic_pdf/para/para_split_v2.py,sha256=
+magic_pdf/para/para_split_v2.py,sha256=jGOhsubdh_CEgSv9WMNmp1loq1YNlpcAj3yh3g0gPhw,37027
 magic_pdf/para/raw_processor.py,sha256=mHxD9FrdOSXH7NqM41s55URyCyuyACvm9kKtowkIb3k,6317
 magic_pdf/para/stats.py,sha256=-6Pf9Y8jkP1uJOYWiHUjw9Lb-Fb9GY7MHr_ok7x2GX0,9731
 magic_pdf/para/title_processor.py,sha256=pYZv9vEkIjAtCz8jIUtl9AVUy_ib5SdAZmMVoZtsMRI,38593
@@ -89,7 +87,7 @@ magic_pdf/pre_proc/detect_header.py,sha256=KOmRehgKMuMqNa_2weXkdNSiRVWMFgLMQE4e1
 magic_pdf/pre_proc/detect_images.py,sha256=8DwGGTb5IjxqADZDTc_ngwJrTYXxK2qpRqI2FBoPr00,30432
 magic_pdf/pre_proc/detect_page_number.py,sha256=qvYrBbCtBbREvw-MySL_p7byCRvcm1fkLJ5ZB4TP8OM,2848
 magic_pdf/pre_proc/detect_tables.py,sha256=srJzgLVeVuOsqnESqfdJfVukTF84K8qmI5mgFX_BZGs,2800
-magic_pdf/pre_proc/equations_replace.py,sha256=
+magic_pdf/pre_proc/equations_replace.py,sha256=fXj7ZV7F3YtkDYrAhE9g5tHk4_3pVUyLbhDtMjbxjWU,20386
 magic_pdf/pre_proc/fix_image.py,sha256=5MOfkXc8abfIp49g-68vll40wwTUZ5tcQ2gtsJuFmvs,11486
 magic_pdf/pre_proc/fix_table.py,sha256=20sqJe27fAXcL7_C0qQ9mpsggmH37WuX-wPYWyRgACA,13227
 magic_pdf/pre_proc/main_text_font.py,sha256=1gkjvPuBdKC4oVFkLvnRm2zghsLtVlfAEMKXouyVonM,1048
@@ -117,9 +115,9 @@ magic_pdf/train_utils/convert_to_train_format.py,sha256=ifo2FAoBMa_etCvz0O4v03xO
 magic_pdf/train_utils/extract_caption.py,sha256=gommEqIEWLplSDEJWD7_66daqlOBsWhpRBW1DHpkny4,1825
 magic_pdf/train_utils/remove_footer_header.py,sha256=pyeNNdJ-th3wl5Xwb10ZLYNaFN4-6BmahoMFE8VTNNs,5978
 magic_pdf/train_utils/vis_utils.py,sha256=MV9N9cT3ifJ35u7LFKGF9I_bOIQrtU1zcsxu2hj3aqM,10111
-magic_pdf-0.5.9.dist-info/LICENSE.md,sha256=
-magic_pdf-0.5.9.dist-info/METADATA,sha256=
-magic_pdf-0.5.9.dist-info/WHEEL,sha256=
-magic_pdf-0.5.9.dist-info/entry_points.txt,sha256=
-magic_pdf-0.5.9.dist-info/top_level.txt,sha256=
-magic_pdf-0.5.9.dist-info/RECORD,,
+magic_pdf-0.5.10.dist-info/LICENSE.md,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
+magic_pdf-0.5.10.dist-info/METADATA,sha256=B3e0sVOyFhk47EfHPuLFRUNxzdasWYx3XuYR53LSJX8,4175
+magic_pdf-0.5.10.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+magic_pdf-0.5.10.dist-info/entry_points.txt,sha256=NbSkSmE08UuTwdoJD8Uofq8iyufySA4x7jmIIk4YCzI,57
+magic_pdf-0.5.10.dist-info/top_level.txt,sha256=J9I0AzmHWGkp9c6DL8Oe4mEx3yYphLzkRn4H25Lg1rE,10
+magic_pdf-0.5.10.dist-info/RECORD,,
magic_pdf/pdf_parse_by_ocr_v2.py
DELETED
@@ -1,17 +0,0 @@
-from magic_pdf.pdf_parse_union_core import pdf_parse_union
-
-def parse_pdf_by_ocr(pdf_bytes,
-                     model_list,
-                     imageWriter,
-                     start_page_id=0,
-                     end_page_id=None,
-                     debug_mode=False,
-                     ):
-    return pdf_parse_union(pdf_bytes,
-                           model_list,
-                           imageWriter,
-                           "ocr",
-                           start_page_id=start_page_id,
-                           end_page_id=end_page_id,
-                           debug_mode=debug_mode,
-                           )
magic_pdf/pdf_parse_by_txt_v2.py
DELETED
@@ -1,56 +0,0 @@
-from magic_pdf.pdf_parse_union_core import pdf_parse_union
-
-
-def parse_pdf_by_txt(
-    pdf_bytes,
-    model_list,
-    imageWriter,
-    start_page_id=0,
-    end_page_id=None,
-    debug_mode=False,
-):
-    return pdf_parse_union(pdf_bytes,
-                           model_list,
-                           imageWriter,
-                           "txt",
-                           start_page_id=start_page_id,
-                           end_page_id=end_page_id,
-                           debug_mode=debug_mode,
-                           )
-
-
-if __name__ == "__main__":
-    pass
-    # if 1:
-    #     import fitz
-    #     import json
-    #
-    #     with open("/opt/data/pdf/20240418/25536-00.pdf", "rb") as f:
-    #         pdf_bytes = f.read()
-    #     pdf_docs = fitz.open("pdf", pdf_bytes)
-    #
-    #     with open("/opt/data/pdf/20240418/25536-00.json") as f:
-    #         model_list = json.loads(f.readline())
-    #
-    #     magic_model = MagicModel(model_list, pdf_docs)
-    #     for i in range(7):
-    #         print(magic_model.get_imgs(i))
-    #
-    #     for page_no, page in enumerate(pdf_docs):
-    #         inline_equations, interline_equations, interline_equation_blocks = (
-    #             magic_model.get_equations(page_no)
-    #         )
-    #
-    #         text_raw_blocks = page.get_text("dict", flags=fitz.TEXTFLAGS_TEXT)["blocks"]
-    #         char_level_text_blocks = page.get_text(
-    #             "rawdict", flags=fitz.TEXTFLAGS_TEXT
-    #         )["blocks"]
-    #         text_blocks = combine_chars_to_pymudict(
-    #             text_raw_blocks, char_level_text_blocks
-    #         )
-    #         text_blocks = replace_equations_in_textblock(
-    #             text_blocks, inline_equations, interline_equations
-    #         )
-    #         text_blocks = remove_citation_marker(text_blocks)
-    #
-    #         text_blocks = remove_chars_in_text_blocks(text_blocks)
{magic_pdf-0.5.9.dist-info → magic_pdf-0.5.10.dist-info}/LICENSE.md
File without changes
{magic_pdf-0.5.9.dist-info → magic_pdf-0.5.10.dist-info}/WHEEL
File without changes
{magic_pdf-0.5.9.dist-info → magic_pdf-0.5.10.dist-info}/entry_points.txt
File without changes
{magic_pdf-0.5.9.dist-info → magic_pdf-0.5.10.dist-info}/top_level.txt
File without changes