magic-pdf 0.5.9__py3-none-any.whl → 0.5.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
magic_pdf/cli/magicpdf.py CHANGED
```diff
@@ -17,13 +17,12 @@
 
 
     效果:
-    python magicpdf.py --json s3://llm-pdf-text/scihub/xxxx.json?bytes=0,81350
-    python magicpdf.py --pdf /home/llm/Downloads/xxxx.pdf --model /home/llm/Downloads/xxxx.json 或者 python magicpdf.py --pdf /home/llm/Downloads/xxxx.pdf
+    python magicpdf.py json-command --json s3://llm-pdf-text/scihub/xxxx.json?bytes=0,81350
+    python magicpdf.py pdf-command --pdf /home/llm/Downloads/xxxx.pdf --model /home/llm/Downloads/xxxx.json 或者 python magicpdf.py --pdf /home/llm/Downloads/xxxx.pdf
 """
 
 import os
 import json as json_parse
-import sys
 import click
 from loguru import logger
 from pathlib import Path
```
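The docstring now documents the subcommand-style invocation (`json-command` / `pdf-command`) instead of bare flags on the top-level script. A minimal sketch of driving the new CLI in-process, assuming `cli` is the click group defined in this module; the file paths are placeholders, not files shipped with the package:

```python
from click.testing import CliRunner

from magic_pdf.cli.magicpdf import cli

runner = CliRunner()
# Roughly equivalent to: python magicpdf.py pdf-command --pdf demo.pdf --model demo.json
result = runner.invoke(cli, ["pdf-command", "--pdf", "demo.pdf", "--model", "demo.json"])
print(result.exit_code, result.output)
```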
```diff
@@ -45,6 +44,8 @@ from magic_pdf.rw.S3ReaderWriter import S3ReaderWriter
 from magic_pdf.rw.DiskReaderWriter import DiskReaderWriter
 from magic_pdf.rw.AbsReaderWriter import AbsReaderWriter
 import csv
+import copy
+import magic_pdf.model as model_config
 
 parse_pdf_methods = click.Choice(["ocr", "txt", "auto"])
 
@@ -65,26 +66,27 @@ def write_to_csv(csv_file_path, csv_data):
         csv_writer = csv.writer(csvfile)
         # 写入数据
         csv_writer.writerow(csv_data)
-        print(f"数据已成功追加到 '{csv_file_path}'")
+        logger.info(f"数据已成功追加到 '{csv_file_path}'")
 
 
 def do_parse(
-    pdf_file_name,
-    pdf_bytes,
-    model_list,
-    parse_method,
-    f_draw_span_bbox=True,
-    f_draw_layout_bbox=True,
-    f_dump_md=True,
-    f_dump_middle_json=True,
-    f_dump_model_json=True,
-    f_dump_orig_pdf=True,
-    f_dump_content_list=True,
+    pdf_file_name,
+    pdf_bytes,
+    model_list,
+    parse_method,
+    f_draw_span_bbox=True,
+    f_draw_layout_bbox=True,
+    f_dump_md=True,
+    f_dump_middle_json=True,
+    f_dump_model_json=True,
+    f_dump_orig_pdf=True,
+    f_dump_content_list=True,
 ):
+    orig_model_list = copy.deepcopy(model_list)
 
     local_image_dir, local_md_dir = prepare_env(pdf_file_name, parse_method)
     image_writer, md_writer = DiskReaderWriter(local_image_dir), DiskReaderWriter(local_md_dir)
-    image_dir = (os.path.basename(local_image_dir),)
+    image_dir = str(os.path.basename(local_image_dir))
 
     if parse_method == "auto":
         jso_useful_key = {"_pdf_type": "", "model_list": model_list}
```
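The new `copy.deepcopy(model_list)` snapshot exists because the pipeline mutates the model data in place; a later hunk switches the `{pdf_file_name}_model.json` dump to this untouched copy so it reflects the caller's input rather than the post-processing state. A minimal illustration of the aliasing problem, with a toy page record and a toy mutation standing in for whatever the pipe does internally:

```python
import copy

model_list = [{"layout_dets": [], "page_info": {"page_no": 0}}]  # toy structure

alias = model_list                    # same objects: later edits show up here too
snapshot = copy.deepcopy(model_list)  # independent copy, frozen at this point

model_list[0]["layout_dets"].append({"category_id": 1})  # toy in-place mutation

print(alias[0]["layout_dets"])     # [{'category_id': 1}] -- mutated
print(snapshot[0]["layout_dets"])  # []                   -- still the original input
```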
```diff
@@ -94,14 +96,18 @@ def do_parse(
     elif parse_method == "ocr":
         pipe = OCRPipe(pdf_bytes, model_list, image_writer, is_debug=True)
     else:
-        print("unknown parse method")
-        sys.exit(1)
+        logger.error("unknown parse method")
+        exit(1)
 
     pipe.pipe_classify()
 
-    """如果没有传入有效的模型数据,则使用内置paddle解析"""
+    """如果没有传入有效的模型数据,则使用内置model解析"""
     if len(model_list) == 0:
-        pipe.pipe_analyze()
+        if model_config.__use_inside_model__:
+            pipe.pipe_analyze()
+        else:
+            logger.error("need model list input")
+            exit(1)
 
     pipe.pipe_parse()
     pdf_info = pipe.pdf_mid_data["pdf_info"]
```
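With an empty `model_list`, the built-in analysis now runs only when `__use_inside_model__` is set; otherwise the command logs and exits. A compact mirror of that decision, with `pipe` standing in for the real UNIPipe/TXTPipe/OCRPipe object:

```python
import magic_pdf.model as model_config
from loguru import logger


def analyze_or_abort(pipe, model_list):
    """Sketch of the new fallback: only use the built-in model when allowed."""
    if len(model_list) == 0:
        if model_config.__use_inside_model__:
            pipe.pipe_analyze()      # built-in analysis, as in the hunk above
        else:
            logger.error("need model list input")
            raise SystemExit(1)      # the CLI itself calls exit(1) here
```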
```diff
@@ -110,10 +116,7 @@ def do_parse(
     if f_draw_span_bbox:
         draw_span_bbox(pdf_info, pdf_bytes, local_md_dir)
 
-    # write_to_csv(r"D:\project\20231108code-clean\linshixuqiu\pdf_dev\新模型\新建文件夹\luanma.csv",
-    #              [pdf_file_name, pipe.pdf_mid_data['not_common_character_rate'], pipe.pdf_mid_data['not_printable_rate']])
-
-    md_content = pipe.pipe_mk_markdown(str(image_dir), drop_mode=DropMode.NONE)
+    md_content = pipe.pipe_mk_markdown(image_dir, drop_mode=DropMode.NONE)
     if f_dump_md:
         """写markdown"""
         md_writer.write(
@@ -133,7 +136,7 @@ def do_parse(
     if f_dump_model_json:
         """写model_json"""
         md_writer.write(
-            content=json_parse.dumps(pipe.model_list, ensure_ascii=False, indent=4),
+            content=json_parse.dumps(orig_model_list, ensure_ascii=False, indent=4),
             path=f"{pdf_file_name}_model.json",
             mode=AbsReaderWriter.MODE_TXT,
         )
@@ -146,7 +149,7 @@ def do_parse(
             mode=AbsReaderWriter.MODE_BIN,
         )
 
-    content_list = pipe.pipe_mk_uni_format(str(image_dir), drop_mode=DropMode.NONE)
+    content_list = pipe.pipe_mk_uni_format(image_dir, drop_mode=DropMode.NONE)
     if f_dump_content_list:
         """写content_list"""
         md_writer.write(
```
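`image_dir` was previously a one-element tuple (note the trailing comma in the removed assignment), so the `str(image_dir)` that used to be passed to `pipe_mk_markdown` and `pipe_mk_uni_format` rendered as the literal `"('images',)"` rather than a clean directory name, which presumably leaked into the generated image paths. Storing the basename as a plain string fixes that. A short reproduction of the difference, with a placeholder path:

```python
import os

local_image_dir = "/tmp/magic_pdf/some_pdf/images"      # placeholder path

old_image_dir = (os.path.basename(local_image_dir),)    # trailing comma -> 1-tuple
new_image_dir = str(os.path.basename(local_image_dir))

print(str(old_image_dir))  # "('images',)"  -- what 0.5.9 handed to the pipe
print(new_image_dir)       # "images"       -- what 0.5.11 hands to the pipe
```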
```diff
@@ -171,10 +174,13 @@ def cli():
     help="指定解析方法。txt: 文本型 pdf 解析方法, ocr: 光学识别解析 pdf, auto: 程序智能选择解析方法",
     default="auto",
 )
-def json_command(json, method):
+@click.option("--inside_model", type=click.BOOL, default=False, help="使用内置模型测试")
+def json_command(json, method, inside_model):
+    model_config.__use_inside_model__ = inside_model
+
     if not json.startswith("s3://"):
-        print("usage: python magipdf.py --json s3://some_bucket/some_path")
-        sys.exit(1)
+        logger.error("usage: magic-pdf json-command --json s3://some_bucket/some_path")
+        exit(1)
 
     def read_s3_path(s3path):
         bucket, key = parse_s3path(s3path)
@@ -219,7 +225,10 @@ def json_command(json, method):
     help="指定解析方法。txt: 文本型 pdf 解析方法, ocr: 光学识别解析 pdf, auto: 程序智能选择解析方法",
     default="auto",
 )
-def local_json_command(local_json, method):
+@click.option("--inside_model", type=click.BOOL, default=False, help="使用内置模型测试")
+def local_json_command(local_json, method, inside_model):
+    model_config.__use_inside_model__ = inside_model
+
     def read_s3_path(s3path):
         bucket, key = parse_s3path(s3path)
 
@@ -268,7 +277,10 @@ def local_json_command(local_json, method):
     help="指定解析方法。txt: 文本型 pdf 解析方法, ocr: 光学识别解析 pdf, auto: 程序智能选择解析方法",
     default="auto",
 )
-def pdf_command(pdf, model, method):
+@click.option("--inside_model", type=click.BOOL, default=False, help="使用内置模型测试")
+def pdf_command(pdf, model, method, inside_model):
+    model_config.__use_inside_model__ = inside_model
+
     def read_fn(path):
         disk_rw = DiskReaderWriter(os.path.dirname(path))
         return disk_rw.read(os.path.basename(path), AbsReaderWriter.MODE_BIN)
```
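All three commands gain the same `--inside_model` option, whose value is written into module-level configuration before any parsing starts. A simplified, self-contained reproduction of that pattern; `demo` is a stand-in command, not part of magic-pdf:

```python
import click

import magic_pdf.model as model_config


@click.command()
@click.option("--inside_model", type=click.BOOL, default=False, help="use the built-in model")
def demo(inside_model):
    # Mirror of the three commands above: flip the package-level flag first,
    # so downstream code (do_parse, doc_analyze) sees the caller's choice.
    model_config.__use_inside_model__ = inside_model
    click.echo(f"__use_inside_model__ = {model_config.__use_inside_model__}")


if __name__ == "__main__":
    demo()  # e.g. `python demo.py --inside_model true`
```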
```diff
@@ -281,7 +293,7 @@ def pdf_command(pdf, model, method):
     model_path = pdf.replace(".pdf", ".json")
     if not os.path.exists(model_path):
         logger.warning(
-            f"not found json {model_path} existed, use paddle analyze"
+            f"not found json {model_path} existed"
         )
         # 本地无模型数据则调用内置paddle分析,先传空list,在内部识别到空list再调用paddle
         model_json = "[]"
```
magic_pdf/libs/version.py CHANGED
```diff
@@ -1 +1 @@
-__version__ = "0.5.9"
+__version__ = "0.5.11"
```
```diff
@@ -0,0 +1 @@
+__use_inside_model__ = False
```
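This one-line hunk adds a new module holding the flag; given that the other hunks reach it via `import magic_pdf.model as model_config`, it is presumably the `magic_pdf.model` package `__init__`. Importing the module object rather than the bare name is what makes the CLI's assignment visible everywhere else, as this sketch shows:

```python
# Rebinding a from-imported name only changes the local binding, while
# assigning an attribute on the module object is seen by every importer.
import magic_pdf.model as model_config
from magic_pdf.model import __use_inside_model__ as local_flag

model_config.__use_inside_model__ = True

print(local_flag)                         # False -- stale local binding
print(model_config.__use_inside_model__)  # True  -- shared module state
```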
```diff
@@ -2,9 +2,10 @@ import fitz
 import cv2
 from PIL import Image
 import numpy as np
+from loguru import logger
 
 from magic_pdf.model.model_list import MODEL
-from magic_pdf.model.pp_structure_v2 import CustomPaddleModel
+import magic_pdf.model as model_config
 
 
 def dict_compare(d1, d2):
@@ -41,6 +42,13 @@ def load_images_from_pdf(pdf_bytes: bytes, dpi=200) -> list:
 
 
 def doc_analyze(pdf_bytes: bytes, ocr: bool = False, show_log: bool = False, model=MODEL.Paddle):
+
+    if model_config.__use_inside_model__:
+        from magic_pdf.model.pp_structure_v2 import CustomPaddleModel
+    else:
+        logger.error("use_inside_model is False, not allow to use inside model")
+        exit(1)
+
     images = load_images_from_pdf(pdf_bytes)
     custom_model = None
     if model == MODEL.Paddle:
```
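`doc_analyze` now imports `CustomPaddleModel` lazily and only when the flag allows it, so merely importing this module no longer drags in paddleocr. A minimal sketch of the same gated lazy-import pattern; `get_inside_model_class` is an illustrative helper, not part of the package:

```python
import magic_pdf.model as model_config
from loguru import logger


def get_inside_model_class():
    """Import the built-in model only when the flag allows it."""
    if not model_config.__use_inside_model__:
        logger.error("use_inside_model is False, not allow to use inside model")
        raise SystemExit(1)
    # Deferred import: paddleocr is only touched when this branch actually runs.
    from magic_pdf.model.pp_structure_v2 import CustomPaddleModel
    return CustomPaddleModel
```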
```diff
@@ -1,7 +1,12 @@
 import random
 
 from loguru import logger
-from paddleocr import PPStructure
+
+try:
+    from paddleocr import PPStructure
+except ImportError:
+    logger.error('paddleocr not installed, please install by "pip install magic-pdf[cpu]" or "pip install magic-pdf[gpu]"')
+    exit(1)
 
 
 def region_to_bbox(region):
```
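The unconditional `from paddleocr import PPStructure` is wrapped so a missing optional dependency produces an actionable install hint instead of a bare ImportError. A common variant of the same guard, shown here with a deferred error instead of the immediate `exit(1)` the package uses, purely as an illustration:

```python
try:
    from paddleocr import PPStructure
except ImportError:  # paddleocr is an optional extra
    PPStructure = None


def require_ppstructure():
    """Raise a clear, actionable error only when the feature is actually used."""
    if PPStructure is None:
        raise RuntimeError(
            'paddleocr not installed, please install by "pip install magic-pdf[cpu]" '
            'or "pip install magic-pdf[gpu]"'
        )
    return PPStructure
```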
```diff
@@ -11,6 +11,7 @@ LINE_STOP_FLAG = ['.', '!', '?', '。', '!', '?', ":", ":", ")", ")", ";
 INLINE_EQUATION = ContentType.InlineEquation
 INTERLINE_EQUATION = ContentType.InterlineEquation
 TEXT = ContentType.Text
+debug_able = False
 
 
 def __get_span_text(span):
@@ -22,6 +23,7 @@ def __get_span_text(span):
 
 
 def __detect_list_lines(lines, new_layout_bboxes, lang):
+    global debug_able
     """
     探测是否包含了列表,并且把列表的行分开.
     这样的段落特点是,顶格字母大写/数字,紧跟着几行缩进的。缩进的行首字母含小写的。
@@ -41,12 +43,14 @@ def __detect_list_lines(lines, new_layout_bboxes, lang):
             if lst[i] == 1:
                 ones_in_this_interval.append(i)
             i += 1
-        if len(ones_in_this_interval) > 1 or (start < len(lst) - 1 and ones_in_this_interval and lst[start + 1] in [2, 3]):
+        if len(ones_in_this_interval) > 1 or (
+                start < len(lst) - 1 and ones_in_this_interval and lst[start + 1] in [2, 3]):
             indices.append((start, i - 1))
             ones_indices.append(ones_in_this_interval)
         else:
             i += 1
     return indices, ones_indices
+
 def find_repeating_patterns(lst):
     indices = []
     ones_indices = []
@@ -132,7 +136,8 @@ def __detect_list_lines(lines, new_layout_bboxes, lang):
 
     list_indice, list_start_idx = find_repeating_patterns2(line_fea_encode)
     if len(list_indice) > 0:
-        logger.info(f"发现了列表,列表行数:{list_indice}, {list_start_idx}")
+        if debug_able:
+            logger.info(f"发现了列表,列表行数:{list_indice}, {list_start_idx}")
 
     # TODO check一下这个特列表里缩进的行左侧是不是对齐的。
     segments = []
@@ -140,13 +145,16 @@ def __detect_list_lines(lines, new_layout_bboxes, lang):
         for i in range(start, end + 1):
             if i > 0:
                 if line_fea_encode[i] == 4:
-                    logger.info(f"列表行的第{i}行不是顶格的")
+                    if debug_able:
+                        logger.info(f"列表行的第{i}行不是顶格的")
                     break
         else:
-            logger.info(f"列表行的第{start}到第{end}行是列表")
+            if debug_able:
+                logger.info(f"列表行的第{start}到第{end}行是列表")
 
     return split_indices(total_lines, list_indice), list_start_idx
 
+
 def cluster_line_x(lines: list) -> dict:
     """
     对一个block内所有lines的bbox的x0聚类
@@ -170,6 +178,7 @@ def cluster_line_x(lines: list) -> dict:
             min_x0 = x0_new_val
     return x0_2_new_val, min_x0
 
+
 def if_match_reference_list(text: str) -> bool:
     pattern = re.compile(r'^\d+\..*')
     if pattern.match(text):
@@ -190,7 +199,8 @@ def __valign_lines(blocks, layout_bboxes):
     new_layout_bboxes = []
 
     for layout_box in layout_bboxes:
-        blocks_in_layoutbox = [b for b in blocks if b["type"] == BlockType.Text and is_in_layout(b['bbox'], layout_box['layout_bbox'])]
+        blocks_in_layoutbox = [b for b in blocks if
+                               b["type"] == BlockType.Text and is_in_layout(b['bbox'], layout_box['layout_bbox'])]
         if len(blocks_in_layoutbox) == 0 or len(blocks_in_layoutbox[0]["lines"]) == 0:
             new_layout_bboxes.append(layout_box['layout_bbox'])
             continue
@@ -253,7 +263,8 @@ def __align_text_in_layout(blocks, layout_bboxes):
     """
     for layout in layout_bboxes:
         lb = layout['layout_bbox']
-        blocks_in_layoutbox = [block for block in blocks if block["type"] == BlockType.Text and is_in_layout(block['bbox'], lb)]
+        blocks_in_layoutbox = [block for block in blocks if
+                               block["type"] == BlockType.Text and is_in_layout(block['bbox'], lb)]
         if len(blocks_in_layoutbox) == 0:
             continue
 
@@ -383,7 +394,6 @@ def __split_para_lines(lines: list, text_blocks: list) -> list:
             continue
         text_lines.append(line)
 
-
     for block in text_blocks:
         block_bbox = block["bbox"]
         para = []
@@ -394,14 +404,12 @@ def __split_para_lines(lines: list, text_blocks: list) -> list:
         if len(para) > 0:
             text_paras.append(para)
     paras = other_paras.extend(text_paras)
-    paras_sorted = sorted(paras, key = lambda x: x[0]["bbox"][1])
+    paras_sorted = sorted(paras, key=lambda x: x[0]["bbox"][1])
     return paras_sorted
 
 
-
-
-
 def __connect_list_inter_layout(blocks_group, new_layout_bbox, layout_list_info, page_num, lang):
+    global debug_able
     """
     如果上个layout的最后一个段落是列表,下一个layout的第一个段落也是列表,那么将他们连接起来。 TODO 因为没有区分列表和段落,所以这个方法暂时不实现。
     根据layout_list_info判断是不是列表。,下个layout的第一个段如果不是列表,那么看他们是否有几行都有相同的缩进。
@@ -410,7 +418,7 @@ def __connect_list_inter_layout(blocks_group, new_layout_bbox, layout_list_info,
         return blocks_group, [False, False]
 
     for i in range(1, len(blocks_group)):
-        if len(blocks_group[i]) == 0 or len(blocks_group[i-1]) == 0:
+        if len(blocks_group[i]) == 0 or len(blocks_group[i - 1]) == 0:
             continue
         pre_layout_list_info = layout_list_info[i - 1]
         next_layout_list_info = layout_list_info[i]
@@ -418,8 +426,10 @@ def __connect_list_inter_layout(blocks_group, new_layout_bbox, layout_list_info,
         next_paras = blocks_group[i]
         next_first_para = next_paras[0]
 
-        if pre_layout_list_info[1] and not next_layout_list_info[0] and next_first_para["type"] == BlockType.Text:  # 前一个是列表结尾,后一个是非列表开头,此时检测是否有相同的缩进
-            logger.info(f"连接page {page_num} 内的list")
+        if pre_layout_list_info[1] and not next_layout_list_info[0] and next_first_para[
+            "type"] == BlockType.Text:  # 前一个是列表结尾,后一个是非列表开头,此时检测是否有相同的缩进
+            if debug_able:
+                logger.info(f"连接page {page_num} 内的list")
             # 向layout_paras[i] 寻找开头具有相同缩进的连续的行
             may_list_lines = []
             lines = next_first_para.get("lines", [])
@@ -450,7 +460,8 @@ def __connect_list_inter_page(pre_page_paras, next_page_paras, pre_page_layout_b
     if pre_page_paras[-1][-1]["type"] != BlockType.Text or next_page_paras[0][0]["type"] != BlockType.Text:
         return False
     if pre_page_list_info[1] and not next_page_list_info[0]:  # 前一个是列表结尾,后一个是非列表开头,此时检测是否有相同的缩进
-        logger.info(f"连接page {page_num} 内的list")
+        if debug_able:
+            logger.info(f"连接page {page_num} 内的list")
         # 向layout_paras[i] 寻找开头具有相同缩进的连续的行
         may_list_lines = []
         next_page_first_para = next_page_paras[0][0]
@@ -503,7 +514,7 @@ def __connect_para_inter_layoutbox(blocks_group, new_layout_bbox):
         try:
             if len(blocks_group[i]) == 0:
                 continue
-            if len(blocks_group[i - 1]) == 0:  # TODO 考虑连接问题,
+            if len(blocks_group[i - 1]) == 0:  # TODO 考虑连接问题,
                 connected_layout_blocks.append(blocks_group[i])
                 continue
             # text类型的段才需要考虑layout间的合并
@@ -534,13 +545,14 @@ def __connect_para_inter_layoutbox(blocks_group, new_layout_bbox):
 
             pre_last_line_text = pre_last_line_text.strip()
             next_first_line_text = next_first_line_text.strip()
-            if pre_last_line['bbox'][2] == pre_x2_max and pre_last_line_text and pre_last_line_text[-1] not in LINE_STOP_FLAG and \
+            if pre_last_line['bbox'][2] == pre_x2_max and pre_last_line_text and pre_last_line_text[
+                -1] not in LINE_STOP_FLAG and \
                     next_first_line['bbox'][0] == next_x0_min:  # 前面一行沾满了整个行,并且没有结尾符号.下一行没有空白开头。
                 """连接段落条件成立,将前一个layout的段落和后一个layout的段落连接。"""
                 #connected_layout_paras[-1][-1].extend(layout_paras[i][0])
                 connected_layout_blocks[-1][-1]["lines"].extend(blocks_group[i][0]["lines"])
                 #layout_paras[i].pop(0)  # 删除后一个layout的第一个段落, 因为他已经被合并到前一个layout的最后一个段落了。
-                blocks_group[i][0]["lines"] = [] #删除后一个layout第一个段落中的lines,因为他已经被合并到前一个layout的最后一个段落了
+                blocks_group[i][0]["lines"] = []  #删除后一个layout第一个段落中的lines,因为他已经被合并到前一个layout的最后一个段落了
                 blocks_group[i][0][LINES_DELETED] = True
                 # if len(layout_paras[i]) == 0:
                 #     layout_paras.pop(i)
@@ -638,7 +650,8 @@ def find_consecutive_true_regions(input_array):
     return regions
 
 
-def __connect_middle_align_text(page_paras, new_layout_bbox, page_num, lang, debug_mode):
+def __connect_middle_align_text(page_paras, new_layout_bbox, page_num, lang):
+    global debug_able
     """
     找出来中间对齐的连续单行文本,如果连续行高度相同,那么合并为一个段落。
     一个line居中的条件是:
@@ -660,11 +673,12 @@ def __connect_middle_align_text(page_paras, new_layout_bbox, page_num, lang, debug_mode):
         for start, end in consecutive_single_line_indices:
             #start += index_offset
             #end += index_offset
-            line_hi = np.array([block["lines"][0]['bbox'][3] - block["lines"][0]['bbox'][1] for block in layout_para[start:end + 1]])
+            line_hi = np.array([block["lines"][0]['bbox'][3] - block["lines"][0]['bbox'][1] for block in
+                                layout_para[start:end + 1]])
             first_line_text = ''.join([__get_span_text(span) for span in layout_para[start]["lines"][0]['spans']])
             if "Table" in first_line_text or "Figure" in first_line_text:
                 pass
-            if debug_mode:
+            if debug_able:
                 logger.info(line_hi.std())
 
             if line_hi.std() < 2:
@@ -677,10 +691,10 @@ def __connect_middle_align_text(page_paras, new_layout_bbox, page_num, lang, debug_mode):
                     and not all([x1 == layout_box[2] for x1 in all_right_x1]):
                 merge_para = [block["lines"][0] for block in layout_para[start:end + 1]]
                 para_text = ''.join([__get_span_text(span) for line in merge_para for span in line['spans']])
-                if debug_mode:
+                if debug_able:
                     logger.info(para_text)
                 layout_para[start]["lines"] = merge_para
-                for i_para in range(start+1, end+1):
+                for i_para in range(start + 1, end + 1):
                     layout_para[i_para]["lines"] = []
                     layout_para[i_para][LINES_DELETED] = True
                 #layout_para[start:end + 1] = [merge_para]
@@ -713,14 +727,15 @@ def __do_split_page(blocks, layout_bboxes, new_layout_bbox, page_num, lang):
     blocks_group = __group_line_by_layout(blocks, layout_bboxes)  # block内分段
     layout_list_info = __split_para_in_layoutbox(blocks_group, new_layout_bbox, lang)  # layout内分段
     blocks_group, page_list_info = __connect_list_inter_layout(blocks_group, new_layout_bbox, layout_list_info,
-                                                               page_num, lang)  # layout之间连接列表段落
+                                                               page_num, lang)  # layout之间连接列表段落
     connected_layout_blocks = __connect_para_inter_layoutbox(blocks_group, new_layout_bbox)  # layout间链接段落
 
     return connected_layout_blocks, page_list_info
 
 
-
 def para_split(pdf_info_dict, debug_mode, lang="en"):
+    global debug_able
+    debug_able = debug_mode
     new_layout_of_pages = []  # 数组的数组,每个元素是一个页面的layoutS
     all_page_list_info = []  # 保存每个页面开头和结尾是否是列表
     for page_num, page in pdf_info_dict.items():
```
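Throughout this file the `debug_mode` parameter that used to be threaded into helpers is replaced by a module-level `debug_able` switch: `para_split` sets it once, and helpers such as `__detect_list_lines` and `__connect_middle_align_text` consult it via `global`, so diagnostic logging can be toggled without touching every call signature. A minimal sketch of that pattern with placeholder function names:

```python
from loguru import logger

debug_able = False


def _helper():
    # Reading alone would not need `global`, but the file declares it in the
    # readers as well, so the sketch mirrors that.
    global debug_able
    if debug_able:
        logger.info("helper-level diagnostics, emitted only when debugging")


def entry_point(debug_mode: bool):
    global debug_able
    debug_able = debug_mode   # mirrors para_split(pdf_info_dict, debug_mode, ...)
    _helper()
```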
```diff
@@ -744,14 +759,14 @@ def para_split(pdf_info_dict, debug_mode, lang="en"):
 
             is_conn = __connect_para_inter_page(pre_page_paras, next_page_paras, pre_page_layout_bbox,
                                                 next_page_layout_bbox, page_num, lang)
-            if debug_mode:
+            if debug_able:
                 if is_conn:
                     logger.info(f"连接了第{page_num - 1}页和第{page_num}页的段落")
 
             is_list_conn = __connect_list_inter_page(pre_page_paras, next_page_paras, pre_page_layout_bbox,
                                                      next_page_layout_bbox, all_page_list_info[page_num - 1],
                                                      all_page_list_info[page_num], page_num, lang)
-            if debug_mode:
+            if debug_able:
                 if is_list_conn:
                     logger.info(f"连接了第{page_num - 1}页和第{page_num}页的列表段落")
 
@@ -762,7 +777,7 @@ def para_split(pdf_info_dict, debug_mode, lang="en"):
     for page_num, page in enumerate(pdf_info_dict.values()):
         page_paras = page['para_blocks']
         new_layout_bbox = new_layout_of_pages[page_num]
-        __connect_middle_align_text(page_paras, new_layout_bbox, page_num, lang, debug_mode=debug_mode)
+        __connect_middle_align_text(page_paras, new_layout_bbox, page_num, lang)
         __merge_signle_list_text(page_paras, new_layout_bbox, page_num, lang)
 
     # layout展平
```