magic-pdf 0.7.0a1__py3-none-any.whl → 0.7.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -8,4 +8,7 @@ CROSS_PAGE = "cross_page"
  Custom fields at the block level
  """
  # whether the lines in a block have been deleted
- LINES_DELETED = "lines_deleted"
+ LINES_DELETED = "lines_deleted"
+
+ # table recognition max time default value
+ TABLE_MAX_TIME_VALUE = 400
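For context, the new constant is consumed later in this diff as the fallback timeout when the user's `table_config` omits `max_time`. A minimal sketch, not part of the diff; the `table_config` dict below is a hypothetical example:

```python
from magic_pdf.libs.Constants import TABLE_MAX_TIME_VALUE

# Hypothetical user config read from magic-pdf.json; "max_time" is omitted on purpose.
table_config = {"is_table_recog_enable": True}
# Mirrors the CustomPEKModel change later in this diff: fall back to the 400 s default.
table_max_time = table_config.get("max_time", TABLE_MAX_TIME_VALUE)
print(table_max_time)  # 400
```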
@@ -1,6 +1,7 @@
  from magic_pdf.libs.Constants import CROSS_PAGE
  from magic_pdf.libs.commons import fitz # PyMuPDF
- from magic_pdf.libs.ocr_content_type import ContentType, BlockType
+ from magic_pdf.libs.ocr_content_type import ContentType, BlockType, CategoryId
+ from magic_pdf.model.magic_model import MagicModel


  def draw_bbox_without_number(i, bbox_list, page, rgb_config, fill_config):
@@ -225,3 +226,67 @@ def draw_span_bbox(pdf_info, pdf_bytes, out_path):

      # Save the PDF
      pdf_docs.save(f"{out_path}/spans.pdf")
+
+
+ def drow_model_bbox(model_list: list, pdf_bytes, out_path):
+     dropped_bbox_list = []
+     tables_body_list, tables_caption_list, tables_footnote_list = [], [], []
+     imgs_body_list, imgs_caption_list = [], []
+     titles_list = []
+     texts_list = []
+     interequations_list = []
+     pdf_docs = fitz.open("pdf", pdf_bytes)
+     magic_model = MagicModel(model_list, pdf_docs)
+     for i in range(len(model_list)):
+         page_dropped_list = []
+         tables_body, tables_caption, tables_footnote = [], [], []
+         imgs_body, imgs_caption = [], []
+         titles = []
+         texts = []
+         interequations = []
+         page_info = magic_model.get_model_list(i)
+         layout_dets = page_info["layout_dets"]
+         for layout_det in layout_dets:
+             bbox = layout_det["bbox"]
+             if layout_det["category_id"] == CategoryId.Text:
+                 texts.append(bbox)
+             elif layout_det["category_id"] == CategoryId.Title:
+                 titles.append(bbox)
+             elif layout_det["category_id"] == CategoryId.TableBody:
+                 tables_body.append(bbox)
+             elif layout_det["category_id"] == CategoryId.TableCaption:
+                 tables_caption.append(bbox)
+             elif layout_det["category_id"] == CategoryId.TableFootnote:
+                 tables_footnote.append(bbox)
+             elif layout_det["category_id"] == CategoryId.ImageBody:
+                 imgs_body.append(bbox)
+             elif layout_det["category_id"] == CategoryId.ImageCaption:
+                 imgs_caption.append(bbox)
+             elif layout_det["category_id"] == CategoryId.InterlineEquation_YOLO:
+                 interequations.append(bbox)
+             elif layout_det["category_id"] == CategoryId.Abandon:
+                 page_dropped_list.append(bbox)
+
+         tables_body_list.append(tables_body)
+         tables_caption_list.append(tables_caption)
+         tables_footnote_list.append(tables_footnote)
+         imgs_body_list.append(imgs_body)
+         imgs_caption_list.append(imgs_caption)
+         titles_list.append(titles)
+         texts_list.append(texts)
+         interequations_list.append(interequations)
+         dropped_bbox_list.append(page_dropped_list)
+
+     for i, page in enumerate(pdf_docs):
+         draw_bbox_with_number(i, dropped_bbox_list, page, [158, 158, 158], True)  # color !
+         draw_bbox_with_number(i, tables_body_list, page, [204, 204, 0], True)
+         draw_bbox_with_number(i, tables_caption_list, page, [255, 255, 102], True)
+         draw_bbox_with_number(i, tables_footnote_list, page, [229, 255, 204], True)
+         draw_bbox_with_number(i, imgs_body_list, page, [153, 255, 51], True)
+         draw_bbox_with_number(i, imgs_caption_list, page, [102, 178, 255], True)
+         draw_bbox_with_number(i, titles_list, page, [102, 102, 255], True)
+         draw_bbox_with_number(i, texts_list, page, [153, 0, 76], True)
+         draw_bbox_with_number(i, interequations_list, page, [0, 255, 0], True)
+
+     # Save the PDF
+     pdf_docs.save(f"{out_path}/model.pdf")
@@ -19,3 +19,17 @@ class BlockType:
      Footnote = "footnote"
      Discarded = "discarded"

+
+ class CategoryId:
+     Title = 0
+     Text = 1
+     Abandon = 2
+     ImageBody = 3
+     ImageCaption = 4
+     TableBody = 5
+     TableCaption = 6
+     TableFootnote = 7
+     InterlineEquation_Layout = 8
+     InlineEquation = 13
+     InterlineEquation_YOLO = 14
+     OcrText = 15
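For reference, the hard-coded numeric filters used later in this diff (in the model inference code) correspond to these names; a small illustrative snippet, not part of the diff:

```python
from magic_pdf.libs.ocr_content_type import CategoryId

# The ID lists filtered in the layout post-processing below, expressed with the new names.
formula_ids = [CategoryId.InlineEquation, CategoryId.InterlineEquation_YOLO]  # [13, 14]
ocr_ids = [CategoryId.Title, CategoryId.Text, CategoryId.Abandon,
           CategoryId.ImageCaption, CategoryId.TableCaption, CategoryId.TableFootnote]  # [0, 1, 2, 4, 6, 7]
table_ids = [CategoryId.TableBody]  # [5]
```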
magic_pdf/libs/version.py CHANGED
@@ -1 +1 @@
- __version__ = "0.7.0a1"
+ __version__ = "0.7.0b1"
@@ -37,8 +37,8 @@ def load_images_from_pdf(pdf_bytes: bytes, dpi=200) -> list:
  mat = fitz.Matrix(dpi / 72, dpi / 72)
  pm = page.get_pixmap(matrix=mat, alpha=False)

- # if width or height > 3000 pixels, don't enlarge the image
- if pm.width > 3000 or pm.height > 3000:
+ # If the width or height exceeds 9000 after scaling, do not scale further.
+ if pm.width > 9000 or pm.height > 9000:
      pm = page.get_pixmap(matrix=fitz.Matrix(1, 1), alpha=False)

  img = Image.frombytes("RGB", (pm.width, pm.height), pm.samples)
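A quick sanity check of what the raised threshold means, not part of the diff: with the default `dpi=200`, the zoom factor is `200/72`, so the fallback to an unscaled pixmap now triggers only for pages longer than roughly 3240 pt per side instead of roughly 1080 pt.

```python
# Rough arithmetic behind the 3000 -> 9000 change at the default dpi=200.
dpi = 200
zoom = dpi / 72                # ~2.78x scaling applied by fitz.Matrix(dpi / 72, dpi / 72)
old_limit_pts = 3000 / zoom    # ~1080 pt page side hit the old cap
new_limit_pts = 9000 / zoom    # ~3240 pt page side hits the new cap
print(round(old_limit_pts), round(new_limit_pts))
```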
@@ -2,6 +2,7 @@ from loguru import logger
  import os
  import time

+ from magic_pdf.libs.Constants import TABLE_MAX_TIME_VALUE

  os.environ['NO_ALBUMENTATIONS_UPDATE'] = '1' # disable albumentations update checks
  try:
@@ -26,7 +27,7 @@ except ImportError as e:
      logger.exception(e)
      logger.error(
          'Required dependency not installed, please install by \n'
-         '"pip install magic-pdf[full] detectron2 --extra-index-url https://myhloli.github.io/wheels/"')
+         '"pip install magic-pdf[full] --extra-index-url https://myhloli.github.io/wheels/"')
      exit(1)

  from magic_pdf.model.pek_sub_modules.layoutlmv3.model_init import Layoutlmv3_Predictor
@@ -35,7 +36,7 @@ from magic_pdf.model.pek_sub_modules.self_modify import ModifiedPaddleOCR
  from magic_pdf.model.pek_sub_modules.structeqtable.StructTableModel import StructTableModel


- def table_model_init(model_path, max_time=400, _device_='cpu'):
+ def table_model_init(model_path, max_time, _device_='cpu'):
      table_model = StructTableModel(model_path, max_time=max_time, device=_device_)
      return table_model

@@ -105,6 +106,7 @@ class CustomPEKModel:
  self.apply_formula = kwargs.get("apply_formula", self.configs["config"]["formula"])
  self.table_config = kwargs.get("table_config", self.configs["config"]["table_config"])
  self.apply_table = self.table_config.get("is_table_recog_enable", False)
+ self.table_max_time = self.table_config.get("max_time", TABLE_MAX_TIME_VALUE)
  self.apply_ocr = ocr
  logger.info(
      "DocAnalysis init, this may take some times. apply_layout: {}, apply_formula: {}, apply_ocr: {}, apply_table: {}".format(
@@ -141,9 +143,8 @@ class CustomPEKModel:

      # init structeqtable
      if self.apply_table:
-         max_time = self.table_config.get("max_time", 400)
          self.table_model = table_model_init(str(os.path.join(models_dir, self.configs["weights"]["table"])),
-                                             max_time=max_time, _device_=self.device)
+                                             max_time = self.table_max_time, _device_=self.device)
      logger.info('DocAnalysis init done!')

  def __call__(self, image):
@@ -187,50 +188,56 @@ class CustomPEKModel:
  mfr_cost = round(time.time() - mfr_start, 2)
  logger.info(f"formula nums: {len(mf_image_list)}, mfr time: {mfr_cost}")

+ # Select regions for OCR / formula regions / table regions
+ ocr_res_list = []
+ table_res_list = []
+ single_page_mfdetrec_res = []
+ for res in layout_res:
+     if int(res['category_id']) in [13, 14]:
+         single_page_mfdetrec_res.append({
+             "bbox": [int(res['poly'][0]), int(res['poly'][1]),
+                      int(res['poly'][4]), int(res['poly'][5])],
+         })
+     elif int(res['category_id']) in [0, 1, 2, 4, 6, 7]:
+         ocr_res_list.append(res)
+     elif int(res['category_id']) in [5]:
+         table_res_list.append(res)
+
+ # Unified crop img logic
+ def crop_img(input_res, input_pil_img, crop_paste_x=0, crop_paste_y=0):
+     crop_xmin, crop_ymin = int(input_res['poly'][0]), int(input_res['poly'][1])
+     crop_xmax, crop_ymax = int(input_res['poly'][4]), int(input_res['poly'][5])
+     # Create a white background with an additional width and height of 50
+     crop_new_width = crop_xmax - crop_xmin + crop_paste_x * 2
+     crop_new_height = crop_ymax - crop_ymin + crop_paste_y * 2
+     return_image = Image.new('RGB', (crop_new_width, crop_new_height), 'white')
+
+     # Crop image
+     crop_box = (crop_xmin, crop_ymin, crop_xmax, crop_ymax)
+     cropped_img = input_pil_img.crop(crop_box)
+     return_image.paste(cropped_img, (crop_paste_x, crop_paste_y))
+     return_list = [crop_paste_x, crop_paste_y, crop_xmin, crop_ymin, crop_xmax, crop_ymax, crop_new_width, crop_new_height]
+     return return_image, return_list
+
+ pil_img = Image.fromarray(image)
+
  # OCR recognition
  if self.apply_ocr:
      ocr_start = time.time()
-     pil_img = Image.fromarray(image)
-
-     # Select the regions that need OCR and the formula regions
-     ocr_res_list = []
-     single_page_mfdetrec_res = []
-     for res in layout_res:
-         if int(res['category_id']) in [13, 14]:
-             single_page_mfdetrec_res.append({
-                 "bbox": [int(res['poly'][0]), int(res['poly'][1]),
-                          int(res['poly'][4]), int(res['poly'][5])],
-             })
-         elif int(res['category_id']) in [0, 1, 2, 4, 6, 7]:
-             ocr_res_list.append(res)
-
-     # Process each region that requires OCR
+     # Process each area that requires OCR processing
      for res in ocr_res_list:
-         xmin, ymin = int(res['poly'][0]), int(res['poly'][1])
-         xmax, ymax = int(res['poly'][4]), int(res['poly'][5])
-
-         paste_x = 50
-         paste_y = 50
-         # Create a white background 50 px larger in width and height
-         new_width = xmax - xmin + paste_x * 2
-         new_height = ymax - ymin + paste_y * 2
-         new_image = Image.new('RGB', (new_width, new_height), 'white')
-
-         # Crop the image
-         crop_box = (xmin, ymin, xmax, ymax)
-         cropped_img = pil_img.crop(crop_box)
-         new_image.paste(cropped_img, (paste_x, paste_y))
-
-         # Adjust the coordinates of the formula regions
+         new_image, useful_list = crop_img(res, pil_img, crop_paste_x=50, crop_paste_y=50)
+         paste_x, paste_y, xmin, ymin, xmax, ymax, new_width, new_height = useful_list
+         # Adjust the coordinates of the formula area
          adjusted_mfdetrec_res = []
          for mf_res in single_page_mfdetrec_res:
              mf_xmin, mf_ymin, mf_xmax, mf_ymax = mf_res["bbox"]
-             # Convert the formula coordinates to be relative to the cropped region
+             # Adjust the coordinates of the formula area to the coordinates relative to the cropping area
              x0 = mf_xmin - xmin + paste_x
              y0 = mf_ymin - ymin + paste_y
              x1 = mf_xmax - xmin + paste_x
              y1 = mf_ymax - ymin + paste_y
-             # Filter out formula blocks outside the image
+             # Filter formula blocks outside the graph
              if any([x1 < 0, y1 < 0]) or any([x0 > new_width, y0 > new_height]):
                  continue
              else:
@@ -238,17 +245,17 @@ class CustomPEKModel:
                      "bbox": [x0, y0, x1, y1],
                  })

-         # OCR recognition
+         # OCR recognition
          new_image = cv2.cvtColor(np.asarray(new_image), cv2.COLOR_RGB2BGR)
          ocr_res = self.ocr_model.ocr(new_image, mfd_res=adjusted_mfdetrec_res)[0]

-         # Merge the results
+         # Integration results
          if ocr_res:
              for box_ocr_res in ocr_res:
                  p1, p2, p3, p4 = box_ocr_res[0]
                  text, score = box_ocr_res[1]

-                 # Convert the coordinates back to the original image coordinate system
+                 # Convert the coordinates back to the original coordinate system
                  p1 = [p1[0] - paste_x + xmin, p1[1] - paste_y + ymin]
                  p2 = [p2[0] - paste_x + xmin, p2[1] - paste_y + ymin]
                  p3 = [p3[0] - paste_x + xmin, p3[1] - paste_y + ymin]
@@ -266,30 +273,24 @@

  # table recognition
  if self.apply_table:
-     pil_img = Image.fromarray(image)
-     for layout in layout_res:
-         if layout.get("category_id", -1) == 5:
-             poly = layout["poly"]
-             xmin, ymin = int(poly[0]), int(poly[1])
-             xmax, ymax = int(poly[4]), int(poly[5])
-
-             paste_x = 50
-             paste_y = 50
-             # create a white background 50 px larger in width and height
-             new_width = xmax - xmin + paste_x * 2
-             new_height = ymax - ymin + paste_y * 2
-             new_image = Image.new('RGB', (new_width, new_height), 'white')
-
-             # crop image
-             crop_box = (xmin, ymin, xmax, ymax)
-             cropped_img = pil_img.crop(crop_box)
-             new_image.paste(cropped_img, (paste_x, paste_y))
-             start_time = time.time()
-             logger.info("------------------table recognition processing begins-----------------")
+     table_start = time.time()
+     for res in table_res_list:
+         new_image, _ = crop_img(res, pil_img)
+         single_table_start_time = time.time()
+         logger.info("------------------table recognition processing begins-----------------")
+         with torch.no_grad():
              latex_code = self.table_model.image2latex(new_image)[0]
-             end_time = time.time()
-             run_time = end_time - start_time
-             logger.info(f"------------table recognition processing ends within {run_time}s-----")
-             layout["latex"] = latex_code
+         run_time = time.time() - single_table_start_time
+         logger.info(f"------------table recognition processing ends within {run_time}s-----")
+         if run_time > self.table_max_time:
+             logger.warning(f"------------table recognition processing exceeds max time {self.table_max_time}s----------")
+         # Check whether the returned result is well-formed
+         expected_ending = latex_code.strip().endswith('end{tabular}') or latex_code.strip().endswith('end{table}')
+         if latex_code and expected_ending:
+             res["latex"] = latex_code
+         else:
+             logger.warning(f"------------table recognition processing fails----------")
+     table_cost = round(time.time() - table_start, 2)
+     logger.info(f"table cost: {table_cost}")

  return layout_res
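A small worked example, not part of the diff, of the coordinate round trip that `crop_img` and the OCR post-processing above rely on: the crop is pasted at `(paste_x, paste_y)` on a white canvas, so a point detected on the padded crop maps back to the page with `page = crop - paste + region_min`. The numbers below are made up.

```python
# Hypothetical numbers illustrating the mapping used above.
paste_x, paste_y = 50, 50          # white margin added around the crop
xmin, ymin = 120, 340              # top-left of the detected region on the page
crop_x, crop_y = 60, 75            # a point reported by OCR on the padded crop
page_x = crop_x - paste_x + xmin   # 130: back in page coordinates
page_y = crop_y - paste_y + ymin   # 365
print(page_x, page_y)
```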
@@ -30,12 +30,10 @@ def read_s3_path(s3path):
      byte_start, byte_end = 0, None
  else:
      byte_start, byte_end = int(may_range_params[0]), int(may_range_params[1])
-     byte_end += byte_start - 1
- return s3_rw.read_jsonl(
+ return s3_rw.read_offset(
      remove_non_official_s3_args(s3path),
      byte_start,
      byte_end,
-     AbsReaderWriter.MODE_BIN,
  )


@@ -71,24 +69,23 @@ def cli():
      default="",
  )
  def jsonl(jsonl, method, output_dir):
-     print("haha")
      model_config.__use_inside_model__ = False
-     full_jsonl_path = os.path.realpath(jsonl)
-     if output_dir == "":
-         output_dir = os.path.join(os.path.dirname(full_jsonl_path), "output")
-
      if jsonl.startswith("s3://"):
          jso = json_parse.loads(read_s3_path(jsonl).decode("utf-8"))
+         full_jsonl_path = "."
      else:
+         full_jsonl_path = os.path.realpath(jsonl)
          with open(jsonl) as f:
              jso = json_parse.loads(f.readline())
+
+     if output_dir == "":
+         output_dir = os.path.join(os.path.dirname(full_jsonl_path), "output")
      s3_file_path = jso.get("file_location")
      if s3_file_path is None:
          s3_file_path = jso.get("path")
      pdf_file_name = Path(s3_file_path).stem
      pdf_data = read_s3_path(s3_file_path)

-
      print(pdf_file_name, jso, method)
      do_parse(
          output_dir,
@@ -97,6 +94,7 @@ def jsonl(jsonl, method, output_dir):
      jso["doc_layout_result"],
      method,
      f_dump_content_list=True,
+     f_draw_model_bbox=True,
  )


@@ -149,6 +147,7 @@ def pdf(pdf, json_data, output_dir, method):
      model_json_list,
      method,
      f_dump_content_list=True,
+     f_draw_model_bbox=True,
  )


magic_pdf/tools/common.py CHANGED
@@ -4,7 +4,7 @@ import copy
  import click
  from loguru import logger
  from magic_pdf.libs.MakeContentConfig import DropMode, MakeMode
- from magic_pdf.libs.draw_bbox import draw_layout_bbox, draw_span_bbox
+ from magic_pdf.libs.draw_bbox import draw_layout_bbox, draw_span_bbox, drow_model_bbox
  from magic_pdf.pipe.UNIPipe import UNIPipe
  from magic_pdf.pipe.OCRPipe import OCRPipe
  from magic_pdf.pipe.TXTPipe import TXTPipe
@@ -37,6 +37,7 @@ def do_parse(
      f_dump_orig_pdf=True,
      f_dump_content_list=False,
      f_make_md_mode=MakeMode.MM_MD,
+     f_draw_model_bbox=False,
  ):
      orig_model_list = copy.deepcopy(model_list)
      local_image_dir, local_md_dir = prepare_env(output_dir, pdf_file_name, parse_method)
@@ -73,6 +74,8 @@ def do_parse(
          draw_layout_bbox(pdf_info, pdf_bytes, local_md_dir)
      if f_draw_span_bbox:
          draw_span_bbox(pdf_info, pdf_bytes, local_md_dir)
+     if f_draw_model_bbox:
+         drow_model_bbox(orig_model_list, pdf_bytes, local_md_dir)

      md_content = pipe.pipe_mk_markdown(
          image_dir, drop_mode=DropMode.NONE, md_make_mode=f_make_md_mode
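A minimal end-to-end sketch of the new flag, not part of the diff; it mirrors the `cli_dev.py` calls in this change set, and the file paths and the way the model result is loaded are assumptions:

```python
import json
from pathlib import Path
from magic_pdf.tools.common import do_parse

pdf_path = Path("example.pdf")                                # hypothetical input PDF
model_json_list = json.loads(Path("model.json").read_text())  # hypothetical model inference result

do_parse(
    "/tmp/output",            # output_dir
    pdf_path.stem,            # pdf_file_name
    pdf_path.read_bytes(),    # raw PDF bytes
    model_json_list,          # per-page layout model results
    "auto",                   # parse method
    f_dump_content_list=True,
    f_draw_model_bbox=True,   # new flag: also writes model.pdf via drow_model_bbox
)
```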
@@ -0,0 +1,421 @@
1
+ Metadata-Version: 2.1
2
+ Name: magic-pdf
3
+ Version: 0.7.0b1
4
+ Summary: A practical tool for converting PDF to Markdown
5
+ Home-page: https://github.com/opendatalab/MinerU
6
+ Requires-Python: >=3.9
7
+ Description-Content-Type: text/markdown
8
+ License-File: LICENSE.md
9
+ Requires-Dist: boto3>=1.28.43
10
+ Requires-Dist: Brotli>=1.1.0
11
+ Requires-Dist: click>=8.1.7
12
+ Requires-Dist: PyMuPDF>=1.24.9
13
+ Requires-Dist: loguru>=0.6.0
14
+ Requires-Dist: numpy<2.0.0,>=1.21.6
15
+ Requires-Dist: fast-langdetect==0.2.0
16
+ Requires-Dist: wordninja>=2.0.0
17
+ Requires-Dist: scikit-learn>=1.0.2
18
+ Requires-Dist: pdfminer.six==20231228
19
+ Provides-Extra: full
20
+ Requires-Dist: unimernet==0.1.6; extra == "full"
21
+ Requires-Dist: ultralytics; extra == "full"
22
+ Requires-Dist: paddleocr==2.7.3; extra == "full"
23
+ Requires-Dist: pypandoc; extra == "full"
24
+ Requires-Dist: struct-eqtable==0.1.0; extra == "full"
25
+ Requires-Dist: detectron2; extra == "full"
26
+ Requires-Dist: paddlepaddle==3.0.0b1; platform_system == "Linux" and extra == "full"
27
+ Requires-Dist: matplotlib; (platform_system == "Linux" or platform_system == "Darwin") and extra == "full"
28
+ Requires-Dist: matplotlib<=3.9.0; platform_system == "Windows" and extra == "full"
29
+ Requires-Dist: paddlepaddle==2.6.1; (platform_system == "Windows" or platform_system == "Darwin") and extra == "full"
30
+ Provides-Extra: lite
31
+ Requires-Dist: paddleocr==2.7.3; extra == "lite"
32
+ Requires-Dist: paddlepaddle==3.0.0b1; platform_system == "Linux" and extra == "lite"
33
+ Requires-Dist: paddlepaddle==2.6.1; (platform_system == "Windows" or platform_system == "Darwin") and extra == "lite"
34
+
35
+ <div align="center" xmlns="http://www.w3.org/1999/html">
36
+ <!-- logo -->
37
+ <p align="center">
38
+ <img src="docs/images/MinerU-logo.png" width="300px" style="vertical-align:middle;">
39
+ </p>
40
+
41
+ <!-- icon -->
42
+ [![stars](https://img.shields.io/github/stars/opendatalab/MinerU.svg)](https://github.com/opendatalab/MinerU)
43
+ [![forks](https://img.shields.io/github/forks/opendatalab/MinerU.svg)](https://github.com/opendatalab/MinerU)
44
+ [![open issues](https://img.shields.io/github/issues-raw/opendatalab/MinerU)](https://github.com/opendatalab/MinerU/issues)
45
+ [![issue resolution](https://img.shields.io/github/issues-closed-raw/opendatalab/MinerU)](https://github.com/opendatalab/MinerU/issues)
46
+ [![PyPI version](https://badge.fury.io/py/magic-pdf.svg)](https://badge.fury.io/py/magic-pdf)
47
+ [![Downloads](https://static.pepy.tech/badge/magic-pdf)](https://pepy.tech/project/magic-pdf)
48
+ [![Downloads](https://static.pepy.tech/badge/magic-pdf/month)](https://pepy.tech/project/magic-pdf)
49
+ <a href="https://trendshift.io/repositories/11174" target="_blank"><img src="https://trendshift.io/api/badge/repositories/11174" alt="opendatalab%2FMinerU | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
50
+
51
+ <!-- language -->
52
+ [English](README.md) | [简体中文](README_zh-CN.md)
53
+
54
+ <!-- hot link -->
55
+ <p align="center">
56
+ <a href="https://github.com/opendatalab/PDF-Extract-Kit">PDF-Extract-Kit: High-Quality PDF Extraction Toolkit</a>🔥🔥🔥
57
+ </p>
58
+
59
+ <!-- join us -->
60
+ <p align="center">
61
+ 👋 join us on <a href="https://discord.gg/Tdedn9GTXq" target="_blank">Discord</a> and <a href="https://cdn.vansin.top/internlm/mineru.jpg" target="_blank">WeChat</a>
62
+ </p>
63
+
64
+ </div>
65
+
66
+ # Changelog
67
+ - 2024/08/09: Version 0.7.0b1 released, simplified installation process, added table recognition functionality
68
+ - 2024/08/01: Version 0.6.2b1 released, optimized dependency conflict issues and installation documentation
69
+ - 2024/07/05: Initial open-source release
70
+
71
+ <!-- TABLE OF CONTENT -->
72
+ <details open="open">
73
+ <summary><h2 style="display: inline-block">Table of Contents</h2></summary>
74
+ <ol>
75
+ <li>
76
+ <a href="#mineru">MinerU</a>
77
+ <ul>
78
+ <li><a href="#project-introduction">Project Introduction</a></li>
79
+ <li><a href="#key-features">Key Features</a></li>
80
+ <li><a href="#quick-start">Quick Start</a>
81
+ <ul>
82
+ <li><a href="#online-demo">Online Demo</a></li>
83
+ <li><a href="#quick-cpu-demo">Quick CPU Demo</a></li>
84
+ <li><a href="#using-gpu">Using GPU</a></li>
85
+ </ul>
86
+ </li>
87
+ <li><a href="#usage">Usage</a>
88
+ <ul>
89
+ <li><a href="#command-line">Command Line</a></li>
90
+ <li><a href="#api">API</a></li>
91
+ <li><a href="#development-guide">Development Guide</a></li>
92
+ </ul>
93
+ </li>
94
+ </ul>
95
+ </li>
96
+ <li><a href="#todo">TODO</a></li>
97
+ <li><a href="#known-issues">Known Issues</a></li>
98
+ <li><a href="#faq">FAQ</a></li>
99
+ <li><a href="#all-thanks-to-our-contributors">All Thanks To Our Contributors</a></li>
100
+ <li><a href="#license-information">License Information</a></li>
101
+ <li><a href="#acknowledgments">Acknowledgments</a></li>
102
+ <li><a href="#citation">Citation</a></li>
103
+ <li><a href="#star-history">Star History</a></li>
104
+ <li><a href="#magic-doc">Magic-doc</a></li>
105
+ <li><a href="#magic-html">Magic-html</a></li>
106
+ <li><a href="#links">Links</a></li>
107
+ </ol>
108
+ </details>
109
+
110
+
111
+
112
+ # MinerU
113
+ ## Project Introduction
114
+ MinerU is a tool that converts PDFs into machine-readable formats (e.g., markdown, JSON), allowing for easy extraction into any format.
115
+ MinerU was born during the pre-training process of [InternLM](https://github.com/InternLM/InternLM). We focus on solving symbol conversion issues in scientific literature and hope to contribute to technological development in the era of large models.
116
+ Compared to well-known commercial products, MinerU is still young. If you encounter any issues or if the results are not as expected, please submit an issue on [issue](https://github.com/opendatalab/MinerU/issues) and **attach the relevant PDF**.
117
+
118
+ https://github.com/user-attachments/assets/4bea02c9-6d54-4cd6-97ed-dff14340982c
119
+
120
+ ## Key Features
121
+
122
+ - Removes elements such as headers, footers, footnotes, and page numbers while maintaining semantic continuity
123
+ - Outputs text in a human-readable order from multi-column documents
124
+ - Retains the original structure of the document, including titles, paragraphs, and lists
125
+ - Extracts images, image captions, tables, and table captions
126
+ - Automatically recognizes formulas in the document and converts them to LaTeX
127
+ - Automatically recognizes tables in the document and converts them to LaTeX
128
+ - Automatically detects and enables OCR for corrupted PDFs
129
+ - Supports both CPU and GPU environments
130
+ - Supports Windows, Linux, and Mac platforms
131
+
132
+ ## Quick Start
133
+
134
+ If you encounter any installation issues, please first consult the <a href="#faq">FAQ</a>. </br>
135
+ If the parsing results are not as expected, refer to the <a href="#known-issues">Known Issues</a>. </br>
136
+ There are three different ways to experience MinerU:
137
+ - [Online Demo (No Installation Required)](#online-demo)
138
+ - [Quick CPU Demo (Windows, Linux, Mac)](#quick-cpu-demo)
139
+ - [Linux/Windows + CUDA](#Using-GPU)
140
+
141
+ **⚠️ Pre-installation Notice—Hardware and Software Environment Support**
142
+
143
+ To ensure the stability and reliability of the project, we only optimize and test for specific hardware and software environments during development. This ensures that users deploying and running the project on recommended system configurations will get the best performance with the fewest compatibility issues.
144
+
145
+ By focusing resources on the mainline environment, our team can more efficiently resolve potential bugs and develop new features.
146
+
147
+ In non-mainline environments, due to the diversity of hardware and software configurations, as well as third-party dependency compatibility issues, we cannot guarantee 100% project availability. Therefore, for users who wish to use this project in non-recommended environments, we suggest carefully reading the documentation and FAQ first. Most issues already have corresponding solutions in the FAQ. We also encourage community feedback to help us gradually expand support.
148
+
149
+ <table>
150
+ <tr>
151
+ <td colspan="3" rowspan="2">Operating System</td>
152
+ </tr>
153
+ <tr>
154
+ <td>Ubuntu 22.04 LTS</td>
155
+ <td>Windows 10 / 11</td>
156
+ <td>macOS 11+</td>
157
+ </tr>
158
+ <tr>
159
+ <td colspan="3">CPU</td>
160
+ <td>x86_64</td>
161
+ <td>x86_64</td>
162
+ <td>x86_64 / arm64</td>
163
+ </tr>
164
+ <tr>
165
+ <td colspan="3">Memory</td>
166
+ <td colspan="3">16GB or more, recommended 32GB+</td>
167
+ </tr>
168
+ <tr>
169
+ <td colspan="3">Python Version</td>
170
+ <td colspan="3">3.10</td>
171
+ </tr>
172
+ <tr>
173
+ <td colspan="3">Nvidia Driver Version</td>
174
+ <td>latest (Proprietary Driver)</td>
175
+ <td>latest</td>
176
+ <td>None</td>
177
+ </tr>
178
+ <tr>
179
+ <td colspan="3">CUDA Environment</td>
180
+ <td>Automatic installation [12.1 (pytorch) + 11.8 (paddle)]</td>
181
+ <td>11.8 (manual installation) + cuDNN v8.7.0 (manual installation)</td>
182
+ <td>None</td>
183
+ </tr>
184
+ <tr>
185
+ <td rowspan="2">GPU Hardware Support List</td>
186
+ <td colspan="2">Minimum Requirement 8G+ VRAM</td>
187
+ <td colspan="2">3060ti/3070/3080/3080ti/4060/4070/4070ti<br>
188
+ 8G VRAM only enables layout and formula recognition acceleration</td>
189
+ <td rowspan="2">None</td>
190
+ </tr>
191
+ <tr>
192
+ <td colspan="2">Recommended Configuration 16G+ VRAM</td>
193
+ <td colspan="2">3090/3090ti/4070ti super/4080/4090<br>
194
+ 16G or more can enable layout, formula recognition, and OCR acceleration simultaneously</td>
195
+ </tr>
196
+ </table>
197
+
198
+ ### Online Demo
199
+
200
+ [Click here for the online demo](https://opendatalab.com/OpenSourceTools/Extractor/PDF)
201
+
202
+ ### Quick CPU Demo
203
+
204
+ #### 1. Install magic-pdf
205
+ ```bash
206
+ conda create -n MinerU python=3.10
207
+ conda activate MinerU
208
+ pip install magic-pdf[full]==0.7.0b1 --extra-index-url https://wheels.myhloli.com
209
+ ```
210
+ #### 2. Download model weight files
211
+
212
+ Refer to [How to Download Model Files](docs/how_to_download_models_en.md) for detailed instructions.
213
+ > ❗️After downloading the models, please make sure to verify the completeness of the model files.
214
+ >
215
+ > Check if the model file sizes match the description on the webpage. If possible, use sha256 to verify the integrity of the files.
216
+
217
+ #### 3. Copy and configure the template file
218
+ You can find the `magic-pdf.template.json` template configuration file in the root directory of the repository.
219
+ > ❗️Make sure to execute the following command to copy the configuration file to your **user directory**; otherwise, the program will not run.
220
+ >
221
+ > The user directory for Windows is `C:\Users\YourUsername`, for Linux it is `/home/YourUsername`, and for macOS it is `/Users/YourUsername`.
222
+ ```bash
223
+ cp magic-pdf.template.json ~/magic-pdf.json
224
+ ```
225
+
226
+ Find the `magic-pdf.json` file in your user directory and configure the "models-dir" path to point to the directory where the model weight files were downloaded in [Step 2](#2-download-model-weight-files).
227
+ > ❗️Make sure to correctly configure the **absolute path** to the model weight files directory, otherwise the program will not run because it can't find the model files.
228
+ >
229
+ > On Windows, this path should include the drive letter and all backslashes (`\`) in the path should be replaced with forward slashes (`/`) to avoid syntax errors in the JSON file due to escape sequences.
230
+ >
231
+ > For example: If the models are stored in the "models" directory at the root of the D drive, the "model-dir" value should be `D:/models`.
232
+ ```json
233
+ {
234
+ // other config
235
+ "models-dir": "D:/models",
236
+ "table-config": {
237
+ "is_table_recog_enable": false, // Table recognition is disabled by default, modify this value to enable it
238
+ "max_time": 400
239
+ }
240
+ }
241
+ ```
242
+
243
+
244
+ ### Using GPU
245
+ If your device supports CUDA and meets the GPU requirements of the mainline environment, you can use GPU acceleration. Please select the appropriate guide based on your system:
246
+
247
+ - [Ubuntu 22.04 LTS + GPU](docs/README_Ubuntu_CUDA_Acceleration_en_US.md)
248
+ - [Windows 10/11 + GPU](docs/README_Windows_CUDA_Acceleration_en_US.md)
249
+
250
+
251
+ ## Usage
252
+
253
+ ### Command Line
254
+
255
+ ```bash
256
+ magic-pdf --help
257
+ Usage: magic-pdf [OPTIONS]
258
+
259
+ Options:
260
+ -v, --version display the version and exit
261
+ -p, --path PATH local pdf filepath or directory [required]
262
+ -o, --output-dir TEXT output local directory
263
+ -m, --method [ocr|txt|auto] the method for parsing pdf.
264
+ ocr: using ocr technique to extract information from pdf,
265
+ txt: suitable for the text-based pdf only and outperform ocr,
266
+ auto: automatically choose the best method for parsing pdf
267
+ from ocr and txt.
268
+ without method specified, auto will be used by default.
269
+ --help Show this message and exit.
270
+
271
+
272
+ ## show version
273
+ magic-pdf -v
274
+
275
+ ## command line example
276
+ magic-pdf -p {some_pdf} -o {some_output_dir} -m auto
277
+ ```
278
+
279
+ `{some_pdf}` can be a single PDF file or a directory containing multiple PDFs.
280
+ The results will be saved in the `{some_output_dir}` directory. The output file list is as follows:
281
+
282
+ ```text
283
+ ├── some_pdf.md # markdown file
284
+ ├── images # directory for storing images
285
+ ├── layout.pdf # layout diagram
286
+ ├── middle.json # MinerU intermediate processing result
287
+ ├── model.json # model inference result
288
+ ├── origin.pdf # original PDF file
289
+ └── spans.pdf # smallest granularity bbox position information diagram
290
+ ```
291
+
292
+ For more information about the output files, please refer to the [Output File Description](docs/output_file_en_us.md).
293
+
294
+ ### API
295
+
296
+ Processing files from local disk
297
+ ```python
298
+ image_writer = DiskReaderWriter(local_image_dir)
299
+ image_dir = str(os.path.basename(local_image_dir))
300
+ jso_useful_key = {"_pdf_type": "", "model_list": []}
301
+ pipe = UNIPipe(pdf_bytes, jso_useful_key, image_writer)
302
+ pipe.pipe_classify()
303
+ pipe.pipe_analyze()
304
+ pipe.pipe_parse()
305
+ md_content = pipe.pipe_mk_markdown(image_dir, drop_mode="none")
306
+ ```
307
+
308
+ Processing files from object storage
309
+ ```python
310
+ s3pdf_cli = S3ReaderWriter(pdf_ak, pdf_sk, pdf_endpoint)
311
+ image_dir = "s3://img_bucket/"
312
+ s3image_cli = S3ReaderWriter(img_ak, img_sk, img_endpoint, parent_path=image_dir)
313
+ pdf_bytes = s3pdf_cli.read(s3_pdf_path, mode=s3pdf_cli.MODE_BIN)
314
+ jso_useful_key = {"_pdf_type": "", "model_list": []}
315
+ pipe = UNIPipe(pdf_bytes, jso_useful_key, s3image_cli)
316
+ pipe.pipe_classify()
317
+ pipe.pipe_analyze()
318
+ pipe.pipe_parse()
319
+ md_content = pipe.pipe_mk_markdown(image_dir, drop_mode="none")
320
+ ```
321
+
322
+ For detailed implementation, refer to:
323
+ - [demo.py Simplest Processing Method](demo/demo.py)
324
+ - [magic_pdf_parse_main.py More Detailed Processing Workflow](demo/magic_pdf_parse_main.py)
325
+
326
+
327
+ ### Development Guide
328
+
329
+ TODO
330
+
331
+ # TODO
332
+
333
+ - [ ] Semantic-based reading order
334
+ - [ ] List recognition within the text
335
+ - [ ] Code block recognition within the text
336
+ - [ ] Table of contents recognition
337
+ - [x] Table recognition
338
+ - [ ] Chemical formula recognition
339
+ - [ ] Geometric shape recognition
340
+
341
+ # Known Issues
342
+ - Reading order is segmented based on rules, which can cause disordered sequences in some cases
343
+ - Vertical text is not supported
344
+ - Lists, code blocks, and table of contents are not yet supported in the layout model
345
+ - Comic books, art books, elementary school textbooks, and exercise books are not well-parsed yet
346
+ - Enabling OCR may produce better results in PDFs with a high density of formulas
347
+ - If you are processing PDFs with a large number of formulas, it is strongly recommended to enable the OCR function. When using PyMuPDF to extract text, overlapping text lines can occur, leading to inaccurate formula insertion positions.
348
+ - **Table Recognition** is currently in the testing phase; recognition speed is slow, and accuracy needs improvement. Below are some performance test results in an Ubuntu 22.04 LTS + Intel(R) Xeon(R) Platinum 8352V CPU @ 2.10GHz + NVIDIA GeForce RTX 4090 environment for reference.
349
+
350
+ | Table Size | Parsing Time |
351
+ |---------------|----------------------------|
352
+ | 6\*5 55kb | 37s |
353
+ | 16\*12 284kb | 3m18s |
354
+ | 44\*7 559kb | 4m12s |
355
+
356
+ # FAQ
357
+ [FAQ in Chinese](docs/FAQ_zh_cn.md)
358
+
359
+ [FAQ in English](docs/FAQ_en_us.md)
360
+
361
+
362
+ # All Thanks To Our Contributors
363
+
364
+ <a href="https://github.com/opendatalab/MinerU/graphs/contributors">
365
+ <img src="https://contrib.rocks/image?repo=opendatalab/MinerU" />
366
+ </a>
367
+
368
+ # License Information
369
+
370
+ [LICENSE.md](LICENSE.md)
371
+
372
+ This project currently uses PyMuPDF to achieve advanced functionality. However, since it adheres to the AGPL license, it may impose restrictions on certain usage scenarios. In future iterations, we plan to explore and replace it with a more permissive PDF processing library to enhance user-friendliness and flexibility.
373
+
374
+
375
+ # Acknowledgments
376
+ - [PDF-Extract-Kit](https://github.com/opendatalab/PDF-Extract-Kit)
377
+ - [StructEqTable](https://github.com/UniModal4Reasoning/StructEqTable-Deploy)
378
+ - [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR)
379
+ - [PyMuPDF](https://github.com/pymupdf/PyMuPDF)
380
+ - [fast-langdetect](https://github.com/LlmKira/fast-langdetect)
381
+ - [pdfminer.six](https://github.com/pdfminer/pdfminer.six)
382
+
383
+ # Citation
384
+
385
+ ```bibtex
386
+ @article{he2024opendatalab,
387
+ title={Opendatalab: Empowering general artificial intelligence with open datasets},
388
+ author={He, Conghui and Li, Wei and Jin, Zhenjiang and Xu, Chao and Wang, Bin and Lin, Dahua},
389
+ journal={arXiv preprint arXiv:2407.13773},
390
+ year={2024}
391
+ }
392
+
393
+ @misc{2024mineru,
394
+ title={MinerU: A One-stop, Open-source, High-quality Data Extraction Tool},
395
+ author={MinerU Contributors},
396
+ howpublished = {\url{https://github.com/opendatalab/MinerU}},
397
+ year={2024}
398
+ }
399
+ ```
400
+
401
+ # Star History
402
+
403
+ <a>
404
+ <picture>
405
+ <source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=opendatalab/MinerU&type=Date&theme=dark" />
406
+ <source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=opendatalab/MinerU&type=Date" />
407
+ <img alt="Star History Chart" src="https://api.star-history.com/svg?repos=opendatalab/MinerU&type=Date" />
408
+ </picture>
409
+ </a>
410
+
411
+ # Magic-doc
412
+ [Magic-Doc](https://github.com/InternLM/magic-doc) Fast speed ppt/pptx/doc/docx/pdf extraction tool
413
+
414
+ # Magic-html
415
+ [Magic-HTML](https://github.com/opendatalab/magic-html) Mixed web page extraction tool
416
+
417
+ # Links
418
+
419
+ - [LabelU (A Lightweight Multi-modal Data Annotation Tool)](https://github.com/opendatalab/labelU)
420
+ - [LabelLLM (An Open-source LLM Dialogue Annotation Platform)](https://github.com/opendatalab/LabelLLM)
421
+ - [PDF-Extract-Kit (A Comprehensive Toolkit for High-Quality PDF Content Extraction)](https://github.com/opendatalab/PDF-Extract-Kit)
@@ -15,7 +15,7 @@ magic_pdf/layout/layout_det_utils.py,sha256=NCYBTvsrULE3Cue53aMD1MfXTmOL9Xy0nivl
15
15
  magic_pdf/layout/layout_sort.py,sha256=ovqRX1xcRA7E7s8VvsI7ZNbaNSElJe07bApCh5hxwIE,33533
16
16
  magic_pdf/layout/layout_spiler_recog.py,sha256=QjBSgB-a7J2yjUR1eaCs9ZD7URtiRnV6W934hpAeuC4,3067
17
17
  magic_pdf/layout/mcol_sort.py,sha256=ADnLisBJBHXDKYChcf2lzTb_TC_vZ4q89_CSN8mwEJc,11331
18
- magic_pdf/libs/Constants.py,sha256=AwQw5aK7JkWjerEyq5vxxMTHH1Gvku8K9NS8xjHKimI,189
18
+ magic_pdf/libs/Constants.py,sha256=aKdTHeK75qkVvxvE_2EA5LYis6Z6HLmiuk9o8ESOnNg,260
19
19
  magic_pdf/libs/MakeContentConfig.py,sha256=UDZPpsv8q4DqTy8h0vRtrT2kHqWiVI205VnVhlUEQc0,206
20
20
  magic_pdf/libs/ModelBlockTypeEnum.py,sha256=kalXPbo5ya6hKhhBHPGlHl1yjWOURoXZWQM3rVUyPsY,164
21
21
  magic_pdf/libs/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -26,7 +26,7 @@ magic_pdf/libs/config_reader.py,sha256=dPx6JJJuCw9AzNgKtrTG1elmfdeN6gDhgFK9r15-N
26
26
  magic_pdf/libs/convert_utils.py,sha256=Ov-lsfCLBPz_15iSJXIslBNmrSf_E_1g_XDWJy8NgO8,143
27
27
  magic_pdf/libs/coordinate_transform.py,sha256=Bbop2cP2uz2ZG0U0gwd7J6EKkgABq5Rv03qf2LMPw80,429
28
28
  magic_pdf/libs/detect_language_from_model.py,sha256=Uln8F9qs8EJOw4EgI7KRlaU3lD_mK8KMTlADLFtz8fk,816
29
- magic_pdf/libs/draw_bbox.py,sha256=90FDAYN3dxgN07_xRzdUgnDAyEswpl9VCXaDo_SMZkA,9449
29
+ magic_pdf/libs/draw_bbox.py,sha256=wPoaxXteZDCL20pjVEQ4kcu0u6Ea-9balUaxrkMsYks,12531
30
30
  magic_pdf/libs/drop_reason.py,sha256=IfjPSrPLMmVziqjOXPep7r_ioQKFRahDgbOW1SD-Tuw,2148
31
31
  magic_pdf/libs/drop_tag.py,sha256=bZDg3bIVWvBT1Ec1icwj5WLOkt5-hI6eRYZ2tX9_a74,673
32
32
  magic_pdf/libs/hash_utils.py,sha256=VEKK9WfFoZgrPfi8kfITjLpr8Ahufs8tXh9R1Y5lAL8,404
@@ -35,19 +35,19 @@ magic_pdf/libs/language.py,sha256=Hj5-lrGoNExxdHLbkcNG-c27U4AjJ9AZPdZblaNSehU,10
35
35
  magic_pdf/libs/local_math.py,sha256=tqljQOgqh3fZc146HYhO88JXJaiXMVwArBkk_CSGICc,177
36
36
  magic_pdf/libs/markdown_utils.py,sha256=cLxLXjRhrNp_wCHvtglrGA_FVdrvfd1KULeTtj1p18w,944
37
37
  magic_pdf/libs/nlp_utils.py,sha256=-X9W3-Ns5ZdDYFvyyEq6i6P2b5hCATaFEZeOjwNOH9M,6901
38
- magic_pdf/libs/ocr_content_type.py,sha256=DiGTYppd6WlibwCAeVpIy3NHCQkglfIAQsJ_ffu5BPw,526
38
+ magic_pdf/libs/ocr_content_type.py,sha256=9c12CoJ8xvdFa4Rk81J_S238yuQl0bDQeapvqb-JkEk,794
39
39
  magic_pdf/libs/path_utils.py,sha256=Hykw_l5CU736b2egHV9P7B-qh3QNKO4nZSGCbsi0Z8E,1043
40
40
  magic_pdf/libs/pdf_check.py,sha256=MAe8wzwT0qvPf_I72wEZG7k1g4haNHS7oUtLqkB5rlE,2145
41
41
  magic_pdf/libs/pdf_image_tools.py,sha256=CAd01giTKr_UJz1_QtDOARG9G9z69GFpzRZwcWSfLtE,1282
42
42
  magic_pdf/libs/safe_filename.py,sha256=ckwcM_eqoysTb5id8czp-tXq2G9da0-l3pshZDCHQtE,236
43
43
  magic_pdf/libs/textbase.py,sha256=SC1Frhz3Fb7V7n2SFRBsl7Bmg0JZdlvZskq0lfW1vIk,732
44
- magic_pdf/libs/version.py,sha256=iEqOsQ5JUsdTQDAPqryKypkJMTOXXBl71cd4Drh5pDs,24
44
+ magic_pdf/libs/version.py,sha256=95eHzU5LYX2l3ASu7OvUb95xo-2kfuwh1uUYnY54K90,24
45
45
  magic_pdf/libs/vis_utils.py,sha256=hTOTEakKV0pGMbk0tbRkVI_tku7A3dGc96ynObZ4kwI,10207
46
46
  magic_pdf/model/__init__.py,sha256=1QcfMKET0xQhSaZMjNQHi_TjzSSDR6PI5mjkmaXHPe8,52
47
- magic_pdf/model/doc_analyze_by_custom_model.py,sha256=cSmh27RG1cvY0pd98T82rq0pANBwkYN0siZIN6oPNI8,4332
47
+ magic_pdf/model/doc_analyze_by_custom_model.py,sha256=SoT21VHy6ICXoRfC9V3XS6BMiX8EZI6zaqSNgoE17oo,4347
48
48
  magic_pdf/model/magic_model.py,sha256=xwKV9BrdjOJecJSzbErT54N6qeJu0tvFuJg2S1z_2kU,25413
49
49
  magic_pdf/model/model_list.py,sha256=AqxAtKGLDn7VVXWYwk0l9LnACxDLyU2jwOJ7vjPZj04,72
50
- magic_pdf/model/pdf_extract_kit.py,sha256=I3pZBWQu8y5YVjURTUJnsIySjeSGK-Yefit9PiqN9VE,12952
50
+ magic_pdf/model/pdf_extract_kit.py,sha256=21vBy8p6pI5a0b6V45ul52yE8zD1R0xrjv4Tx8r9gaw,13620
51
51
  magic_pdf/model/pp_structure_v2.py,sha256=1sn8IJK0d5ZmqJ2XFt9FdaSdI0RQf-iwNAWBrVrIeuc,2872
52
52
  magic_pdf/model/pek_sub_modules/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
53
53
  magic_pdf/model/pek_sub_modules/post_process.py,sha256=HzRxV2sVR3Qo8XKYEHhT6tae-bYTb6dnAfGP6gfVNaM,1135
@@ -138,11 +138,11 @@ magic_pdf/spark/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
138
138
  magic_pdf/spark/spark_api.py,sha256=eSLXTjMYW5Ya41VMIApRVfji1ZxEZXdH9ZdsL6fy5Kw,1131
139
139
  magic_pdf/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
140
140
  magic_pdf/tools/cli.py,sha256=aVmurGAEyWT-MOv0MOaCRrfef1-jkRTpeVVWUsEVyeY,2157
141
- magic_pdf/tools/cli_dev.py,sha256=uDc4fDxVuOIrkaKRdjNAqyh9htyLd-fYDEfJBNFUYao,4149
142
- magic_pdf/tools/common.py,sha256=x4W-Tyo0A-TGsOjzlUGAhxiU2AisU3nBE3_2H_RLUO4,3801
143
- magic_pdf-0.7.0a1.dist-info/LICENSE.md,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
144
- magic_pdf-0.7.0a1.dist-info/METADATA,sha256=NBLsixinI-5iHwdweKr13SM5qg6Jf-fWCwg5ihavlpY,12455
145
- magic_pdf-0.7.0a1.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
146
- magic_pdf-0.7.0a1.dist-info/entry_points.txt,sha256=wXwYke3j8fqDQTocUspL-CqDUEv3Tfcwp09fM8dZAhA,98
147
- magic_pdf-0.7.0a1.dist-info/top_level.txt,sha256=J9I0AzmHWGkp9c6DL8Oe4mEx3yYphLzkRn4H25Lg1rE,10
148
- magic_pdf-0.7.0a1.dist-info/RECORD,,
141
+ magic_pdf/tools/cli_dev.py,sha256=w-J4OixDzHjknnUuRW44PXsUlUqyiD4nPbBSSk9WkXM,4160
142
+ magic_pdf/tools/common.py,sha256=XoSs19DD-4ubbjrDFQer83T9O6O_MmgEO61NbjlP_2M,3939
143
+ magic_pdf-0.7.0b1.dist-info/LICENSE.md,sha256=hIahDEOTzuHCU5J2nd07LWwkLW7Hko4UFO__ffsvB-8,34523
144
+ magic_pdf-0.7.0b1.dist-info/METADATA,sha256=47QGAd2iGc0i1osA_jbBS1QT_Jrfmofoyetsrh9KRy8,18571
145
+ magic_pdf-0.7.0b1.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
146
+ magic_pdf-0.7.0b1.dist-info/entry_points.txt,sha256=wXwYke3j8fqDQTocUspL-CqDUEv3Tfcwp09fM8dZAhA,98
147
+ magic_pdf-0.7.0b1.dist-info/top_level.txt,sha256=J9I0AzmHWGkp9c6DL8Oe4mEx3yYphLzkRn4H25Lg1rE,10
148
+ magic_pdf-0.7.0b1.dist-info/RECORD,,
@@ -1,362 +0,0 @@
1
- Metadata-Version: 2.1
2
- Name: magic-pdf
3
- Version: 0.7.0a1
4
- Summary: A practical tool for converting PDF to Markdown
5
- Home-page: https://github.com/opendatalab/MinerU
6
- Requires-Python: >=3.9
7
- Description-Content-Type: text/markdown
8
- License-File: LICENSE.md
9
- Requires-Dist: boto3>=1.28.43
10
- Requires-Dist: Brotli>=1.1.0
11
- Requires-Dist: click>=8.1.7
12
- Requires-Dist: PyMuPDF>=1.24.9
13
- Requires-Dist: loguru>=0.6.0
14
- Requires-Dist: numpy<2.0.0,>=1.21.6
15
- Requires-Dist: fast-langdetect==0.2.0
16
- Requires-Dist: wordninja>=2.0.0
17
- Requires-Dist: scikit-learn>=1.0.2
18
- Requires-Dist: pdfminer.six==20231228
19
- Provides-Extra: full
20
- Requires-Dist: unimernet==0.1.6; extra == "full"
21
- Requires-Dist: ultralytics; extra == "full"
22
- Requires-Dist: paddleocr==2.7.3; extra == "full"
23
- Requires-Dist: pypandoc; extra == "full"
24
- Requires-Dist: struct-eqtable==0.1.0; extra == "full"
25
- Requires-Dist: detectron2; extra == "full"
26
- Requires-Dist: paddlepaddle==3.0.0b1; platform_system == "Linux" and extra == "full"
27
- Requires-Dist: matplotlib; (platform_system == "Linux" or platform_system == "Darwin") and extra == "full"
28
- Requires-Dist: matplotlib<=3.9.0; platform_system == "Windows" and extra == "full"
29
- Requires-Dist: paddlepaddle==2.6.1; (platform_system == "Windows" or platform_system == "Darwin") and extra == "full"
30
- Provides-Extra: lite
31
- Requires-Dist: paddleocr==2.7.3; extra == "lite"
32
- Requires-Dist: paddlepaddle==3.0.0b1; platform_system == "Linux" and extra == "lite"
33
- Requires-Dist: paddlepaddle==2.6.1; (platform_system == "Windows" or platform_system == "Darwin") and extra == "lite"
34
-
35
- <div id="top">
36
-
37
- <p align="center">
38
- <img src="docs/images/MinerU-logo.png" width="300px" style="vertical-align:middle;">
39
- </p>
40
-
41
- </div>
42
- <div align="center">
43
-
44
- [![stars](https://img.shields.io/github/stars/opendatalab/MinerU.svg)](https://github.com/opendatalab/MinerU)
45
- [![forks](https://img.shields.io/github/forks/opendatalab/MinerU.svg)](https://github.com/opendatalab/MinerU)
46
- [![open issues](https://img.shields.io/github/issues-raw/opendatalab/MinerU)](https://github.com/opendatalab/MinerU/issues)
47
- [![issue resolution](https://img.shields.io/github/issues-closed-raw/opendatalab/MinerU)](https://github.com/opendatalab/MinerU/issues)
48
- [![PyPI version](https://badge.fury.io/py/magic-pdf.svg)](https://badge.fury.io/py/magic-pdf)
49
- [![Downloads](https://static.pepy.tech/badge/magic-pdf)](https://pepy.tech/project/magic-pdf)
50
- [![Downloads](https://static.pepy.tech/badge/magic-pdf/month)](https://pepy.tech/project/magic-pdf)
51
-
52
- <a href="https://trendshift.io/repositories/11174" target="_blank"><img src="https://trendshift.io/api/badge/repositories/11174" alt="opendatalab%2FMinerU | Trendshift" style="width: 200px; height: 55px;"/></a>
53
-
54
-
55
-
56
-
57
- [English](README.md) | [简体中文](README_zh-CN.md) | [日本語](README_ja-JP.md)
58
-
59
- </div>
60
-
61
- <div align="center">
62
- <p align="center">
63
- <a href="https://github.com/opendatalab/MinerU">MinerU: An end-to-end PDF parsing tool based on PDF-Extract-Kit, supporting conversion from PDF to Markdown.</a>🚀🚀🚀<br>
64
- <a href="https://github.com/opendatalab/PDF-Extract-Kit">PDF-Extract-Kit: A Comprehensive Toolkit for High-Quality PDF Content Extraction</a>🔥🔥🔥
65
- </p>
66
-
67
- <p align="center">
68
- 👋 join us on <a href="https://discord.gg/gPxmVeGC" target="_blank">Discord</a> and <a href="https://cdn.vansin.top/internlm/mineru.jpg" target="_blank">WeChat</a>
69
- </p>
70
- </div>
71
-
72
- # MinerU
73
-
74
-
75
- ## Introduction
76
-
77
- MinerU is a one-stop, open-source, high-quality data extraction tool, includes the following primary features:
78
-
79
- - [Magic-PDF](#Magic-PDF) PDF Document Extraction
80
- - [Magic-Doc](#Magic-Doc) Webpage & E-book Extraction
81
-
82
-
83
- # Magic-PDF
84
-
85
-
86
- ## Introduction
87
-
88
- Magic-PDF is a tool designed to convert PDF documents into Markdown format, capable of processing files stored locally or on object storage supporting S3 protocol.
89
-
90
- Key features include:
91
-
92
- - Support for multiple front-end model inputs
93
- - Removal of headers, footers, footnotes, and page numbers
94
- - Human-readable layout formatting
95
- - Retains the original document's structure and formatting, including headings, paragraphs, lists, and more
96
- - Extraction and display of images and tables within markdown
97
- - Conversion of equations into LaTeX format
98
- - Automatic detection and conversion of garbled PDFs
99
- - Compatibility with CPU and GPU environments
100
- - Available for Windows, Linux, and macOS platforms
101
-
102
-
103
- https://github.com/user-attachments/assets/4bea02c9-6d54-4cd6-97ed-dff14340982c
104
-
105
-
106
-
107
- ## Project Panorama
108
-
109
- ![Project Panorama](docs/images/project_panorama_en.png)
110
-
111
-
112
- ## Flowchart
113
-
114
- ![Flowchart](docs/images/flowchart_en.png)
115
-
116
- ### Dependency repositorys
117
-
118
- - [PDF-Extract-Kit : A Comprehensive Toolkit for High-Quality PDF Content Extraction](https://github.com/opendatalab/PDF-Extract-Kit) 🚀🚀🚀
119
-
120
- ## Getting Started
121
-
122
- ### Requirements
123
-
124
- - Python >= 3.9
125
-
126
- Using a virtual environment is recommended to avoid potential dependency conflicts; both venv and conda are suitable.
127
- For example:
128
- ```bash
129
- conda create -n MinerU python=3.10
130
- conda activate MinerU
131
- ```
132
-
133
- ### Installation and Configuration
134
-
135
- #### 1. Install Magic-PDF
136
-
137
- **1.Install dependencies**
138
-
139
- The full-feature package depends on detectron2, which requires a compilation installation.
140
- If you need to compile it yourself, please refer to https://github.com/facebookresearch/detectron2/issues/5114
141
- Alternatively, you can directly use our precompiled whl package (limited to Python 3.10):
142
-
143
- ```bash
144
- pip install detectron2 --extra-index-url https://wheels.myhloli.com
145
- ```
146
-
147
- **2.Install the full-feature package with pip**
148
- >Note: The pip-installed package supports CPU-only and is ideal for quick tests.
149
- >
150
- >For CUDA/MPS acceleration in production, see [Acceleration Using CUDA or MPS](#4-Acceleration-Using-CUDA-or-MPS).
151
-
152
- ```bash
153
- pip install magic-pdf[full]==0.6.2b1
154
- ```
155
- > ❗️❗️❗️
156
- > We have pre-released the 0.6.2 beta version, addressing numerous issues mentioned in our logs. However, this build has not undergone full QA testing and does not represent the final release quality. Should you encounter any problems, please promptly report them to us via issues or revert to using version 0.6.1.
157
- > ```bash
158
- > pip install magic-pdf[full-cpu]==0.6.1
159
- > ```
160
-
161
-
162
-
163
- #### 2. Downloading model weights files
164
-
165
- For detailed references, please see below [how_to_download_models](docs/how_to_download_models_en.md)
166
-
167
- After downloading the model weights, move the 'models' directory to a directory on a larger disk space, preferably an SSD.
168
-
169
-
170
- #### 3. Copy the Configuration File and Make Configurations
171
- You can get the [magic-pdf.template.json](magic-pdf.template.json) file in the repository root directory.
172
- ```bash
173
- cp magic-pdf.template.json ~/magic-pdf.json
174
- ```
175
- In magic-pdf.json, configure "models-dir" to point to the directory where the model weights files are located.
176
-
177
- ```json
178
- {
179
- "models-dir": "/tmp/models"
180
- }
181
- ```
182
-
183
-
184
- #### 4. Acceleration Using CUDA or MPS
185
- If you have an available Nvidia GPU or are using a Mac with Apple Silicon, you can leverage acceleration with CUDA or MPS respectively.
186
- ##### CUDA
187
-
188
- You need to install the corresponding PyTorch version according to your CUDA version.
189
- This example installs the CUDA 11.8 version.More information https://pytorch.org/get-started/locally/
190
- ```bash
191
- pip install --force-reinstall torch==2.3.1 torchvision==0.18.1 --index-url https://download.pytorch.org/whl/cu118
192
- ```
193
- > ❗ ️Make sure to specify version
194
- > ```bash
195
- > torch==2.3.1 torchvision==0.18.1
196
- > ```
197
- > in the command, as these are the highest versions we support. Failing to specify the versions may result in automatically installing higher versions which can cause the program to fail.
198
-
199
- Also, you need to modify the value of "device-mode" in the configuration file magic-pdf.json.
200
- ```json
201
- {
202
- "device-mode":"cuda"
203
- }
204
- ```
205
-
206
- ##### MPS
207
-
208
- For macOS users with M-series chip devices, you can use MPS for inference acceleration.
209
- You also need to modify the value of "device-mode" in the configuration file magic-pdf.json.
210
- ```json
211
- {
212
- "device-mode":"mps"
213
- }
214
- ```
215
-
216
-
217
- ### Usage
218
-
219
- #### 1.Usage via Command Line
220
-
221
- ###### simple
222
-
223
- ```bash
224
- magic-pdf pdf-command --pdf "pdf_path" --inside_model true
225
- ```
226
- After the program has finished, you can find the generated markdown files under the directory "/tmp/magic-pdf".
227
- You can find the corresponding xxx_model.json file in the markdown directory.
228
- If you intend to do secondary development on the post-processing pipeline, you can use the command:
229
- ```bash
230
- magic-pdf pdf-command --pdf "pdf_path" --model "model_json_path"
231
- ```
232
- In this way, you won't need to re-run the model data, making debugging more convenient.
233
-
234
-
235
- ###### more
236
-
237
- ```bash
238
- magic-pdf --help
239
- ```
240
-
241
-
242
- #### 2. Usage via Api
243
-
244
- ###### Local
245
- ```python
246
- image_writer = DiskReaderWriter(local_image_dir)
247
- image_dir = str(os.path.basename(local_image_dir))
248
- jso_useful_key = {"_pdf_type": "", "model_list": []}
249
- pipe = UNIPipe(pdf_bytes, jso_useful_key, image_writer)
250
- pipe.pipe_classify()
251
- pipe.pipe_parse()
252
- md_content = pipe.pipe_mk_markdown(image_dir, drop_mode="none")
253
- ```
254
-
255
- ###### Object Storage
256
- ```python
257
- s3pdf_cli = S3ReaderWriter(pdf_ak, pdf_sk, pdf_endpoint)
258
- image_dir = "s3://img_bucket/"
259
- s3image_cli = S3ReaderWriter(img_ak, img_sk, img_endpoint, parent_path=image_dir)
260
- pdf_bytes = s3pdf_cli.read(s3_pdf_path, mode=s3pdf_cli.MODE_BIN)
261
- jso_useful_key = {"_pdf_type": "", "model_list": []}
262
- pipe = UNIPipe(pdf_bytes, jso_useful_key, s3image_cli)
263
- pipe.pipe_classify()
264
- pipe.pipe_parse()
265
- md_content = pipe.pipe_mk_markdown(image_dir, drop_mode="none")
266
- ```
267
-
268
- Demo can be referred to [demo.py](demo/demo.py)
269
-
270
-
271
- # Magic-Doc
272
-
273
-
274
- ## Introduction
275
-
276
- Magic-Doc is a tool designed to convert web pages or multi-format e-books into markdown format.
277
-
278
- Key Features Include:
279
-
280
- - Web Page Extraction
281
- - Cross-modal precise parsing of text, images, tables, and formula information.
282
-
283
- - E-Book Document Extraction
284
- - Supports various document formats including epub, mobi, with full adaptation for text and images.
285
-
286
- - Language Type Identification
287
- - Accurate recognition of 176 languages.
288
-
289
- https://github.com/opendatalab/MinerU/assets/11393164/a5a650e9-f4c0-463e-acc3-960967f1a1ca
290
-
291
-
292
-
293
- https://github.com/opendatalab/MinerU/assets/11393164/0f4a6fe9-6cca-4113-9fdc-a537749d764d
294
-
295
-
296
-
297
- https://github.com/opendatalab/MinerU/assets/11393164/20438a02-ce6c-4af8-9dde-d722a4e825b2
298
-
299
-
300
-
301
-
302
- ## Project Repository
303
-
304
- - [Magic-Doc](https://github.com/InternLM/magic-doc)
305
- Outstanding Webpage and E-book Extraction Tool
306
-
307
-
308
- # All Thanks To Our Contributors
309
-
310
- <a href="https://github.com/opendatalab/MinerU/graphs/contributors">
311
- <img src="https://contrib.rocks/image?repo=opendatalab/MinerU" />
312
- </a>
313
-
314
-
315
- # License Information
316
-
317
- [LICENSE.md](LICENSE.md)
318
-
319
- The project currently leverages PyMuPDF to deliver advanced functionalities; however, its adherence to the AGPL license may impose limitations on certain use cases. In upcoming iterations, we intend to explore and transition to a more permissively licensed PDF processing library to enhance user-friendliness and flexibility.
320
-
321
-
322
- # Acknowledgments
323
-
324
- - [PaddleOCR](https://github.com/PaddlePaddle/PaddleOCR)
325
- - [PyMuPDF](https://github.com/pymupdf/PyMuPDF)
326
- - [fast-langdetect](https://github.com/LlmKira/fast-langdetect)
327
- - [pdfminer.six](https://github.com/pdfminer/pdfminer.six)
328
-
329
-
330
- # Citation
331
-
332
- ```bibtex
333
- @article{he2024opendatalab,
334
- title={Opendatalab: Empowering general artificial intelligence with open datasets},
335
- author={He, Conghui and Li, Wei and Jin, Zhenjiang and Xu, Chao and Wang, Bin and Lin, Dahua},
336
- journal={arXiv preprint arXiv:2407.13773},
337
- year={2024}
338
- }
339
-
340
- @misc{2024mineru,
341
- title={MinerU: A One-stop, Open-source, High-quality Data Extraction Tool},
342
- author={MinerU Contributors},
343
- howpublished = {\url{https://github.com/opendatalab/MinerU}},
344
- year={2024}
345
- }
346
- ```
347
-
348
-
349
- # Star History
350
-
351
- <a>
352
- <picture>
353
- <source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=opendatalab/MinerU&type=Date&theme=dark" />
354
- <source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=opendatalab/MinerU&type=Date" />
355
- <img alt="Star History Chart" src="https://api.star-history.com/svg?repos=opendatalab/MinerU&type=Date" />
356
- </picture>
357
- </a>
358
-
359
- # Links
360
- - [LabelU (A Lightweight Multi-modal Data Annotation Tool)](https://github.com/opendatalab/labelU)
361
- - [LabelLLM (An Open-source LLM Dialogue Annotation Platform)](https://github.com/opendatalab/LabelLLM)
362
- - [PDF-Extract-Kit (A Comprehensive Toolkit for High-Quality PDF Content Extraction)](https://github.com/opendatalab/PDF-Extract-Kit)