pembot 0.0.3__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pembot might be problematic. Click here for more details.

Files changed (129)
  1. pembot/.git/COMMIT_EDITMSG +1 -0
  2. pembot/.git/HEAD +1 -0
  3. pembot/.git/config +11 -0
  4. pembot/.git/description +1 -0
  5. pembot/.git/hooks/applypatch-msg.sample +15 -0
  6. pembot/.git/hooks/commit-msg.sample +24 -0
  7. pembot/.git/hooks/fsmonitor-watchman.sample +174 -0
  8. pembot/.git/hooks/post-update.sample +8 -0
  9. pembot/.git/hooks/pre-applypatch.sample +14 -0
  10. pembot/.git/hooks/pre-commit.sample +49 -0
  11. pembot/.git/hooks/pre-merge-commit.sample +13 -0
  12. pembot/.git/hooks/pre-push.sample +53 -0
  13. pembot/.git/hooks/pre-rebase.sample +169 -0
  14. pembot/.git/hooks/pre-receive.sample +24 -0
  15. pembot/.git/hooks/prepare-commit-msg.sample +42 -0
  16. pembot/.git/hooks/push-to-checkout.sample +78 -0
  17. pembot/.git/hooks/sendemail-validate.sample +77 -0
  18. pembot/.git/hooks/update.sample +128 -0
  19. pembot/.git/index +0 -0
  20. pembot/.git/info/exclude +6 -0
  21. pembot/.git/logs/HEAD +6 -0
  22. pembot/.git/logs/refs/heads/main +6 -0
  23. pembot/.git/logs/refs/remotes/origin/HEAD +1 -0
  24. pembot/.git/logs/refs/remotes/origin/main +5 -0
  25. pembot/.git/objects/0a/fb3a98cdc55b1434b44534ec2bf22c56cfa26c +0 -0
  26. pembot/.git/objects/0c/8d9b2690545bf1906b05cd9f18b783b3eb74f1 +0 -0
  27. pembot/.git/objects/18/28e18ab80aa64d334b26428708140e280cbc63 +0 -0
  28. pembot/.git/objects/19/f61df7dbd562d04f561288677bbf2f18f5dff7 +0 -0
  29. pembot/.git/objects/28/db0ab48059acccd7d257aa02e52e9b6b83a4a5 +0 -0
  30. pembot/.git/objects/35/97e518a8658280be9f377f78edf1dfa1f23814 +0 -0
  31. pembot/.git/objects/3d/07d3b29ff53d95de3898fb786d61732f210515 +0 -0
  32. pembot/.git/objects/3e/cf23eb95123287531d708a21d4ba88d92ccabb +0 -0
  33. pembot/.git/objects/3f/78215d7e17da726fb352fd92b3c117db9b63ba +0 -0
  34. pembot/.git/objects/3f/e072cf3cb6a9f30c3e9936e3ddf622e80270d0 +0 -0
  35. pembot/.git/objects/51/9e780574933d7627a083222bd10dd74f430904 +0 -0
  36. pembot/.git/objects/61/46a371b9c1bd9f51af273f11f986cfd1bedeba +0 -0
  37. pembot/.git/objects/64/00040794955d17c9a1fe1aaaea59f2c4822177 +0 -0
  38. pembot/.git/objects/6d/7a865a23b1cb4182f67907820104ced48b11c9 +0 -0
  39. pembot/.git/objects/72/f047cda92abcd1ddc857f6461de605f8668331 +0 -0
  40. pembot/.git/objects/73/2e98f08bc806c331b06847fc8c743f545499e5 +0 -0
  41. pembot/.git/objects/86/cdaec229f1fbebf43042266b03878944669f25 +0 -0
  42. pembot/.git/objects/87/d6df5217a4a374f8c1211a05f9bd657f72c9a7 +0 -0
  43. pembot/.git/objects/8b/5be2af9b16f290549193859c214cd9072212e8 +0 -0
  44. pembot/.git/objects/93/8f29d9b4b1ae86e39dddf9e3d115a82ddfc9b6 +0 -0
  45. pembot/.git/objects/9b/123713e30fc9e225f9ac8ff5b02f8f8cf86456 +0 -0
  46. pembot/.git/objects/ab/c6b15265171457b41e2cfdaf3b8c3994a59eb7 +0 -0
  47. pembot/.git/objects/ac/9c9018c62fa30dc142665c1b5a375f4e056880 +0 -0
  48. pembot/.git/objects/b1/1173d9b68db117437ccb9551461152e1e8a77d +0 -0
  49. pembot/.git/objects/b2/4e79ab07fe9e68781961a25ff9f1dbb1546fbb +0 -0
  50. pembot/.git/objects/b8/eea52176ffa4d88c5a9976bee26092421565d3 +0 -0
  51. pembot/.git/objects/bf/32a7e6872e5dc4025ee3df3c921ec7ade0855f +0 -0
  52. pembot/.git/objects/c0/793458db6e1bee7f79f1a504fb8ff4963f8ed3 +0 -0
  53. pembot/.git/objects/c2/443060c07101948487cfa93cc39e082e9e0f5f +0 -0
  54. pembot/.git/objects/e5/3070f2b07f45d031444b09b1b38658f3caf29e +0 -0
  55. pembot/.git/objects/e7/911a702079a6144997ea4e70f59abbe59ec2bc +0 -0
  56. pembot/.git/objects/e9/1172752e9a421ae463112d2b0506b37498c98d +0 -0
  57. pembot/.git/objects/ea/0af89e61a882c5afc2a8c281b2d96f174bfe58 +0 -0
  58. pembot/.git/objects/eb/75e1c49f1e5b79dca17ccdbec8067756523238 +0 -0
  59. pembot/.git/objects/f1/655afa1c5636c8d58969e3194bb770aefbc552 +0 -0
  60. pembot/.git/objects/f4/e991088a63def67a30a2b8bbdb4d58514abab8 +0 -0
  61. pembot/.git/objects/f8/cbb5bfd1503e66cec2c593362c60a317b6d300 +0 -0
  62. pembot/.git/objects/f9/98e1f01c2bf0a20159fc851327af05beb3ac88 +0 -0
  63. pembot/.git/objects/fa/9c9a62ec1203a5868b033ded428c2382c4e1b6 +0 -0
  64. pembot/.git/objects/fb/6c90c9ce5e0cdfbe074a3f060afc66f62eefde +0 -0
  65. pembot/.git/objects/fc/e56f1e09d09a05b9babf796fb40bece176f3a2 +0 -0
  66. pembot/.git/objects/pack/pack-d5469edc8c36e3bb1de5e0070e4d5b1eae935dd4.idx +0 -0
  67. pembot/.git/objects/pack/pack-d5469edc8c36e3bb1de5e0070e4d5b1eae935dd4.pack +0 -0
  68. pembot/.git/objects/pack/pack-d5469edc8c36e3bb1de5e0070e4d5b1eae935dd4.rev +0 -0
  69. pembot/.git/packed-refs +2 -0
  70. pembot/.git/refs/heads/main +1 -0
  71. pembot/.git/refs/remotes/origin/HEAD +1 -0
  72. pembot/.git/refs/remotes/origin/main +1 -0
  73. pembot/.gitignore +7 -0
  74. pembot/AnyToText/__init__.py +0 -0
  75. pembot/AnyToText/convertor.py +260 -0
  76. pembot/LICENSE +674 -0
  77. pembot/TextEmbedder/__init__.py +0 -0
  78. pembot/TextEmbedder/gemini_embedder.py +27 -0
  79. pembot/TextEmbedder/mongodb_embedder.py +258 -0
  80. pembot/TextEmbedder/mongodb_index_creator.py +133 -0
  81. pembot/TextEmbedder/vector_query.py +64 -0
  82. pembot/__init__.py +6 -0
  83. pembot/config/config.yaml +5 -0
  84. pembot/gartner.py +140 -0
  85. pembot/main.py +208 -0
  86. pembot/output_structure_local.py +63 -0
  87. pembot/pdf2markdown/.git/HEAD +1 -0
  88. pembot/pdf2markdown/.git/config +11 -0
  89. pembot/pdf2markdown/.git/description +1 -0
  90. pembot/pdf2markdown/.git/hooks/applypatch-msg.sample +15 -0
  91. pembot/pdf2markdown/.git/hooks/commit-msg.sample +24 -0
  92. pembot/pdf2markdown/.git/hooks/fsmonitor-watchman.sample +174 -0
  93. pembot/pdf2markdown/.git/hooks/post-update.sample +8 -0
  94. pembot/pdf2markdown/.git/hooks/pre-applypatch.sample +14 -0
  95. pembot/pdf2markdown/.git/hooks/pre-commit.sample +49 -0
  96. pembot/pdf2markdown/.git/hooks/pre-merge-commit.sample +13 -0
  97. pembot/pdf2markdown/.git/hooks/pre-push.sample +53 -0
  98. pembot/pdf2markdown/.git/hooks/pre-rebase.sample +169 -0
  99. pembot/pdf2markdown/.git/hooks/pre-receive.sample +24 -0
  100. pembot/pdf2markdown/.git/hooks/prepare-commit-msg.sample +42 -0
  101. pembot/pdf2markdown/.git/hooks/push-to-checkout.sample +78 -0
  102. pembot/pdf2markdown/.git/hooks/sendemail-validate.sample +77 -0
  103. pembot/pdf2markdown/.git/hooks/update.sample +128 -0
  104. pembot/pdf2markdown/.git/index +0 -0
  105. pembot/pdf2markdown/.git/info/exclude +6 -0
  106. pembot/pdf2markdown/.git/logs/HEAD +1 -0
  107. pembot/pdf2markdown/.git/logs/refs/heads/main +1 -0
  108. pembot/pdf2markdown/.git/logs/refs/remotes/origin/HEAD +1 -0
  109. pembot/pdf2markdown/.git/objects/pack/pack-d3051affdd6c31306dc53489168fc870872085d1.idx +0 -0
  110. pembot/pdf2markdown/.git/objects/pack/pack-d3051affdd6c31306dc53489168fc870872085d1.pack +0 -0
  111. pembot/pdf2markdown/.git/objects/pack/pack-d3051affdd6c31306dc53489168fc870872085d1.rev +0 -0
  112. pembot/pdf2markdown/.git/packed-refs +2 -0
  113. pembot/pdf2markdown/.git/refs/heads/main +1 -0
  114. pembot/pdf2markdown/.git/refs/remotes/origin/HEAD +1 -0
  115. pembot/pdf2markdown/LICENSE +21 -0
  116. pembot/pdf2markdown/README.md +107 -0
  117. pembot/pdf2markdown/__init__.py +0 -0
  118. pembot/pdf2markdown/config/config.yaml +2 -0
  119. pembot/pdf2markdown/extract.py +888 -0
  120. pembot/pdf2markdown/requirements.txt +8 -0
  121. pembot/pem.py +157 -0
  122. pembot/query.py +204 -0
  123. pembot/utils/__init__.py +0 -0
  124. pembot/utils/inference_client.py +132 -0
  125. pembot/utils/string_tools.py +45 -0
  126. pembot-0.0.3.dist-info/METADATA +8 -0
  127. pembot-0.0.3.dist-info/RECORD +129 -0
  128. pembot-0.0.3.dist-info/WHEEL +5 -0
  129. pembot-0.0.3.dist-info/licenses/LICENSE +674 -0
@@ -0,0 +1,888 @@
1
+ import fitz # PyMuPDF
2
+ import pdfplumber
3
+ import re
4
+ import yaml
5
+ # import pytesseract
6
+ import numpy as np
7
+ from transformers import AutoTokenizer, AutoProcessor, AutoModelForImageTextToText
8
+ # VisionEncoderDecoderModel, ViTImageProcessor,
9
+ from typing import Literal, final
10
+ import torch
11
+ from PIL import Image
12
+ import os
13
+ import logging
14
+ import traceback
15
+ import warnings
16
+ from pathlib import Path
17
+ from abc import ABC, abstractmethod
18
+ import argparse
19
+ from PIL import Image
20
+ import io
21
+ from PIL import Image
22
# --- Module-level OCR model setup ---
# NOTE(review): the model is loaded at import time and requests
# flash_attention_2; importing this module therefore requires a compatible
# GPU environment. Consider lazy initialization — TODO confirm with owners.
model_path = "nanonets/Nanonets-OCR-s"

model = AutoModelForImageTextToText.from_pretrained(
    model_path,
    torch_dtype="auto",
    device_map="auto",
    attn_implementation="flash_attention_2",
)
model.eval()

tokenizer = AutoTokenizer.from_pretrained(model_path)
processor = AutoProcessor.from_pretrained(model_path)

warnings.filterwarnings("ignore")

# Project configuration; the relative path is resolved against the current
# working directory — presumably callers run from the package root (verify).
with open(Path("config/config.yaml").resolve(), "r", encoding="utf-8") as f:
    config = yaml.safe_load(f)
43
+ class PDFExtractor(ABC):
44
+ """Abstract base class for PDF extraction."""
45
+
46
+ def __init__(self, pdf_path):
47
+ self.pdf_path = pdf_path
48
+ self.setup_logging()
49
+
50
+ def setup_logging(self):
51
+ """Set up logging configuration."""
52
+ log_dir = Path(__file__).parent / "logs"
53
+ log_dir.mkdir(parents=True, exist_ok=True)
54
+ log_file = log_dir / f"{Path(__file__).stem}.log"
55
+
56
+ logging.basicConfig(
57
+ level=logging.INFO,
58
+ format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
59
+ handlers=[
60
+ logging.FileHandler(log_file, encoding="utf-8"),
61
+ logging.StreamHandler(),
62
+ ],
63
+ )
64
+ self.logger = logging.getLogger(__name__)
65
+
66
+ @abstractmethod
67
+ def extract(self) -> tuple[object, list[object]] | tuple[Literal[''], list[object]] | None:
68
+ """Abstract method for extracting content from PDF."""
69
+ pass
70
+
71
+
72
class MarkdownPDFExtractor(PDFExtractor):
    """Class for extracting markdown-formatted content from PDF."""

    # Unicode glyphs recognized as bullet-list markers.
    BULLET_POINTS = "•◦▪▫●○"

    def __init__(self, pdf_path, output_path= config["OUTPUT_DIR"], page_delimiter= config["PAGE_DELIMITER"]):
        """Prepare output locations for the markdown rendering of *pdf_path*.

        Note: the defaults are read from the module-level ``config`` once, at
        class-definition time, not per call.
        """
        super().__init__(pdf_path)

        self.markdown_content = ""
        self.pdf_filename = Path(pdf_path).stem
        self.output_path = output_path
        # Destination file: <output_path>/<pdf stem>.md
        self.output_filepath = f"{Path(self.output_path)}/{self.pdf_filename}.md"
        self.page_delimiter = page_delimiter
        Path(output_path).mkdir(parents=True, exist_ok=True)

        # A vit-gpt2 image-captioning pipeline used to be initialized here;
        # it was retired in favor of the module-level Nanonets OCR model.
111
def extract(self):
    """Run markdown extraction, persist the result, and return it.

    Returns:
        tuple[str, list[str]]: full markdown text and per-page markdown;
        ("", []) when any step fails (the error is logged).
    """
    try:
        content, pages = self.extract_markdown()
        self.save_markdown(content)
        self.markdown_content = content
        self.logger.info(
            f"Markdown content has been saved to {Path(self.output_path)}/{self.pdf_filename}.md"
        )
        return content, pages

    except Exception as e:
        self.logger.error(f"Error processing PDF: {e}")
        self.logger.exception(traceback.format_exc())
        return "", []
126
def extract_markdown_by_blocks(self):
    """Main method to extract markdown from PDF."""
    # Walks each page's layout blocks (text/image) via PyMuPDF, interleaves
    # tables found by pdfplumber at their page positions, and post-processes
    # the stitched result. Returns (full_markdown, per_page_markdown) or
    # ("", []) on error.
    try:
        doc = fitz.open(self.pdf_path)
        markdown_content = ""
        markdown_pages = []
        tables = self.extract_tables()
        table_index = 0
        # Cross-block formatting state threaded into process_text_block.
        # NOTE(review): these are immutable values, so process_text_block
        # cannot actually update them for the caller — state resets per block.
        list_counter = 0
        in_code_block = False
        code_block_content = ""
        code_block_lang = None
        prev_line = ""

        for page_num, page in enumerate(doc):
            self.logger.info(f"Processing page {page_num + 1}")
            page_content = ""
            blocks = page.get_text("dict")["blocks"]
            page_height = page.rect.height
            links = self.extract_links(page)

            # Pages with 1..128 images also get their image blocks rendered;
            # beyond 128 images only text blocks are kept (noise guard).
            if len(page.get_images()) > 0 and len(page.get_images()) <= 128:
                for block in blocks:
                    if block["type"] == 0:  # Text
                        page_content += self.process_text_block(
                            block,
                            page_height,
                            links,
                            list_counter,
                            in_code_block,
                            code_block_content,
                            code_block_lang,
                            prev_line,
                        )
                    elif block["type"] == 1:  # Image
                        page_content += self.process_image_block(page, block)

            else:
                for block in blocks:
                    if block["type"] == 0:  # Text
                        page_content += self.process_text_block(
                            block,
                            page_height,
                            links,
                            list_counter,
                            in_code_block,
                            code_block_content,
                            code_block_lang,
                            prev_line,
                        )

            # Insert tables at their approximate positions
            while (
                table_index < len(tables)
                and tables[table_index]["page"] == page.number
            ):
                page_content += (
                    "\n\n"
                    + self.table_to_markdown(tables[table_index]["content"])
                    + "\n\n"
                )
                table_index += 1

            markdown_pages.append(self.post_process_markdown(page_content))
            markdown_content += page_content + config["PAGE_DELIMITER"]

        markdown_content = self.post_process_markdown(markdown_content)
        return markdown_content, markdown_pages
    except Exception as e:
        self.logger.error(f"Error extracting markdown: {e}")
        self.logger.exception(traceback.format_exc())
        return "", []
+
200
+ def ocr_page_with_nanonets_s(self, pil_image, model, processor, max_new_tokens: int | None = None):
201
+ prompt = """Extract the text from the above document as if you were reading it naturally. Return the tables in html format. Return the equations in LaTeX representation. If there is an image in the document and image caption is not present, add a small description of the image inside the <img></img> tag; otherwise, add the image caption inside <img></img>. Watermarks should be wrapped in brackets. Ex: <watermark>OFFICIAL COPY</watermark>. Page numbers should be wrapped in brackets. Ex: <page_number>14</page_number> or <page_number>9/22</page_number>. Prefer using ☐ and ☑ for check boxes."""
202
+ if max_new_tokens is None:
203
+ max_new_tokens= 4096
204
+
205
+ # image = Image.open(image_path)
206
+ image = pil_image
207
+ messages = [
208
+ {"role": "system", "content": "You are a helpful assistant."},
209
+ {"role": "user", "content": [
210
+ {"type": "image", "image": image},
211
+ {"type": "text", "text": prompt},
212
+ ]},
213
+ ]
214
+ text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
215
+ inputs = processor(text=[text], images=[image], padding=True, return_tensors="pt")
216
+ inputs = inputs.to(model.device)
217
+
218
+ output_ids = model.generate(**inputs, max_new_tokens=max_new_tokens, do_sample=False)
219
+ generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, output_ids)]
220
+
221
+ output_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
222
+ return output_text[0]
223
+
224
+
225
+
226
def extract_markdown(self):
    """
    Extract all text content from the PDF, page by page.

    Per page it combines three sources: direct text blocks, OCR of embedded
    image blocks, and — when a page has no (or under ~50 chars of)
    searchable text — OCR of a full-page 300 DPI render. Tables found by
    pdfplumber are interleaved at their pages.

    Returns:
        tuple[str, list[str]]: (full text joined with self.page_delimiter,
        list of per-page markdown strings). Returns ("", []) on error so
        callers can always tuple-unpack the result.
        (Bugfix: the original returned a bare [] on error, which broke the
        two-value unpacking done by self.extract().)
    """
    all_pages_text = []
    the_text = ""

    try:
        doc = fitz.open(self.pdf_path)
        logging.info(f"Opened PDF: {self.pdf_path}")

        tables = self.extract_tables()
        table_index = 0
        # Formatting state threaded into process_text_block.
        list_counter = 0
        in_code_block = False
        code_block_content = ""
        code_block_lang = None
        prev_line = ""

        for page_num, page in enumerate(doc):
            page_text_content = []
            page_has_searchable_text = False

            logging.info(f"\nProcessing page {page_num + 1}...")

            # --- Phase 1: direct text blocks and embedded image blocks ---
            blocks = page.get_text('dict')['blocks']
            text_blocks_content = []
            image_block_text_content = []

            page_height = page.rect.height
            links = self.extract_links(page)

            for block_num, block in enumerate(blocks):
                if block['type'] == 0:  # Text block
                    page_has_searchable_text = True
                    text_blocks_content.append(self.process_text_block(
                        block,
                        page_height,
                        links,
                        list_counter,
                        in_code_block,
                        code_block_content,
                        code_block_lang,
                        prev_line,
                    ))
                elif block['type'] == 1:  # Image block
                    logging.info(f" Found embedded image block (Page {page_num+1}, Block {block_num+1})")
                    img_data = block['image']

                    try:
                        # OCR the embedded image directly.
                        pil_image = Image.open(io.BytesIO(img_data))
                        ocr_text_from_block_image = self.ocr_page_with_nanonets_s(pil_image, model, processor, max_new_tokens=15000)

                        if ocr_text_from_block_image.strip():
                            logging.info(f" OCR found text in embedded image block.")
                            image_block_text_content.append(ocr_text_from_block_image.strip())
                        else:
                            # Captioning was retired; use a placeholder.
                            image_block_text_content.append("An Image")

                    except Exception as e:
                        logging.error(f" Error processing embedded image block for OCR/caption: {e}")
                        image_block_text_content.append("An Image")

            # Insert tables at their approximate positions.
            # Bugfix: the original used `page_text_content += <str>`, which
            # extends the LIST with individual characters; append the whole
            # markdown table string instead.
            while (
                table_index < len(tables)
                and tables[table_index]["page"] == page.number
            ):
                page_text_content.append(
                    "\n\n"
                    + self.table_to_markdown(tables[table_index]["content"])
                    + "\n\n"
                )
                table_index += 1

            # Add content from text blocks
            if text_blocks_content:
                page_text_content.append(" ".join(text_blocks_content))

            # Add content from image blocks
            if image_block_text_content:
                page_text_content.append("\n".join(image_block_text_content))

            # --- Phase 2: full-page OCR for scanned / minimal-text pages ---
            # Threshold of 50 characters is heuristic; adjust as needed.
            current_text_len = len(" ".join(page_text_content).strip())

            if not page_has_searchable_text or current_text_len < 50:
                logging.info(f" Page {page_num + 1} appears to be a scanned image or has minimal text. Attempting full-page OCR.")
                try:
                    # Render the page at ~300 DPI (PDF user space is 72 DPI).
                    pix = page.get_pixmap(matrix=fitz.Matrix(300/72, 300/72))
                    img_bytes = pix.tobytes("png")

                    pil_image = Image.open(io.BytesIO(img_bytes))

                    ocr_text_from_page = self.ocr_page_with_nanonets_s(pil_image, model, processor, max_new_tokens=15000)

                    if ocr_text_from_page.strip():
                        logging.info(f" Successfully extracted text via full-page OCR.")
                        page_text_content.append(ocr_text_from_page.strip())
                    else:
                        logging.info(f" Full-page OCR yielded no text for page {page_num+1}.")

                except Exception as e:
                    logging.error(f" Error during full-page OCR on page {page_num+1}: {e}")
            else:
                logging.info(f" Page {page_num + 1} has sufficient searchable text; skipping full-page OCR.")

            # Concatenate all collected text for the current page; filter(None,
            # ...) drops empty strings.
            final_page_text = "\n".join(filter(None, page_text_content)).strip()
            all_pages_text.append(self.post_process_markdown(final_page_text))
            the_text += final_page_text + self.page_delimiter

            logging.info(f" Comprehensive text for page {page_num + 1} (first 200 chars):\n{final_page_text[:200]}...")

            print("\npage done\n")
            print(final_page_text)

        doc.close()
        return the_text, all_pages_text

    except fitz.FileNotFoundError:
        logging.error(f"PDF file not found: {self.pdf_path}")
        return "", []
    except Exception as e:
        logging.critical(f"An unexpected error occurred: {e}")
        return "", []
def extract_tables(self):
    """Collect tables from every page using pdfplumber.

    Returns:
        list[dict]: entries of the form {"page": page_number, "content":
        rows}. Pages reporting more than 128 tables are skipped (treated as
        detection noise). Errors are logged and a partial list is returned.
    """
    found = []
    try:
        with pdfplumber.open(self.pdf_path) as pdf:
            for page_number, page in enumerate(pdf.pages):
                page_tables = page.extract_tables()
                if len(page_tables) > 128:
                    continue
                found.extend(
                    {"page": page_number, "content": table}
                    for table in page_tables
                )
        self.logger.info(f"Extracted {len(found)} tables from the PDF.")
    except Exception as e:
        self.logger.error(f"Error extracting tables: {e}")
        self.logger.exception(traceback.format_exc())
    return found
def table_to_markdown(self, table):
    """Render a table (list of rows) as an aligned markdown pipe table.

    The first row is treated as the header; ``None`` cells become empty
    strings. Returns "" for an empty/missing table or on error.
    """
    if not table:
        return ""

    try:
        # Normalize every cell: None -> "", others -> stripped str.
        rows = [
            ["" if cell is None else str(cell).strip() for cell in row]
            for row in table
        ]
        widths = [max(len(cell) for cell in column) for column in zip(*rows)]

        rendered = []
        for row_index, row in enumerate(rows):
            padded = [cell.ljust(widths[col]) for col, cell in enumerate(row)]
            rendered.append("| " + " | ".join(padded) + " |\n")
            if row_index == 0:
                # Header separator: each column width plus the two pad spaces.
                rendered.append(
                    "|" + "|".join("-" * (w + 2) for w in widths) + "|\n"
                )
        return "".join(rendered)
    except Exception as e:
        self.logger.error(f"Error converting table to markdown: {e}")
        self.logger.exception(traceback.format_exc())
        return ""
def perform_ocr(self, image):
    """OCR one image via the module-level Nanonets model.

    Returns the stripped OCR text, or "" when OCR fails (failure is logged).
    """
    try:
        result = self.ocr_page_with_nanonets_s(image, model, processor, max_new_tokens=15000)
        return result.strip()
    except Exception as e:
        self.logger.error(f"Error performing OCR: {e}")
        self.logger.exception(traceback.format_exc())
        return ""
def caption_image(self, image):
    """Return OCR text for *image*, falling back to a generated caption.

    The captioning branch reads self.model / self.feature_extractor /
    self.tokenizer / self.device, whose setup is currently disabled in
    __init__; when they are absent the except clause logs and returns "".
    """
    try:
        ocr_text = self.perform_ocr(image)
        if ocr_text:
            return ocr_text

        # Captioning model expects RGB input.
        if image.mode != "RGB":
            image = image.convert("RGB")

        # Reorder to (C, H, W) as expected by the feature extractor.
        image = np.array(image).transpose(2, 0, 1)

        inputs = self.feature_extractor(images=image, return_tensors="pt").to(
            self.device
        )
        generated_ids = self.model.generate(inputs.pixel_values, max_length=30)
        caption = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True
        )[0]
        return caption.strip()
    except Exception as e:
        self.logger.error(f"Error captioning image: {e}")
        self.logger.exception(traceback.format_exc())
        return ""
def clean_text(self, text):
    """Trim the ends and collapse every internal whitespace run to one space."""
    return re.sub(r"\s+", " ", text.strip())
def apply_formatting(self, text, flags):
    """Wrap *text* in markdown emphasis according to PyMuPDF span flags.

    Bits used: 0 superscript, 1 italic, 3 monospace, 4 bold; bit 5 is read
    as subscript here — TODO confirm against the PyMuPDF flag definitions.
    Super/subscript marking is skipped for multi-word text. Non-empty
    results are padded with one space on each side; "" passes through.
    """
    text = text.strip()
    if not text:
        return text

    bold = flags & 2**4
    italic = flags & 2**1
    monospace = flags & 2**3
    superscript = flags & 2**0
    subscript = flags & 2**5

    contains_whitespace = bool(re.search(r"\s+", text))
    if monospace:
        text = f"`{text}`"
    elif superscript and not contains_whitespace:
        text = f"^{text}^"
    elif subscript and not contains_whitespace:
        text = f"~{text}~"

    if bold and italic:
        text = f"***{text}***"
    elif bold:
        text = f"**{text}**"
    elif italic:
        text = f"*{text}*"

    return f" {text} "
def is_bullet_point(self, text):
    """Return True when the stripped text starts with a known bullet glyph."""
    stripped = text.strip()
    return any(stripped.startswith(glyph) for glyph in self.BULLET_POINTS)
def convert_bullet_to_markdown(self, text):
    """Convert a leading bullet glyph to the markdown '- ' list marker.

    Leading whitespace is removed first; then the bullet character (any of
    self.BULLET_POINTS) plus trailing whitespace is replaced with "- ".
    """
    text = re.sub(r"^\s*", "", text)
    # Bugfix: the original built this pattern with a non-raw f-string, so
    # `\s` was an invalid escape sequence (SyntaxWarning on modern Python);
    # rf"" keeps the interpolation while making the regex escapes literal.
    return re.sub(rf"^[{re.escape(self.BULLET_POINTS)}]\s*", "- ", text)
def is_numbered_list_item(self, text):
    """True for lines like '1.' or '23)': digits, up to 3 spaces, '.'/')'."""
    return re.match(r"^\d+\s{0,3}[.)]", text.strip()) is not None
543
def convert_numbered_list_to_markdown(self, text, list_counter):
    """Renumber a numbered-list line as '<list_counter>. <rest>'."""
    stripped = re.sub(r"^\s*", "", text)
    return re.sub(r"^\d+\s{0,3}[.)]", f"{list_counter}. ", stripped)
548
def is_horizontal_line(self, text):
    """True when the stripped text consists only of '-' and '_' characters."""
    return re.fullmatch(r"[_-]+", text.strip()) is not None
552
def extract_links(self, page):
    """Collect the URI links on *page*.

    Returns:
        list[dict]: entries of the form {"rect": <link rect>, "uri": <url>}.
        Non-URI link kinds are ignored; errors are logged and the partial
        list is returned.
    """
    collected = []
    try:
        for link in page.get_links():
            # kind == 2 marks a URI link in PyMuPDF.
            if link["kind"] == 2:
                collected.append({"rect": link["from"], "uri": link["uri"]})
        self.logger.info(f"Extracted {len(collected)} links from the page.")
    except Exception as e:
        self.logger.error(f"Error extracting links: {e}")
        self.logger.exception(traceback.format_exc())
    return collected
565
def detect_code_block(self, prev_line, current_line):
    """Heuristically detect whether this line pair opens a code block.

    Each candidate language maps to (prev_pattern, curr_pattern) regex
    pairs; the first language whose pair matches both stripped lines wins
    (dict order matters — Python is tried first).

    Returns:
        str | None: detected language name, or None when nothing matches.
    """
    patterns = {
        "python": [
            (
                r"^(?:from|import)\s+\w+",
                r"^(?:from|import|def|class|if|for|while|try|except|with)\s",
            ),
            (r"^(?:def|class)\s+\w+", r"^\s{4}"),
            (r"^\s{4}", r"^\s{4,}"),
        ],
        "javascript": [
            (
                r"^(?:function|const|let|var)\s+\w+",
                r"^(?:function|const|let|var|if|for|while|try|catch|class)\s",
            ),
            (r"^(?:if|for|while)\s*\(", r"^\s{2,}"),
            (r"^\s{2,}", r"^\s{2,}"),
        ],
        "html": [
            (
                r"^<(!DOCTYPE|html|head|body|div|p|a|script|style)",
                r"^<(!DOCTYPE|html|head|body|div|p|a|script|style)",
            ),
            (r"^<\w+.*>$", r"^\s{2,}<"),
            (r"^\s{2,}<", r"^\s{2,}<"),
        ],
        "shell": [
            (r"^(?:\$|\#)\s", r"^(?:\$|\#)\s"),
            (r"^[a-z_]+\s*=", r"^[a-z_]+\s*="),
        ],
        "bash": [
            (
                r"^(?:#!/bin/bash|alias|export|source)\s",
                r"^(?:#!/bin/bash|alias|export|source|echo|read|if|for|while|case|function)\s",
            ),
            (r"^(?:if|for|while|case|function)\s", r"^\s{2,}"),
            (r"^\s{2,}", r"^\s{2,}"),
        ],
        "cpp": [
            (
                r"^#include\s*<",
                r"^(?:#include|using|namespace|class|struct|enum|template|typedef)\s",
            ),
            (r"^(?:class|struct|enum)\s+\w+", r"^\s{2,}"),
            (r"^\s{2,}", r"^\s{2,}"),
        ],
        "java": [
            (
                r"^(?:import|package)\s+\w+",
                r"^(?:import|package|public|private|protected|class|interface|enum)\s",
            ),
            (r"^(?:public|private|protected)\s+class\s+\w+", r"^\s{4,}"),
            (r"^\s{4,}", r"^\s{4,}"),
        ],
        "json": [
            (r"^\s*{", r'^\s*["{[]'),
            (r'^\s*"', r'^\s*["}],?$'),
            (r"^\s*\[", r"^\s*[}\]],?$"),
        ],
    }

    prev_stripped = prev_line.strip()
    curr_stripped = current_line.strip()
    for lang, pattern_pairs in patterns.items():
        for prev_pattern, curr_pattern in pattern_pairs:
            if re.match(prev_pattern, prev_stripped) and re.match(curr_pattern, curr_stripped):
                return lang
    return None
636
def process_text_block(
    self,
    block,
    page_height,
    links,
    list_counter,
    in_code_block,
    code_block_content,
    code_block_lang,
    prev_line,
):
    """Process a text block and convert it to markdown."""
    # Two passes: (1) walk the block's lines/spans and rebuild a plain-text
    # rendition with headers, emphasis, list markers and link syntax applied;
    # (2) re-scan that text line by line to wrap detected code runs in
    # fenced blocks and normalize list items.
    # NOTE(review): list_counter / in_code_block / code_block_* / prev_line
    # are immutable values rebound locally, so the caller's copies are NOT
    # updated across blocks — formatting state resets for every block.
    try:
        block_rect = block["bbox"]
        # Skip content in the top/bottom 50pt margin bands.
        if block_rect[1] < 50 or block_rect[3] > page_height - 50:
            return ""  # Skip headers and footers

        block_text = ""
        last_y1 = None
        last_font_size = None

        for line in block["lines"]:
            line_text = ""
            curr_font_size = [span["size"] for span in line["spans"]]

            for span in line["spans"]:
                text = span["text"]
                font_size = span["size"]
                flags = span["flags"]
                span_rect = span["bbox"]

                if self.is_horizontal_line(text):
                    line_text += "\n---\n"
                    continue

                text = self.clean_text(text)

                if text.strip():
                    # Font size decides heading level (helper defined
                    # elsewhere in this class — outside this view).
                    header_level = self.get_header_level(font_size)
                    if header_level > 0:
                        text = f"\n{'#' * header_level} {text}\n\n"

                    else:
                        is_list_item = self.is_bullet_point(
                            text
                        ) or self.is_numbered_list_item(text)

                        if is_list_item:
                            # Split into marker + content so formatting only
                            # wraps the content part.
                            marker, content = re.split(
                                r"(?<=^[•◦▪▫●○\d.)])\s*", text, 1
                            )
                            formatted_content = self.apply_formatting(
                                content, flags
                            )
                            text = f"{marker} {formatted_content}"
                        else:
                            text = self.apply_formatting(text, flags)

                # Wrap the span in a markdown link when it overlaps a URI
                # link rectangle on the page.
                for link in links:
                    if fitz.Rect(span_rect).intersects(link["rect"]):
                        text = f"[{text.strip()}]({link['uri']})"
                        break

                line_text += text

            # Insert a newline when the vertical gap or average font size
            # changes noticeably between consecutive lines.
            if last_y1 is not None:
                avg_last_font_size = (
                    sum(last_font_size) / len(last_font_size)
                    if last_font_size
                    else 0
                )
                avg_current_font_size = sum(curr_font_size) / len(curr_font_size)
                font_size_changed = (
                    abs(avg_current_font_size - avg_last_font_size) > 1
                )

                if abs(line["bbox"][3] - last_y1) > 2 or font_size_changed:
                    block_text += "\n"

            block_text += self.clean_text(line_text) + " "
            last_font_size = curr_font_size
            last_y1 = line["bbox"][3]

        # --- Pass 2: code fencing and list normalization ---
        markdown_content = ""
        lines = block_text.split("\n")
        for i, line in enumerate(lines):
            clean_line = self.clean_text(line)

            if not in_code_block:
                code_lang = self.detect_code_block(prev_line, clean_line)
                if code_lang:
                    # Open a fenced block; the previous line is included as
                    # its first line.
                    in_code_block = True
                    code_block_lang = code_lang
                    code_block_content = prev_line + "\n" + clean_line + "\n"
                    prev_line = clean_line
                    continue

            if in_code_block:
                code_block_content += clean_line + "\n"
                # Close the fence at the last line, or when the detected
                # language for the next pair changes.
                if (
                    i == len(lines) - 1
                    or self.detect_code_block(clean_line, lines[i + 1])
                    != code_block_lang
                ):
                    markdown_content += (
                        f"```{code_block_lang}\n{code_block_content}```\n\n"
                    )
                    in_code_block = False
                    code_block_content = ""
                    code_block_lang = None
            else:
                if self.is_bullet_point(clean_line):
                    markdown_content += "\n" + self.convert_bullet_to_markdown(
                        clean_line
                    )
                    list_counter = 0
                elif self.is_numbered_list_item(clean_line):
                    list_counter += 1
                    markdown_content += (
                        "\n"
                        + self.convert_numbered_list_to_markdown(
                            clean_line, list_counter
                        )
                    )
                else:
                    markdown_content += f"{clean_line}\n"
                    list_counter = 0

            prev_line = clean_line

        return markdown_content + "\n"
    except Exception as e:
        self.logger.error(f"Error processing text block: {e}")
        self.logger.exception(traceback.format_exc())
        return ""
772
def process_image_block(self, page, block):
    """Render an image block to a PNG file and return a markdown image tag.

    The block's region is rasterized at 2x resolution, saved under
    ``self.output_path``, and captioned via ``self.caption_image`` (falling
    back to the generated filename stem). Returns ``""`` for tiny fragments
    or on error.
    """
    try:
        clip_rect = block["bbox"]
        # Render the clipped region at double resolution in both axes.
        render_matrix = fitz.Matrix(2.0, 2.0)
        pix = page.get_pixmap(clip=clip_rect, matrix=render_matrix, alpha=False)
        rendered = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)

        # Ignore tiny fragments — likely decorations or artifacts.
        if rendered.width < 20 or rendered.height < 20:
            return ""

        stem = f"{self.pdf_filename}_image_{int(page.number)+1}_{block['number']}"
        image_path = Path(self.output_path) / f"{stem}.png"
        rendered.save(image_path, "PNG", optimize=True, quality=95)

        # Use the model caption when available, otherwise the filename stem.
        alt_text = self.caption_image(rendered) or stem
        return f"![{alt_text}]({image_path})\n\n"
    except Exception as e:
        self.logger.error(f"Error processing image block: {e}")
        self.logger.exception(traceback.format_exc())
        return ""
803
+
804
def get_header_level(self, font_size):
    """Map a font size to a markdown header level.

    Returns 1-6 for progressively smaller header fonts, or 0 when the size
    is body text (12 pt or smaller).
    """
    # Exclusive lower bounds, checked largest-first.
    thresholds = ((24, 1), (20, 2), (18, 3), (16, 4), (14, 5), (12, 6))
    for min_size, level in thresholds:
        if font_size > min_size:
            return level
    return 0
820
+
821
def post_process_markdown(self, markdown_content):
    """Normalize assembled markdown text.

    Collapses excess blank lines, removes standalone page-number lines,
    squeezes repeated spaces, merges duplicate horizontal rules, and strips
    stray ``#`` characters embedded inside header lines.

    Args:
        markdown_content: Raw markdown assembled from the PDF pages.

    Returns:
        The cleaned markdown string; on error, returns the content as
        processed so far (the error is logged, never raised).
    """
    try:
        # Collapse 3+ consecutive newlines to a single blank line.
        markdown_content = re.sub(r"\n{3,}", "\n\n", markdown_content)
        # Remove page numbers: lines consisting solely of digits. Anchored
        # per line — the previous unanchored r"(\d+)\s*\n" also deleted
        # trailing digits of ordinary text and merged it with the next line
        # (e.g. "version 2\nnext" -> "version next").
        markdown_content = re.sub(
            r"^[ \t]*\d+[ \t]*\n", "", markdown_content, flags=re.MULTILINE
        )
        # Squeeze runs of spaces.
        markdown_content = re.sub(r" +", " ", markdown_content)
        # Merge consecutive horizontal rules into one.
        markdown_content = re.sub(
            r"\s*(---\n)+", "\n\n---\n", markdown_content
        )

        def remove_middle_headers(match):
            line = match.group(0)
            # Keep the initial header marker and remove all subsequent '#'
            # characters on the same line.
            return re.sub(
                r"(^#{1,6}\s).*?(?=\n)",
                lambda m: m.group(1)
                + re.sub(r"#", "", m.group(0)[len(m.group(1)) :]),
                line,
            )

        markdown_content = re.sub(
            r"^#{1,6}\s.*\n",
            remove_middle_headers,
            markdown_content,
            flags=re.MULTILINE,
        )  # Remove '#' appearing in the middle of header lines
        return markdown_content
    except Exception as e:
        self.logger.error(f"Error post-processing markdown: {e}")
        self.logger.exception(traceback.format_exc())
        return markdown_content
858
+
859
def save_markdown(self, markdown_content):
    """Write *markdown_content* to ``self.output_filepath`` as UTF-8.

    The output directory is created on demand; failures are logged and
    swallowed so extraction can continue.
    """
    try:
        os.makedirs(Path(self.output_path), exist_ok=True)
        Path(self.output_filepath).write_text(markdown_content, encoding="utf-8")
        self.logger.info("Markdown content saved successfully.")
    except Exception as e:
        self.logger.error(f"Error saving markdown content: {e}")
        self.logger.exception(traceback.format_exc())
873
+
874
+
875
def main():
    """CLI entry point: extract markdown pages from the PDF given via --pdf_path."""
    arg_parser = argparse.ArgumentParser(
        description="Extract markdown-formatted content from a PDF file."
    )
    arg_parser.add_argument(
        "--pdf_path", help="Path to the input PDF file", required=True
    )
    cli_args = arg_parser.parse_args()

    # Run the extraction and hand the per-page markdown back to the caller.
    pdf_extractor = MarkdownPDFExtractor(cli_args.pdf_path)
    return pdf_extractor.extract()
885
+
886
+
887
# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main()