mistocr 0.2.5__tar.gz → 0.4.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: mistocr
- Version: 0.2.5
+ Version: 0.4.0
  Summary: Batch OCR for PDFs with heading restoration and visual content integration
  Home-page: https://github.com/franckalbinet/mistocr
  Author: Solveit
@@ -23,6 +23,7 @@ Requires-Dist: mistralai
  Requires-Dist: pillow
  Requires-Dist: dotenv
  Requires-Dist: lisette
+ Requires-Dist: PyPDF2
  Provides-Extra: dev
  Dynamic: author
  Dynamic: author-email
@@ -0,0 +1 @@
+ __version__ = "0.4.0"
@@ -5,7 +5,8 @@ d = { 'settings': { 'branch': 'main',
  'doc_host': 'https://franckalbinet.github.io',
  'git_url': 'https://github.com/franckalbinet/mistocr',
  'lib_path': 'mistocr'},
- 'syms': { 'mistocr.core': { 'mistocr.core._get_paths': ('core.html#_get_paths', 'mistocr/core.py'),
+ 'syms': { 'mistocr.core': { 'mistocr.core._check_timeout': ('core.html#_check_timeout', 'mistocr/core.py'),
+ 'mistocr.core._get_paths': ('core.html#_get_paths', 'mistocr/core.py'),
  'mistocr.core._prep_batch': ('core.html#_prep_batch', 'mistocr/core.py'),
  'mistocr.core._run_batch': ('core.html#_run_batch', 'mistocr/core.py'),
  'mistocr.core.create_batch_entry': ('core.html#create_batch_entry', 'mistocr/core.py'),
@@ -18,10 +19,12 @@ d = { 'settings': { 'branch': 'main',
  'mistocr.core.save_page': ('core.html#save_page', 'mistocr/core.py'),
  'mistocr.core.save_pages': ('core.html#save_pages', 'mistocr/core.py'),
  'mistocr.core.submit_batch': ('core.html#submit_batch', 'mistocr/core.py'),
+ 'mistocr.core.subset_pdf': ('core.html#subset_pdf', 'mistocr/core.py'),
  'mistocr.core.upload_pdf': ('core.html#upload_pdf', 'mistocr/core.py'),
  'mistocr.core.wait_for_job': ('core.html#wait_for_job', 'mistocr/core.py')},
  'mistocr.pipeline': {'mistocr.pipeline.pdf_to_md': ('pipeline.html#pdf_to_md', 'mistocr/pipeline.py')},
- 'mistocr.refine': { 'mistocr.refine.HeadingCorrections': ('refine.html#headingcorrections', 'mistocr/refine.py'),
+ 'mistocr.refine': { 'mistocr.refine.HeadingCorrection': ('refine.html#headingcorrection', 'mistocr/refine.py'),
+ 'mistocr.refine.HeadingCorrections': ('refine.html#headingcorrections', 'mistocr/refine.py'),
  'mistocr.refine.ImgDescription': ('refine.html#imgdescription', 'mistocr/refine.py'),
  'mistocr.refine.add_descs_to_pg': ('refine.html#add_descs_to_pg', 'mistocr/refine.py'),
  'mistocr.refine.add_descs_to_pgs': ('refine.html#add_descs_to_pgs', 'mistocr/refine.py'),
@@ -3,8 +3,9 @@
  # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/00_core.ipynb.

  # %% auto 0
- __all__ = ['ocr_model', 'ocr_endpoint', 'get_api_key', 'upload_pdf', 'create_batch_entry', 'prep_pdf_batch', 'submit_batch',
- 'wait_for_job', 'download_results', 'save_images', 'save_page', 'save_pages', 'ocr_pdf', 'read_pgs']
+ __all__ = ['logger', 'ocr_model', 'ocr_endpoint', 'get_api_key', 'upload_pdf', 'create_batch_entry', 'prep_pdf_batch',
+ 'submit_batch', 'wait_for_job', 'download_results', 'save_images', 'save_page', 'save_pages', 'ocr_pdf',
+ 'read_pgs', 'subset_pdf']

  # %% ../nbs/00_core.ipynb 3
  from fastcore.all import *
@@ -13,8 +14,15 @@ from io import BytesIO
  from pathlib import Path
  from PIL import Image
  from mistralai import Mistral
+ import PyPDF2
+ import logging

- # %% ../nbs/00_core.ipynb 6
+ # %% ../nbs/00_core.ipynb 4
+ logger = logging.getLogger(__name__)
+ logging.basicConfig(level=logging.WARNING, format='%(name)s - %(levelname)s - %(message)s')
+ logger.setLevel(logging.DEBUG)
+
+ # %% ../nbs/00_core.ipynb 7
  def get_api_key(
  key:str=None # Mistral API key
  ):
@@ -23,11 +31,11 @@ def get_api_key(
  if not key: raise ValueError("MISTRAL_API_KEY not found")
  return key

- # %% ../nbs/00_core.ipynb 7
+ # %% ../nbs/00_core.ipynb 8
  ocr_model = "mistral-ocr-latest"
  ocr_endpoint = "/v1/ocr"

- # %% ../nbs/00_core.ipynb 10
+ # %% ../nbs/00_core.ipynb 11
  def upload_pdf(
  path:str, # Path to PDF file
  key:str=None # Mistral API key
@@ -38,7 +46,7 @@ def upload_pdf(
  uploaded = c.files.upload(file=dict(file_name=path.stem, content=path.read_bytes()), purpose="ocr")
  return c.files.get_signed_url(file_id=uploaded.id).url, c

- # %% ../nbs/00_core.ipynb 15
+ # %% ../nbs/00_core.ipynb 16
  def create_batch_entry(
  path:str, # Path to PDF file,
  url:str, # Mistral signed URL
@@ -50,7 +58,7 @@ def create_batch_entry(
  if not cid: cid = path.stem
  return dict(custom_id=cid, body=dict(document=dict(type="document_url", document_url=url), include_image_base64=inc_img))

- # %% ../nbs/00_core.ipynb 17
+ # %% ../nbs/00_core.ipynb 18
  def prep_pdf_batch(
  path:str, # Path to PDF file,
  cid:str=None, # Custom ID (by default using the file name without extention)
@@ -61,7 +69,7 @@ def prep_pdf_batch(
  url, c = upload_pdf(path, key)
  return create_batch_entry(path, url, cid, inc_img), c

- # %% ../nbs/00_core.ipynb 21
+ # %% ../nbs/00_core.ipynb 22
  def submit_batch(
  entries:list[dict], # List of batch entries,
  c:Mistral=None, # Mistral client,
@@ -75,20 +83,35 @@
  batch_data = c.files.upload(file=dict(file_name="batch.jsonl", content=open(f.name, "rb")), purpose="batch")
  return c.batch.jobs.create(input_files=[batch_data.id], model=model, endpoint=endpoint)

- # %% ../nbs/00_core.ipynb 24
+ # %% ../nbs/00_core.ipynb 25
+ def _check_timeout(
+ queued_time:int, # Time spent in QUEUED state (seconds)
+ timeout:int, # Maximum allowed QUEUED time (seconds)
+ job_id:str # Batch job ID
+ ):
+ "Raise TimeoutError if job has been queued longer than timeout"
+ if queued_time >= timeout: raise TimeoutError(f"Job {job_id} stayed in QUEUED for {queued_time}s, exceeding timeout of {timeout}s. Check your balance or Mistral Status.")
+
+ # %% ../nbs/00_core.ipynb 26
  def wait_for_job(
- job:dict, # Job dict,
- c:Mistral=None, # Mistral client,
- poll_interval:int=1 # Poll interval in seconds
- ) -> dict: # Job dict (with status)
+ job:dict, # Batch job from submit_batch
+ c:Mistral=None, # Mistral client
+ poll_interval:int=1, # Seconds between status checks
+ queued_timeout:int=300 # Max seconds in QUEUED before timeout
+ ) -> dict: # Completed job dict
  "Poll job until completion and return final job status"
+ logger.info(f"Waiting for batch job {job.id} (initial status: {job.status})")
+ queued_time = 0
  while job.status in ["QUEUED", "RUNNING"]:
- print(f'Mistral batch job status: {job.status}')
+ logger.debug(f"Job {job.id} status: {job.status} (elapsed: {queued_time}s)")
+ if job.status == "QUEUED": queued_time += poll_interval; _check_timeout(queued_time, queued_timeout, job.id)
  time.sleep(poll_interval)
  job = c.batch.jobs.get(job_id=job.id)
+ logger.info(f"Job {job.id} completed with status: {job.status}")
+ if job.status != "SUCCESS": logger.warning(f"Job {job.id} finished with non-success status: {job.status}")
  return job

- # %% ../nbs/00_core.ipynb 26
+ # %% ../nbs/00_core.ipynb 28
  def download_results(
  job:dict, # Job dict,
  c:Mistral=None # Mistral client
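
The queued-timeout logic above changes how long a stuck batch job can block a caller. A minimal usage sketch, assuming MISTRAL_API_KEY is set; the file name and timeout values below are placeholders, not library defaults:

```python
# Hypothetical sketch of the 0.4.0 polling behaviour; paths and timeouts are placeholders.
from mistocr.core import prep_pdf_batch, submit_batch, wait_for_job, download_results

entry, client = prep_pdf_batch("report.pdf")   # upload the PDF and build one batch entry
job = submit_batch([entry], client)            # create the Mistral batch job
try:
    # _check_timeout raises TimeoutError if the job sits in QUEUED longer than queued_timeout
    job = wait_for_job(job, client, poll_interval=2, queued_timeout=120)
except TimeoutError as e:
    print(f"Gave up waiting: {e}")
else:
    results = download_results(job, client)    # parsed JSONL records, one per PDF
```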
@@ -97,7 +120,7 @@ def download_results(
  content = c.files.download(file_id=job.output_file).read().decode('utf-8')
  return [json.loads(line) for line in content.strip().split('\n') if line]

- # %% ../nbs/00_core.ipynb 31
+ # %% ../nbs/00_core.ipynb 33
  def save_images(
  page:dict, # Page dict,
  img_dir:str='img' # Directory to save images
@@ -108,7 +131,7 @@ def save_images(
  img_bytes = base64.b64decode(img['image_base64'].split(',')[1])
  Image.open(BytesIO(img_bytes)).save(img_dir / img['id'])

- # %% ../nbs/00_core.ipynb 32
+ # %% ../nbs/00_core.ipynb 34
  def save_page(
  page:dict, # Page dict,
  dst:str, # Directory to save page
@@ -120,7 +143,7 @@ def save_page(
  img_dir.mkdir(exist_ok=True)
  save_images(page, img_dir)

- # %% ../nbs/00_core.ipynb 34
+ # %% ../nbs/00_core.ipynb 36
  def save_pages(
  ocr_resp:dict, # OCR response,
  dst:str, # Directory to save pages,
@@ -133,7 +156,7 @@ def save_pages(
  for page in ocr_resp['pages']: save_page(page, dst, img_dir)
  return dst

- # %% ../nbs/00_core.ipynb 40
+ # %% ../nbs/00_core.ipynb 42
  def _get_paths(path:str) -> list[Path]:
  "Get list of PDFs from file or folder"
  path = Path(path)
@@ -144,7 +167,7 @@ def _get_paths(path:str) -> list[Path]:
  return pdfs
  raise ValueError(f"Path not found: {path}")

- # %% ../nbs/00_core.ipynb 41
+ # %% ../nbs/00_core.ipynb 43
  def _prep_batch(pdfs:list[Path], inc_img:bool=True, key:str=None) -> tuple[list[dict], Mistral]:
  "Prepare batch entries for list of PDFs"
  entries, c = [], None
@@ -153,7 +176,7 @@ def _prep_batch(pdfs:list[Path], inc_img:bool=True, key:str=None) -> tuple[list[
  entries.append(entry)
  return entries, c

- # %% ../nbs/00_core.ipynb 42
+ # %% ../nbs/00_core.ipynb 44
  def _run_batch(entries:list[dict], c:Mistral, poll_interval:int=2) -> list[dict]:
  "Submit batch, wait for completion, and download results"
  job = submit_batch(entries, c)
@@ -161,7 +184,7 @@ def _run_batch(entries:list[dict], c:Mistral, poll_interval:int=2) -> list[dict]
  if job.status != 'SUCCESS': raise Exception(f"Job failed with status: {job.status}")
  return download_results(job, c)

- # %% ../nbs/00_core.ipynb 43
+ # %% ../nbs/00_core.ipynb 45
  def ocr_pdf(
  path:str, # Path to PDF file or folder,
  dst:str='md', # Directory to save markdown pages,
@@ -175,7 +198,7 @@ def ocr_pdf(
  results = _run_batch(entries, c, poll_interval)
  return L([save_pages(r['response']['body'], dst, r['custom_id']) for r in results])

- # %% ../nbs/00_core.ipynb 47
+ # %% ../nbs/00_core.ipynb 52
  def read_pgs(
  path:str, # OCR output directory,
  join:bool=True # Join pages into single string
@@ -185,3 +208,24 @@
  pgs = sorted(path.glob('page_*.md'), key=lambda p: int(p.stem.split('_')[1]))
  contents = L([p.read_text() for p in pgs])
  return '\n\n'.join(contents) if join else contents
+
+ # %% ../nbs/00_core.ipynb 59
+ def subset_pdf(
+ path:str, # Path to PDF file
+ start:int=1, # Start page (1-based)
+ end:int=None, # End page (1-based, inclusive)
+ dst:str='.' # Output directory
+ ) -> Path: # Path to subset PDF
+ "Extract page range from PDF and save with range suffix"
+ path = Path(path)
+ writer = PyPDF2.PdfWriter()
+ with open(path, 'rb') as f:
+ reader = PyPDF2.PdfReader(f)
+ n = len(reader.pages)
+ end = end or n
+ s, e = max(0, start-1), min(n, end) - 1
+ for i in range(s, e+1): writer.add_page(reader.pages[i])
+ suffix = f"_p{s+1}-{e+1}" if s>0 or e<n-1 else ""
+ out = Path(dst) / f"{path.stem}{suffix}.pdf"
+ with open(out, 'wb') as f: writer.write(f)
+ return out
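
The new `subset_pdf` helper pairs naturally with `ocr_pdf` when only part of a large document needs OCR. A rough sketch under assumed file names and page numbers, following the signatures shown in the diff:

```python
# Hypothetical example: OCR only pages 10-25 of a long report.
from mistocr.core import subset_pdf, ocr_pdf, read_pgs

small = subset_pdf("annual_report.pdf", start=10, end=25)  # writes ./annual_report_p10-25.pdf
out_dirs = ocr_pdf(small, dst="md")                        # batch OCR just the subset
text = read_pgs(out_dirs[0])                               # concatenated markdown of the OCR'd pages
```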
@@ -0,0 +1,48 @@
+ """End-to-End Pipeline: PDF OCR, Markdown Heading Correction, and AI Image Descriptions"""
+
+ # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/02_pipeline.ipynb.
+
+ # %% auto 0
+ __all__ = ['logger', 'pdf_to_md']
+
+ # %% ../nbs/02_pipeline.ipynb 3
+ from fastcore.all import *
+ from .core import read_pgs, ocr_pdf
+ from .refine import add_img_descs, fix_hdgs
+ from pathlib import Path
+ from asyncio import Semaphore, gather, sleep
+ import tempfile
+ import os, json, shutil
+ import logging
+
+ # %% ../nbs/02_pipeline.ipynb 4
+ logger = logging.getLogger(__name__)
+ logging.basicConfig(level=logging.WARNING, format='%(name)s - %(levelname)s - %(message)s')
+ logger.setLevel(logging.INFO)
+
+ # %% ../nbs/02_pipeline.ipynb 5
+ @delegates(add_img_descs)
+ async def pdf_to_md(
+ pdf_path:str, # Path to input PDF file
+ dst:str, # Destination directory for output markdown
+ ocr_dst:str=None, # Optional OCR output directory
+ model:str='claude-sonnet-4-5', # Model to use for heading fixes and image descriptions
+ add_img_desc:bool=True, # Whether to add image descriptions
+ progress:bool=True, # Whether to show progress messages
+ **kwargs
+ ):
+ "Convert PDF to markdown with OCR, fixed heading hierarchy, and optional image descriptions"
+ "Convert PDF to markdown with OCR, fixed heading hierarchy, and optional image descriptions"
+ cleanup = ocr_dst is None
+ if cleanup: ocr_dst = tempfile.mkdtemp()
+ n_steps = 3 if add_img_desc else 2
+ if progress: logger.info(f"Step 1/{n_steps}: Running OCR on {pdf_path}...")
+ ocr_dir = ocr_pdf(pdf_path, ocr_dst)[0]
+ if progress: logger.info(f"Step 2/{n_steps}: Fixing heading hierarchy...")
+ fix_hdgs(ocr_dir, model=model)
+ if add_img_desc:
+ if progress: logger.info(f"Step 3/{n_steps}: Adding image descriptions...")
+ await add_img_descs(ocr_dir, dst=dst, model=model, progress=progress, **kwargs)
+ elif dst != str(ocr_dir): shutil.copytree(ocr_dir, dst, dirs_exist_ok=True)
+ if cleanup: shutil.rmtree(ocr_dst)
+ if progress: logger.info("Done!")
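
Because `pdf_to_md` is a coroutine, it has to be awaited. A minimal driver sketch; the paths are placeholders, and both MISTRAL_API_KEY and ANTHROPIC_API_KEY are assumed to be set for the OCR and refinement steps:

```python
# Hypothetical driver for the new pipeline module; paths below are placeholders.
import asyncio
from mistocr.pipeline import pdf_to_md

async def main():
    # ocr_dst is omitted, so intermediate OCR output goes to a temporary
    # directory that pdf_to_md removes once the run finishes.
    await pdf_to_md("paper.pdf", dst="paper_md", add_img_desc=True)

asyncio.run(main())
```

In a notebook, `await pdf_to_md(...)` can be used directly instead of going through `asyncio.run`.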
@@ -4,9 +4,9 @@

  # %% auto 0
  __all__ = ['prompt_fix_hdgs', 'describe_img_prompt', 'get_hdgs', 'add_pg_hdgs', 'read_pgs_pg', 'fmt_hdgs_idx',
- 'HeadingCorrections', 'fix_hdg_hierarchy', 'mk_fixes_lut', 'apply_hdg_fixes', 'fix_hdgs', 'ImgDescription',
- 'describe_img', 'limit', 'parse_r', 'describe_imgs', 'save_img_descs', 'add_descs_to_pg', 'add_descs_to_pgs',
- 'add_img_descs']
+ 'HeadingCorrection', 'HeadingCorrections', 'fix_hdg_hierarchy', 'mk_fixes_lut', 'apply_hdg_fixes',
+ 'fix_hdgs', 'ImgDescription', 'describe_img', 'limit', 'parse_r', 'describe_imgs', 'save_img_descs',
+ 'add_descs_to_pg', 'add_descs_to_pgs', 'add_img_descs']

  # %% ../nbs/01_refine.ipynb 3
  from fastcore.all import *
@@ -59,14 +59,26 @@ def fmt_hdgs_idx(


  # %% ../nbs/01_refine.ipynb 18
+ class HeadingCorrection(BaseModel):
+ "A single heading correction mapping an index to its corrected markdown heading"
+ index: int
+ corrected: str
+
+ # %% ../nbs/01_refine.ipynb 19
  class HeadingCorrections(BaseModel):
- corrections: dict[int, str] # index corrected heading
+ "Collection of heading corrections returned by the LLM"
+ corrections: list[HeadingCorrection]

- # %% ../nbs/01_refine.ipynb 20
+ # %% ../nbs/01_refine.ipynb 21
  prompt_fix_hdgs = """Fix markdown heading hierarchy errors while preserving the document's intended structure.

  INPUT FORMAT: Each heading is prefixed with its index number (e.g., "0. # Title ... page 1")

+ ANALYSIS STEPS (think through these before outputting corrections):
+ 1. For each numbered heading (e.g., "4.1", "2.a", "A.1"), identify its parent heading (e.g., "4", "2", "A")
+ 2. Verify the child heading is exactly one # deeper than its parent
+ 3. If not, mark it for correction
+
  RULES - Apply these fixes in order:

  1. **Single H1 rule**: Documents must have exactly ONE # heading (typically the document title at the top)
@@ -75,8 +87,8 @@ RULES - Apply these fixes in order:
  - NO exceptions: appendices, references, and all sections are ## or deeper after the title

  2. **Infer depth from numbering patterns**: If headings contain section numbers, deeper nesting means deeper heading level
- - Parent section (e.g., "1", "2", "A") should be shallower than child (e.g., "1.1", "2.a", "A.1")
- - Child section should be one # deeper than parent
+ - Parent section (e.g., "1", "2", "A") MUST be shallower than child (e.g., "1.1", "2.a", "A.1")
+ - Child section MUST be exactly one # deeper than parent
  - Works with any numbering: "1/1.1/1.1.1", "A/A.1/A.1.a", "I/I.A/I.A.1", etc.

  3. **Level jumps**: Headings can only increase by one # at a time when moving deeper
@@ -85,16 +97,19 @@ RULES - Apply these fixes in order:

  4. **Decreasing levels is OK**: Moving back up the hierarchy (### to ##) is valid for new sections

- OUTPUT: Return a Python dictionary mapping index to corrected heading (without the index prefix).
+ 5. **Unnumbered headings in numbered documents**: If the document uses numbered headings consistently, any unnumbered heading appearing within that structure is likely misclassified bold text and should be converted to regular text (output the heading text without any # symbols in the corrected field)
+
+ OUTPUT: Return a list of corrections, where each correction has:
+ - index: the heading's index number
+ - corrected: the fixed heading text (without the index prefix), or empty string "" to remove the heading entirely
  IMPORTANT: Preserve the " ... page N" suffix in all corrected headings.
- Only include entries that need changes.
+ Only include headings that need changes.

  Headings to analyze:
  {headings_list}
  """

-
- # %% ../nbs/01_refine.ipynb 22
+ # %% ../nbs/01_refine.ipynb 23
  def fix_hdg_hierarchy(
  hdgs: list[str], # List of markdown headings
  prompt: str=None, # Prompt to use
@@ -106,10 +121,11 @@ def fix_hdg_hierarchy(
  if prompt is None: prompt = prompt_fix_hdgs
  prompt = prompt.format(headings_list=fmt_hdgs_idx(hdgs))
  r = completion(model=model, messages=[{"role": "user", "content": prompt}], response_format=HeadingCorrections, api_key=api_key)
- return json.loads(r.choices[0].message.content)['corrections']
+ fixes = json.loads(r.choices[0].message.content)['corrections']
+ return {o['index']: o['corrected'] for o in fixes}


- # %% ../nbs/01_refine.ipynb 25
+ # %% ../nbs/01_refine.ipynb 26
  @delegates(fix_hdg_hierarchy)
  def mk_fixes_lut(
  hdgs: list[str], # List of markdown headings
@@ -120,9 +136,9 @@ def mk_fixes_lut(
  "Make a lookup table of fixes"
  if api_key is None: api_key = os.getenv('ANTHROPIC_API_KEY')
  fixes = fix_hdg_hierarchy(hdgs, model=model, api_key=api_key, **kwargs)
- return {hdgs[int(k)]:v for k,v in fixes.items()}
+ return {hdgs[k]:v for k,v in fixes.items()}

- # %% ../nbs/01_refine.ipynb 28
+ # %% ../nbs/01_refine.ipynb 29
  def apply_hdg_fixes(
  p:str, # Page to fix
  lut_fixes: dict[str, str], # Lookup table of fixes
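
For readers following the schema change: the LLM now returns a list of `HeadingCorrection` objects instead of a `dict[int, str]`, and `fix_hdg_hierarchy` folds that list back into an index-keyed mapping before `mk_fixes_lut` rekeys it by heading text. A rough sketch of the intermediate shapes, with made-up heading strings:

```python
# Example values only -- illustrates the data shapes introduced in 0.4.0.
fixes = [
    {"index": 3, "corrected": "## 2.1 Methods ... page 4"},
    {"index": 7, "corrected": ""},  # empty string means: drop this heading entirely
]
# fix_hdg_hierarchy converts the list back to the old index -> heading mapping:
by_index = {o["index"]: o["corrected"] for o in fixes}
# mk_fixes_lut then rekeys it by the original heading text:
# lut = {hdgs[k]: v for k, v in by_index.items()}
```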
@@ -131,7 +147,7 @@
  for old in get_hdgs(p): p = p.replace(old, lut_fixes.get(old, old))
  return p

- # %% ../nbs/01_refine.ipynb 31
+ # %% ../nbs/01_refine.ipynb 32
  @delegates(mk_fixes_lut)
  def fix_hdgs(src:str, model:str='claude-sonnet-4-5', dst:str=None, img_folder:str='img', **kwargs):
  "Fix heading hierarchy in markdown document"
@@ -143,13 +159,13 @@ def fix_hdgs(src:str, model:str='claude-sonnet-4-5', dst:str=None, img_folder:st
  lut = mk_fixes_lut(L([get_hdgs(pg) for pg in pgs_with_pg]).concat(), model, **kwargs)
  for i,p in enumerate(pgs_with_pg, 1): (dst_path/f'page_{i}.md').write_text(apply_hdg_fixes(p, lut))

- # %% ../nbs/01_refine.ipynb 37
+ # %% ../nbs/01_refine.ipynb 38
  class ImgDescription(BaseModel):
  "Image classification and description for OCR'd documents"
  is_informative:bool # Whether image contains informative content (charts, diagrams, tables) vs decorative (logos, backgrounds)
  description:str # Detailed description of the image content for RAG and accessibility

- # %% ../nbs/01_refine.ipynb 40
+ # %% ../nbs/01_refine.ipynb 41
  describe_img_prompt = """Analyze this image from an academic/technical document.

  Step 1: Determine if this image is informative for understanding the document content.
@@ -162,7 +178,7 @@ Step 2:

  Return your response as JSON with 'is_informative' (boolean) and 'description' (string) fields."""

- # %% ../nbs/01_refine.ipynb 41
+ # %% ../nbs/01_refine.ipynb 42
  async def describe_img(
  img_path: Path, # Path to the image file
  model: str = 'claude-sonnet-4-5', # Model to use
@@ -173,7 +189,7 @@ async def describe_img(
  r = await chat([img_path.read_bytes(), prompt], response_format=ImgDescription)
  return r

- # %% ../nbs/01_refine.ipynb 45
+ # %% ../nbs/01_refine.ipynb 46
  async def limit(
  semaphore, # Semaphore for concurrency control
  coro, # Coroutine to execute
@@ -185,14 +201,14 @@ async def limit(
  if delay: await sleep(delay)
  return r

- # %% ../nbs/01_refine.ipynb 47
+ # %% ../nbs/01_refine.ipynb 48
  def parse_r(
  result # ModelResponse object from API call
  ): # Dictionary with 'is_informative' and 'description' keys
  "Extract and parse JSON content from model response"
  return json.loads(result.choices[0].message.content)

- # %% ../nbs/01_refine.ipynb 49
+ # %% ../nbs/01_refine.ipynb 50
  async def describe_imgs(
  imgs: list[Path], # List of image file paths to describe
  model: str = 'claude-sonnet-4-5', # Model to use for image description
@@ -205,7 +221,7 @@ async def describe_imgs(
  results = await gather(*[limit(sem, describe_img(img, model, prompt), delay) for img in imgs])
  return {img.name: parse_r(r) for img, r in zip(imgs, results)}

- # %% ../nbs/01_refine.ipynb 51
+ # %% ../nbs/01_refine.ipynb 52
  def save_img_descs(
  descs: dict, # Dictionary of image descriptions
  dst_fname: Path, # Path to save the JSON file
@@ -213,7 +229,7 @@
  "Save image descriptions to JSON file"
  Path(dst_fname).write_text(json.dumps(descs, indent=2))

- # %% ../nbs/01_refine.ipynb 56
+ # %% ../nbs/01_refine.ipynb 57
  def add_descs_to_pg(
  pg:str, # Page markdown content
  descs:dict # Dictionary mapping image filenames to their descriptions
@@ -224,7 +240,7 @@
  if fname in descs: pg = pg.replace(link, f"{link}\nAI-generated image description:\n___\n{descs[fname]['description']}\n___")
  return pg

- # %% ../nbs/01_refine.ipynb 61
+ # %% ../nbs/01_refine.ipynb 62
  def add_descs_to_pgs(
  pgs:list, # List of page markdown strings
  descs:dict # Dictionary mapping image filenames to their descriptions
@@ -232,7 +248,7 @@
  "Add AI-generated descriptions to images in all pages"
  return [add_descs_to_pg(pg, descs) for pg in pgs]

- # %% ../nbs/01_refine.ipynb 64
+ # %% ../nbs/01_refine.ipynb 65
  async def add_img_descs(
  src:str, # Path to source markdown directory
  dst:str=None, # Destination directory (defaults to src if None)
@@ -247,6 +263,12 @@
  src_path,dst_path = Path(src),Path(dst) if dst else Path(src)
  if dst_path != src_path: dst_path.mkdir(parents=True, exist_ok=True)
  src_imgs = src_path/img_folder
+
+ # Check if image folder exists
+ if not src_imgs.exists():
+ if progress: print(f"No images to describe in the document (no '{img_folder}' folder found)")
+ return
+
  if src_imgs.exists() and dst_path != src_path: shutil.copytree(src_imgs, dst_path/img_folder, dirs_exist_ok=True)
  desc_file = src_path/'img_descriptions.json'
  if desc_file.exists() and not force:
@@ -263,3 +285,4 @@
  enriched = [add_descs_to_pg(pg, descs) for pg in pgs]
  for i,pg in enumerate(enriched, 1): (dst_path/f'page_{i}.md').write_text(pg)
  if progress: print(f"Done! Enriched pages saved to {dst_path}")
+
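
The refine stage can also be driven on its own against an existing OCR output directory. A rough sketch; the directory names are placeholders, and ANTHROPIC_API_KEY is assumed to be available for the model calls:

```python
# Hypothetical standalone use of the refine stage on an existing OCR output directory.
import asyncio
from mistocr.refine import fix_hdgs, add_img_descs

fix_hdgs("md/report", model="claude-sonnet-4-5")  # rewrites page_*.md (in place when dst is not given)
# add_img_descs is async; with 0.4.0 it simply returns if the 'img' folder is missing
asyncio.run(add_img_descs("md/report", dst="md/report_enriched"))
```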
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: mistocr
- Version: 0.2.5
+ Version: 0.4.0
  Summary: Batch OCR for PDFs with heading restoration and visual content integration
  Home-page: https://github.com/franckalbinet/mistocr
  Author: Solveit
@@ -23,6 +23,7 @@ Requires-Dist: mistralai
  Requires-Dist: pillow
  Requires-Dist: dotenv
  Requires-Dist: lisette
+ Requires-Dist: PyPDF2
  Provides-Extra: dev
  Dynamic: author
  Dynamic: author-email
@@ -3,5 +3,6 @@ mistralai
  pillow
  dotenv
  lisette
+ PyPDF2

  [dev]
@@ -1,7 +1,7 @@
  [DEFAULT]
  repo = mistocr
  lib_name = mistocr
- version = 0.2.5
+ version = 0.4.0
  min_python = 3.9
  license = apache2
  black_formatting = False
@@ -27,7 +27,7 @@ keywords = nbdev jupyter notebook python
  language = English
  status = 3
  user = franckalbinet
- requirements = fastcore mistralai pillow dotenv lisette
+ requirements = fastcore mistralai pillow dotenv lisette PyPDF2
  readme_nb = index.ipynb
  allowed_metadata_keys =
  allowed_cell_metadata_keys =
@@ -1 +0,0 @@
- __version__ = "0.2.5"
@@ -1,37 +0,0 @@
- """End-to-End Pipeline: PDF OCR, Markdown Heading Correction, and AI Image Descriptions"""
-
- # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/02_pipeline.ipynb.
-
- # %% auto 0
- __all__ = ['pdf_to_md']
-
- # %% ../nbs/02_pipeline.ipynb 3
- from fastcore.all import *
- from .core import read_pgs, ocr_pdf
- from .refine import add_img_descs, fix_hdgs
- from pathlib import Path
- from asyncio import Semaphore, gather, sleep
- import os, json, shutil
-
- # %% ../nbs/02_pipeline.ipynb 4
- @delegates(add_img_descs)
- async def pdf_to_md(
- pdf_path:str, # Path to input PDF file
- dst:str, # Destination directory for output markdown
- ocr_output:str=None, # Optional OCR output directory (defaults to pdf_path stem)
- model:str='claude-sonnet-4-5', # Model to use for heading fixes and image descriptions
- add_img_desc:bool=True, # Whether to add image descriptions
- progress:bool=True, # Whether to show progress messages
- **kwargs):
- "Convert PDF to markdown with OCR, fixed heading hierarchy, and optional image descriptions"
- n_steps = 3 if add_img_desc else 2
- if progress: print(f"Step 1/{n_steps}: Running OCR on {pdf_path}...")
- ocr_dirs = ocr_pdf(pdf_path, ocr_output or 'ocr_temp')
- ocr_dir = ocr_dirs[0]
- if progress: print(f"Step 2/{n_steps}: Fixing heading hierarchy...")
- fix_hdgs(ocr_dir, model=model)
- if add_img_desc:
- if progress: print(f"Step 3/{n_steps}: Adding image descriptions...")
- await add_img_descs(ocr_dir, dst=dst, model=model, progress=progress, **kwargs)
- elif dst and Path(dst) != ocr_dir: shutil.copytree(ocr_dir, dst, dirs_exist_ok=True)
- if progress: print("Done!")