docling 2.58.0__py3-none-any.whl → 2.59.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



docling/backend/msexcel_backend.py CHANGED
@@ -139,10 +139,14 @@ class MsExcelDocumentBackend(DeclarativeDocumentBackend, PaginatedDocumentBacken
         self.workbook = None
         try:
            if isinstance(self.path_or_stream, BytesIO):
-                self.workbook = load_workbook(filename=self.path_or_stream)
+                self.workbook = load_workbook(
+                    filename=self.path_or_stream, data_only=True
+                )

            elif isinstance(self.path_or_stream, Path):
-                self.workbook = load_workbook(filename=str(self.path_or_stream))
+                self.workbook = load_workbook(
+                    filename=str(self.path_or_stream), data_only=True
+                )

            self.valid = self.workbook is not None
        except Exception as e:
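A note on the change above: passing data_only=True to openpyxl's load_workbook makes formula cells return the value cached by the last application that saved the workbook instead of the formula string, so docling now extracts computed results rather than raw formulas. A minimal sketch of the difference (report.xlsx is a hypothetical file):

```python
from openpyxl import load_workbook

# Default (data_only=False): formula cells yield the formula text.
wb = load_workbook("report.xlsx")  # hypothetical workbook
print(wb.active["A1"].value)  # e.g. "=SUM(B1:B10)"

# data_only=True: formula cells yield the cached computed value
# (None if the file was never saved by an app that caches values).
wb = load_workbook("report.xlsx", data_only=True)
print(wb.active["A1"].value)  # e.g. 42
```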
docling/cli/main.py CHANGED
@@ -738,10 +738,15 @@ def convert( # noqa: C901

                pipeline_options.vlm_options = SMOLDOCLING_MLX
            except ImportError:
-                _log.warning(
-                    "To run SmolDocling faster, please install mlx-vlm:\n"
-                    "pip install mlx-vlm"
-                )
+                if sys.version_info < (3, 14):
+                    _log.warning(
+                        "To run SmolDocling faster, please install mlx-vlm:\n"
+                        "pip install mlx-vlm"
+                    )
+                else:
+                    _log.warning(
+                        "You can run SmolDocling faster with MLX support, but it is unfortunately not yet available on Python 3.14."
+                    )

        elif vlm_model == VlmModelType.GRANITEDOCLING:
            pipeline_options.vlm_options = GRANITEDOCLING_TRANSFORMERS
@@ -751,10 +756,16 @@ def convert( # noqa: C901

                pipeline_options.vlm_options = GRANITEDOCLING_MLX
            except ImportError:
-                _log.warning(
-                    "To run GraniteDocling faster, please install mlx-vlm:\n"
-                    "pip install mlx-vlm"
-                )
+                if sys.version_info < (3, 14):
+                    _log.warning(
+                        "To run GraniteDocling faster, please install mlx-vlm:\n"
+                        "pip install mlx-vlm"
+                    )
+                else:
+                    _log.warning(
+                        "You can run GraniteDocling faster with MLX support, but it is unfortunately not yet available on Python 3.14."
+                    )
+
        elif vlm_model == VlmModelType.SMOLDOCLING_VLLM:
            pipeline_options.vlm_options = SMOLDOCLING_VLLM

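Both CLI branches above apply the same guard: mlx-vlm has no wheels for Python 3.14 yet, so the install hint is only shown where installing is actually possible. A generic sketch of the version-gated warning pattern:

```python
import logging
import sys

_log = logging.getLogger(__name__)

try:
    import mlx_vlm  # noqa: F401  # optional accelerator (Apple Silicon)
except ImportError:
    if sys.version_info < (3, 14):
        # Installable on this interpreter; tell the user how.
        _log.warning("Install mlx-vlm for faster inference: pip install mlx-vlm")
    else:
        # Not installable yet; an install hint would be misleading.
        _log.warning("mlx-vlm is not yet available on Python 3.14.")
```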
docling/datamodel/base_models.py CHANGED
@@ -207,6 +207,8 @@ class VlmPrediction(BaseModel):
     text: str = ""
     generated_tokens: list[VlmPredictionToken] = []
     generation_time: float = -1
+    num_tokens: Optional[int] = None
+    stop_reason: Optional[str] = None  # todo define an enum for possible stop reasons


 class ContainerElement(
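Both new VlmPrediction fields default to None, so existing call sites that only pass text keep validating; stop_reason stays a free-form string until the TODO'd enum exists. A hedged construction sketch (values illustrative):

```python
from docling.datamodel.base_models import VlmPrediction

pred = VlmPrediction(
    text="<doctag>...</doctag>",
    generation_time=1.8,
    num_tokens=512,      # new in 2.59.0
    stop_reason="stop",  # new in 2.59.0; free-form until an enum is defined
)
```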
docling/datamodel/pipeline_options_vlm_model.py CHANGED
@@ -82,6 +82,7 @@ class InlineVlmOptions(BaseVlmOptions):

     use_kv_cache: bool = True
     max_new_tokens: int = 4096
+    track_generated_tokens: bool = False

     @property
     def repo_cache_folder(self) -> str:
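track_generated_tokens is an opt-in flag: when enabled, the vLLM model below attaches the full per-token list to each prediction instead of only the aggregate count. A sketch of enabling it on an existing preset, assuming the preset is an InlineVlmOptions instance exported from docling.datamodel.vlm_model_specs:

```python
from docling.datamodel.vlm_model_specs import GRANITEDOCLING_TRANSFORMERS

# Pydantic model_copy leaves the shared preset untouched. Tracking every
# token is useful for debugging but costs memory per page.
opts = GRANITEDOCLING_TRANSFORMERS.model_copy(
    update={"track_generated_tokens": True}
)
```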
docling/models/api_vlm_model.py CHANGED
@@ -73,7 +73,7 @@ class ApiVlmModel(BasePageModel):
                # Skip non-GenerationStopper criteria (should have been caught in validation)

                # Streaming path with early abort support
-                page_tags = api_image_request_streaming(
+                page_tags, num_tokens = api_image_request_streaming(
                    image=hi_res_image,
                    prompt=prompt,
                    url=self.vlm_options.url,
@@ -84,7 +84,7 @@ class ApiVlmModel(BasePageModel):
                )
            else:
                # Non-streaming fallback (existing behavior)
-                page_tags = api_image_request(
+                page_tags, num_tokens = api_image_request(
                    image=hi_res_image,
                    prompt=prompt,
                    url=self.vlm_options.url,
@@ -94,7 +94,9 @@ class ApiVlmModel(BasePageModel):
                )

            page_tags = self.vlm_options.decode_response(page_tags)
-            page.predictions.vlm_response = VlmPrediction(text=page_tags)
+            page.predictions.vlm_response = VlmPrediction(
+                text=page_tags, num_tokens=num_tokens
+            )
            return page

        with ThreadPoolExecutor(max_workers=self.concurrency) as executor:
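Since both helpers now return a (text, num_tokens) tuple, any code calling them directly must unpack two values; num_tokens may be None when the server response carries no usage block. A sketch against the non-streaming helper (endpoint and image are placeholders):

```python
from PIL import Image

from docling.utils.api_image_request import api_image_request

page_image = Image.new("RGB", (640, 480), "white")  # placeholder page render
text, num_tokens = api_image_request(
    image=page_image,
    prompt="Convert this page to DocTags.",
    url="http://localhost:8000/v1/chat/completions",  # hypothetical endpoint
    timeout=60,
)
if num_tokens is not None:
    print(f"server reported {num_tokens} tokens")
```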
docling/models/picture_description_vlm_model.py CHANGED
@@ -1,3 +1,4 @@
+import sys
 import threading
 from collections.abc import Iterable
 from pathlib import Path
@@ -75,7 +76,10 @@ class PictureDescriptionVlmModel(
                else "sdpa"
            ),
        )
-        self.model = torch.compile(self.model)  # type: ignore
+        if sys.version_info < (3, 14):
+            self.model = torch.compile(self.model)  # type: ignore
+        else:
+            self.model.eval()

        self.provenance = f"{self.options.repo_id}"

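The same guard recurs in hf_transformers_model.py and nuextract_transformers_model.py below: torch.compile relies on TorchDynamo, which does not yet support CPython 3.14, so on that interpreter the model stays uncompiled and is only switched to inference mode. The pattern in isolation:

```python
import sys

import torch

model = torch.nn.Linear(4, 4)  # stand-in for the loaded model

if sys.version_info < (3, 14):
    # torch.compile wraps the module with TorchDynamo JIT machinery.
    model = torch.compile(model)
else:
    # Eager fallback; eval() still disables dropout and similar layers.
    model.eval()
```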
docling/models/vlm_models_inline/hf_transformers_model.py CHANGED
@@ -1,5 +1,6 @@
 import importlib.metadata
 import logging
+import sys
 import time
 from collections.abc import Iterable
 from pathlib import Path
@@ -129,7 +130,10 @@ class HuggingFaceTransformersVlmModel(BaseVlmPageModel, HuggingFaceModelDownload
            trust_remote_code=vlm_options.trust_remote_code,
            revision=vlm_options.revision,
        )
-        self.vlm_model = torch.compile(self.vlm_model)  # type: ignore
+        if sys.version_info < (3, 14):
+            self.vlm_model = torch.compile(self.vlm_model)  # type: ignore
+        else:
+            self.vlm_model.eval()

        # Load generation config
        self.generation_config = GenerationConfig.from_pretrained(
@@ -363,13 +367,19 @@ class HuggingFaceTransformersVlmModel(BaseVlmPageModel, HuggingFaceModelDownload
        decoded_texts = [text.rstrip(pad_token) for text in decoded_texts]

        # -- Optional logging
+        num_tokens = None
        if generated_ids.shape[0] > 0:
+            num_tokens = int(generated_ids[0].shape[0])
            _log.debug(
-                f"Generated {int(generated_ids[0].shape[0])} tokens in {generation_time:.2f}s "
+                f"Generated {num_tokens} tokens in {generation_time:.2f}s "
                f"for batch size {generated_ids.shape[0]}."
            )

        for text in decoded_texts:
            # Apply decode_response to the output text
            decoded_text = self.vlm_options.decode_response(text)
-            yield VlmPrediction(text=decoded_text, generation_time=generation_time)
+            yield VlmPrediction(
+                text=decoded_text,
+                generation_time=generation_time,
+                num_tokens=num_tokens,
+            )
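One caveat in the hunk above: num_tokens is read from the first sequence in the batch (generated_ids[0].shape[0]) and attached to every yielded prediction, so with batch sizes above one it is an approximation rather than an exact per-page count. Illustrated:

```python
import torch

# Shape (batch, seq_len): the first row's length is reported for every
# prediction in the batch, padding included.
generated_ids = torch.tensor([[101, 7, 8, 102], [101, 9, 102, 0]])
num_tokens = int(generated_ids[0].shape[0])  # 4, reported for both pages
```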
docling/models/vlm_models_inline/mlx_model.py CHANGED
@@ -50,9 +50,14 @@ class HuggingFaceMlxModel(BaseVlmPageModel, HuggingFaceModelDownloadMixin):
            from mlx_vlm.prompt_utils import apply_chat_template  # type: ignore
            from mlx_vlm.utils import load_config  # type: ignore
        except ImportError:
-            raise ImportError(
-                "mlx-vlm is not installed. Please install it via `pip install mlx-vlm` to use MLX VLM models."
-            )
+            if sys.version_info < (3, 14):
+                raise ImportError(
+                    "mlx-vlm is not installed. Please install it via `pip install mlx-vlm` to use MLX VLM models."
+                )
+            else:
+                raise ImportError(
+                    "mlx-vlm is not installed. It is not yet available on Python 3.14."
+                )

        repo_cache_folder = vlm_options.repo_id.replace("/", "--")

@@ -313,5 +318,6 @@ class HuggingFaceMlxModel(BaseVlmPageModel, HuggingFaceModelDownloadMixin):
                    text=decoded_output,
                    generation_time=generation_time,
                    generated_tokens=tokens,
+                    num_tokens=len(tokens),
                )
                _log.debug("MLX model: Released global lock")
docling/models/vlm_models_inline/nuextract_transformers_model.py CHANGED
@@ -1,4 +1,5 @@
 import logging
+import sys
 import time
 from collections.abc import Iterable
 from pathlib import Path
@@ -153,7 +154,10 @@ class NuExtractTransformersModel(BaseVlmModel, HuggingFaceModelDownloadMixin):
            ),
            trust_remote_code=vlm_options.trust_remote_code,
        )
-        self.vlm_model = torch.compile(self.vlm_model)  # type: ignore
+        if sys.version_info < (3, 14):
+            self.vlm_model = torch.compile(self.vlm_model)  # type: ignore
+        else:
+            self.vlm_model.eval()

        # Load generation config
        self.generation_config = GenerationConfig.from_pretrained(artifacts_path)
@@ -278,13 +282,19 @@ class NuExtractTransformersModel(BaseVlmModel, HuggingFaceModelDownloadMixin):
        )

        # Optional logging
+        num_tokens = None
        if generated_ids.shape[0] > 0:  # type: ignore
+            num_tokens = int(generated_ids[0].shape[0])
            _log.debug(
-                f"Generated {int(generated_ids[0].shape[0])} tokens in {generation_time:.2f}s "
+                f"Generated {num_tokens} tokens in {generation_time:.2f}s "
                f"for batch size {generated_ids.shape[0]}."  # type: ignore
            )

        for text in decoded_texts:
            # Apply decode_response to the output text
            decoded_text = self.vlm_options.decode_response(text)
-            yield VlmPrediction(text=decoded_text, generation_time=generation_time)
+            yield VlmPrediction(
+                text=decoded_text,
+                generation_time=generation_time,
+                num_tokens=num_tokens,
+            )
docling/models/vlm_models_inline/vllm_model.py CHANGED
@@ -1,4 +1,5 @@
 import logging
+import sys
 import time
 from collections.abc import Iterable
 from pathlib import Path
@@ -8,7 +9,7 @@ import numpy as np
 from PIL.Image import Image

 from docling.datamodel.accelerator_options import AcceleratorOptions
-from docling.datamodel.base_models import Page, VlmPrediction
+from docling.datamodel.base_models import Page, VlmPrediction, VlmPredictionToken
 from docling.datamodel.document import ConversionResult
 from docling.datamodel.pipeline_options_vlm_model import (
     InlineVlmOptions,
@@ -87,7 +88,7 @@ class VllmVlmModel(BaseVlmPageModel, HuggingFaceModelDownloadMixin):
        vlm_options: InlineVlmOptions,
    ):
        self.enabled = enabled
-        self.vlm_options = vlm_options
+        self.vlm_options: InlineVlmOptions = vlm_options

        self.llm = None
        self.sampling_params = None
@@ -100,7 +101,18 @@ class VllmVlmModel(BaseVlmPageModel, HuggingFaceModelDownloadMixin):
            return

        from transformers import AutoProcessor
-        from vllm import LLM, SamplingParams
+
+        try:
+            from vllm import LLM, SamplingParams
+        except ImportError:
+            if sys.version_info < (3, 14):
+                raise ImportError(
+                    "vllm is not installed. Please install it via `pip install vllm`."
+                )
+            else:
+                raise ImportError(
+                    "vllm is not installed. It is not yet available on Python 3.14."
+                )

        # Device selection
        self.device = decide_device(
@@ -222,7 +234,8 @@ class VllmVlmModel(BaseVlmPageModel, HuggingFaceModelDownloadMixin):
                pages_with_images.append(page)

            if images:
-                predictions = list(self.process_images(images, user_prompts))
+                with TimeRecorder(conv_res, "vlm_inference"):
+                    predictions = list(self.process_images(images, user_prompts))
                for page, prediction in zip(pages_with_images, predictions):
                    page.predictions.vlm_response = prediction

@@ -288,13 +301,34 @@ class VllmVlmModel(BaseVlmPageModel, HuggingFaceModelDownloadMixin):
        # Optional debug
        if outputs:
            try:
-                num_tokens = len(outputs[0].outputs[0].token_ids)
-                _log.debug(f"Generated {num_tokens} tokens in {generation_time:.2f}s.")
+                num_tokens_within_batch = len(outputs[0].outputs[0].token_ids)
+                _log.debug(
+                    f"Generated {num_tokens_within_batch} tokens for batch in {generation_time:.2f}s."
+                )
            except Exception:
-                pass
+                num_tokens_within_batch = 0

        # Emit predictions
        for output in outputs:
            text = output.outputs[0].text if output.outputs else ""
+            stop_reason = output.outputs[0].stop_reason if output.outputs else ""
+            generated_tokens = [
+                VlmPredictionToken(token=int(p)) for p in output.outputs[0].token_ids
+            ]
+            num_tokens = len(generated_tokens)
            decoded_text = self.vlm_options.decode_response(text)
-            yield VlmPrediction(text=decoded_text, generation_time=generation_time)
+            if self.vlm_options.track_generated_tokens:
+                yield VlmPrediction(
+                    text=decoded_text,
+                    generation_time=generation_time,
+                    num_tokens=num_tokens,
+                    stop_reason=stop_reason,
+                    generated_tokens=generated_tokens,
+                )
+            else:
+                yield VlmPrediction(
+                    text=decoded_text,
+                    generation_time=generation_time,
+                    num_tokens=num_tokens,
+                    stop_reason=stop_reason,
+                )
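Downstream, the new accounting surfaces on each page's VLM prediction. A hedged usage sketch based on docling's converter API (input file and options illustrative):

```python
from docling.datamodel.base_models import InputFormat
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.vlm_pipeline import VlmPipeline

converter = DocumentConverter(
    format_options={InputFormat.PDF: PdfFormatOption(pipeline_cls=VlmPipeline)}
)
conv_res = converter.convert("page.pdf")  # hypothetical input

for page in conv_res.pages:
    pred = page.predictions.vlm_response
    if pred is not None:
        # num_tokens/stop_reason are new in 2.59.0 and may be None.
        print(f"tokens={pred.num_tokens} stop_reason={pred.stop_reason}")
```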
docling/pipeline/asr_pipeline.py CHANGED
@@ -1,6 +1,7 @@
 import logging
 import os
 import re
+import sys
 import tempfile
 from io import BytesIO
 from pathlib import Path
@@ -117,9 +118,15 @@ class _NativeWhisperModel:
        try:
            import whisper  # type: ignore
        except ImportError:
-            raise ImportError(
-                "whisper is not installed. Please install it via `pip install openai-whisper` or do `uv sync --extra asr`."
-            )
+            if sys.version_info < (3, 14):
+                raise ImportError(
+                    "whisper is not installed. Please install it via `pip install openai-whisper` or do `uv sync --extra asr`."
+                )
+            else:
+                raise ImportError(
+                    "whisper is not installed. Unfortunately its dependencies are not yet available for Python 3.14."
+                )
+
        self.asr_options = asr_options
        self.max_tokens = asr_options.max_new_tokens
        self.temperature = asr_options.temperature
docling/utils/api_image_request.py CHANGED
@@ -2,7 +2,7 @@ import base64
 import json
 import logging
 from io import BytesIO
-from typing import Optional
+from typing import Dict, List, Optional, Tuple

 import requests
 from PIL import Image
@@ -21,7 +21,7 @@ def api_image_request(
     timeout: float = 20,
     headers: Optional[dict[str, str]] = None,
     **params,
-) -> str:
+) -> Tuple[str, Optional[int]]:
     img_io = BytesIO()
     image.save(img_io, "PNG")
     image_base64 = base64.b64encode(img_io.getvalue()).decode("utf-8")
@@ -60,7 +60,8 @@ def api_image_request(

     api_resp = OpenAiApiResponse.model_validate_json(r.text)
     generated_text = api_resp.choices[0].message.content.strip()
-    return generated_text
+    num_tokens = api_resp.usage.total_tokens
+    return generated_text, num_tokens


 def api_image_request_streaming(
@@ -72,7 +73,7 @@ def api_image_request_streaming(
     headers: Optional[dict[str, str]] = None,
     generation_stoppers: list[GenerationStopper] = [],
     **params,
-) -> str:
+) -> Tuple[str, Optional[int]]:
    """
    Stream a chat completion from an OpenAI-compatible server (e.g., vLLM).
    Parses SSE lines: 'data: {json}\\n\\n', terminated by 'data: [DONE]'.
@@ -150,6 +151,16 @@ def api_image_request_streaming(
                _log.debug("Unexpected SSE chunk shape: %s", e)
                piece = ""

+            # Try to extract token count
+            num_tokens = None
+            try:
+                if "usage" in obj:
+                    usage = obj["usage"]
+                    num_tokens = usage.get("total_tokens")
+            except Exception as e:
+                num_tokens = None
+                _log.debug("Usage key not included in response: %s", e)
+
            if piece:
                full_text.append(piece)
                for stopper in generation_stoppers:
@@ -162,6 +173,6 @@ def api_image_request_streaming(
                    # closing the connection when we exit the 'with' block.
                    # vLLM/OpenAI-compatible servers will detect the client disconnect
                    # and abort the request server-side.
-                    return "".join(full_text)
+                    return "".join(full_text), num_tokens

-    return "".join(full_text)
+    return "".join(full_text), num_tokens
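Two notes on the usage extraction above: a missing usage block simply leaves num_tokens as None, and many OpenAI-compatible servers only emit usage in a streaming response when the request opts in via stream_options (typically {"include_usage": true}), usually in the final chunk. A small parsing sketch mirroring the hunk's logic:

```python
import json

# Final SSE data chunk from an OpenAI-compatible server (illustrative values).
chunk = '{"choices": [], "usage": {"total_tokens": 1313}}'
obj = json.loads(chunk)

# Tolerant lookup, as in the hunk: absent `usage` leaves num_tokens as None.
num_tokens = obj.get("usage", {}).get("total_tokens")
print(num_tokens)  # 1313
```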
docling-2.59.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: docling
-Version: 2.58.0
+Version: 2.59.0
 Summary: SDK and CLI for parsing PDF, DOCX, HTML, and more, to a unified document representation for powering downstream workflows such as gen AI applications.
 Author-email: Christoph Auer <cau@zurich.ibm.com>, Michele Dolfi <dol@zurich.ibm.com>, Maxim Lysak <mly@zurich.ibm.com>, Nikos Livathinos <nli@zurich.ibm.com>, Ahmed Nassar <ahn@zurich.ibm.com>, Panos Vagenas <pva@zurich.ibm.com>, Peter Staar <taa@zurich.ibm.com>
 License-Expression: MIT
@@ -22,6 +22,7 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
 Requires-Python: <4.0,>=3.9
 Description-Content-Type: text/markdown
 License-File: LICENSE
@@ -45,7 +46,7 @@ Requires-Dist: beautifulsoup4<5.0.0,>=4.12.3
 Requires-Dist: pandas<3.0.0,>=2.1.4
 Requires-Dist: marko<3.0.0,>=2.1.2
 Requires-Dist: openpyxl<4.0.0,>=3.1.5
-Requires-Dist: lxml<6.0.0,>=4.0.0
+Requires-Dist: lxml<7.0.0,>=4.0.0
 Requires-Dist: pillow<12.0.0,>=10.0.0
 Requires-Dist: tqdm<5.0.0,>=4.65.0
 Requires-Dist: pluggy<2.0.0,>=1.0.0
@@ -62,15 +63,15 @@ Requires-Dist: ocrmac<2.0.0,>=1.0.0; sys_platform == "darwin" and extra == "ocrm
 Provides-Extra: vlm
 Requires-Dist: transformers<5.0.0,>=4.46.0; extra == "vlm"
 Requires-Dist: accelerate<2.0.0,>=1.2.1; extra == "vlm"
-Requires-Dist: mlx-vlm<1.0.0,>=0.3.0; (python_version >= "3.10" and sys_platform == "darwin" and platform_machine == "arm64") and extra == "vlm"
-Requires-Dist: vllm<1.0.0,>=0.10.0; (python_version >= "3.10" and sys_platform == "linux" and platform_machine == "x86_64") and extra == "vlm"
+Requires-Dist: mlx-vlm<1.0.0,>=0.3.0; (python_version >= "3.10" and python_version < "3.14" and sys_platform == "darwin" and platform_machine == "arm64") and extra == "vlm"
+Requires-Dist: vllm<1.0.0,>=0.10.0; (python_version >= "3.10" and python_version < "3.14" and sys_platform == "linux" and platform_machine == "x86_64") and extra == "vlm"
 Requires-Dist: qwen-vl-utils>=0.0.11; extra == "vlm"
 Provides-Extra: rapidocr
-Requires-Dist: rapidocr<4.0.0,>=3.3; python_version < "3.14" and extra == "rapidocr"
-Requires-Dist: onnxruntime<2.0.0,>=1.7.0; extra == "rapidocr"
+Requires-Dist: rapidocr<4.0.0,>=3.3; extra == "rapidocr"
+Requires-Dist: onnxruntime<2.0.0,>=1.7.0; python_version < "3.14" and extra == "rapidocr"
 Provides-Extra: asr
-Requires-Dist: mlx-whisper>=0.4.3; (python_version >= "3.10" and sys_platform == "darwin" and platform_machine == "arm64") and extra == "asr"
-Requires-Dist: openai-whisper>=20250625; extra == "asr"
+Requires-Dist: mlx-whisper>=0.4.3; (python_version >= "3.10" and python_version < "3.14" and sys_platform == "darwin" and platform_machine == "arm64") and extra == "asr"
+Requires-Dist: openai-whisper>=20250625; python_version < "3.14" and extra == "asr"
 Dynamic: license-file

 <p align="center">
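The metadata changes express the new Python 3.14 support through environment markers: the 3.14 classifier is added, the lxml cap is raised to <7.0.0, and dependencies without 3.14 wheels (mlx-vlm, vllm, onnxruntime, openai-whisper, mlx-whisper) gain python_version < "3.14" markers so the extras still resolve on newer interpreters. A quick way to check how such a marker evaluates on the current interpreter, using the packaging library:

```python
from packaging.markers import Marker

# The marker added to the mlx-vlm requirement in this release.
marker = Marker(
    'python_version >= "3.10" and python_version < "3.14" '
    'and sys_platform == "darwin" and platform_machine == "arm64"'
)
# True only on CPython 3.10-3.13 on Apple Silicon macOS.
print(marker.evaluate())
```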
docling-2.59.0.dist-info/RECORD CHANGED
@@ -13,7 +13,7 @@ docling/backend/docling_parse_v4_backend.py,sha256=tBJR0BbKFOIDKSngjVDu0BrzTj7qU
 docling/backend/html_backend.py,sha256=m91kRxMhQ1w-7G6MHA9l01dgF8-YQNn8ZNx9lwG467M,52935
 docling/backend/md_backend.py,sha256=_0ToiecsGwU4H4BBso4ar9TGJi8OTwSXjgmi66vSJVQ,23513
 docling/backend/mets_gbs_backend.py,sha256=EA8sY6tbmGiysKGYPPZiNlK-i7Adn8bLTo-7Ym15hTU,12774
-docling/backend/msexcel_backend.py,sha256=-iWLdIonMZl2FCfPAXFQKIQzFOJn5InpH6KDAJ_L64o,22760
+docling/backend/msexcel_backend.py,sha256=ujU8qoevNhLDWffihMlSYFVl7B3y_Uu5g-yispWyt8Q,22868
 docling/backend/mspowerpoint_backend.py,sha256=71W_iV31Rggqn9UcMzXmsZ3QKMRpsBT8fCwdjsIIKAs,15109
 docling/backend/msword_backend.py,sha256=zNJy-KM3Ia-L8IQ4sjYxATW4owFxbg2CK0rzke8y-7w,57451
 docling/backend/noop_backend.py,sha256=EOPbD86FzZPX-K_DpNrJh0_lC0bZz--4DpG-OagDNGY,1688
@@ -32,24 +32,24 @@ docling/backend/xml/jats_backend.py,sha256=_BWpQQg3SlsHAOOj0v2qRJoVqaQzL91GqN1tK
 docling/backend/xml/uspto_backend.py,sha256=Tv4CE7V5_QwxTNJPl90CAd_mAbwaLGy8S6s6evh1Xow,70910
 docling/chunking/__init__.py,sha256=h83TDs0AuOV6oEPLAPrn9dpGKiU-2Vg6IRNo4cv6GDA,346
 docling/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-docling/cli/main.py,sha256=x_mPS3g3Zw60_9bL_oo9OfPBmuSd-aJV7oKTPD0GjS4,36772
+docling/cli/main.py,sha256=T7MllU1e2zYoKekpEHPv7VdI4cypL6K5zzCfscHCRro,37404
 docling/cli/models.py,sha256=zZBFQJAD7C5sespnYy5M__4qC_GyqAZ-QpfWtgPRDB0,6343
 docling/cli/tools.py,sha256=QhtRxQG0TVrfsMqdv5i7J0_qQy1ZZyWYnHPwJl7b5oY,322
 docling/datamodel/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 docling/datamodel/accelerator_options.py,sha256=wv6dOFTVAwr9onkE-0pfUqX_fDb6gX53iPPE6o8nKjI,2511
 docling/datamodel/asr_model_specs.py,sha256=gQJkW7DaSPiOuW_0QoI5OzR1_DQGRkw7yQlrVJ4hyo0,14473
 docling/datamodel/backend_options.py,sha256=2zSbJRtBmJ6Twywj8pLOKaHhklY85XaGXUmSLX_SfgQ,2473
-docling/datamodel/base_models.py,sha256=pC9CvVxMzcujKAG0TTObkYznKp8gIFdzDMDmgk5FjMQ,12697
+docling/datamodel/base_models.py,sha256=AmKIWnqjKo0WgUg6SsHJpN_et_B4rR6em0NEfJ1JKxU,12821
 docling/datamodel/document.py,sha256=T9OogC1kIm0VDSC2ZFcFgWdcOjXzw5JvGr2y2hMlx3s,18795
 docling/datamodel/extraction.py,sha256=7dgvtK5SuvgfB8LHAwS1FwrW1kcMQJuJG0ol8uAQgoQ,1323
 docling/datamodel/layout_model_specs.py,sha256=GSkJ-Z_0PVgwWGi7C7TsxbzRjlrWS9ZrHJjHumv-Z5U,2339
 docling/datamodel/pipeline_options.py,sha256=dklSaA7P6VkjbBB-Pz2OyzO2SQuV9y0I8VVr9XHJusw,11692
 docling/datamodel/pipeline_options_asr_model.py,sha256=cLqtRHBr2kbTNXRJ1ZhFGiXIK7Nl9RFmz2Wd7tJF2Jg,2172
-docling/datamodel/pipeline_options_vlm_model.py,sha256=Szdq5_MhqQ8xBCvOUkdn_LLV29ZMQJcF4xnItYlkmXQ,3090
+docling/datamodel/pipeline_options_vlm_model.py,sha256=JBdpfN3nASD5_DaAUe0tla20-Mia8fkveyNw7wVTJ4c,3131
 docling/datamodel/settings.py,sha256=c0MTw6pO5be_BKxHKYl4SaBJAw_qL-aapxp-g5HHj1A,2084
 docling/datamodel/vlm_model_specs.py,sha256=9TTmihDEFcI-TY1jJ2GTnTcrGa3bLg0e6anN4gPtFgU,10035
 docling/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-docling/models/api_vlm_model.py,sha256=iNQ9LiT031Mch-LHn8O2CskVXYkr4weEetZPxynU_9U,4236
+docling/models/api_vlm_model.py,sha256=tZHXS_weqkhgVse1JbrpvjzAyCxW8br78eRYrlMSG3k,4321
 docling/models/auto_ocr_model.py,sha256=nn_eQfNdGUclXKrB0nodHmCqgMUNUJzG3dLq0lhlNAI,5188
 docling/models/base_model.py,sha256=QEbglxu3kT6aNq3x_5jY8T_KcD_Hhv9zr0-A4Mizhco,7252
 docling/models/base_ocr_model.py,sha256=kT8TylASOpPlY60rIG6VL6_eLVsfg5KvEVnZHzDWtR0,8193
@@ -62,7 +62,7 @@ docling/models/page_assemble_model.py,sha256=TvN1naez7dUodLxpUUBzpuMCpqZBTf6YSpe
 docling/models/page_preprocessing_model.py,sha256=EmusNexws5ZmR93js_saVU0BedqZ_HIHQeY7lcf52tI,5284
 docling/models/picture_description_api_model.py,sha256=o3EkV5aHW_6WzE_fdj_VRnNCrS_btclO_ZCLAUqrfl0,2377
 docling/models/picture_description_base_model.py,sha256=kLthLhdlgwhootQ4_xhhcAk6A-vso5-qcsFJ3TcYfO0,2991
-docling/models/picture_description_vlm_model.py,sha256=Uja_BQSk7F-U1J2hm4yeLguirUzKYv1K8zRyw1IYomY,4150
+docling/models/picture_description_vlm_model.py,sha256=7-reEy5gNxKgOB-VMiysemTwoasZhO5H8VyX4NUEY-4,4272
 docling/models/rapid_ocr_model.py,sha256=JGeed1aNO64SYFgxlOifdut4fynUJyBuyyQrfuSno-4,13182
 docling/models/readingorder_model.py,sha256=gnRFfJAXH-zKtQJws5Zb1_KCVvu_dAq9pgaDYQKCt9s,17236
 docling/models/table_structure_model.py,sha256=7g_mFf1YzfF8PXQfefNu6XYZu7TzJAn86zKb6IEUdCg,12518
@@ -78,12 +78,12 @@ docling/models/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
 docling/models/utils/generation_utils.py,sha256=0ZfMBMbolHAWjdbMza8FbD4_jQ4VY6ReUa4gqVLwMoU,5365
 docling/models/utils/hf_model_download.py,sha256=VlKna9tLIVOGQkIRQBXfDimPIIyeRV7cFCbuOVmFQiU,1092
 docling/models/vlm_models_inline/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
-docling/models/vlm_models_inline/hf_transformers_model.py,sha256=Vr6ZIKMVBGQrb0tXl0dVuqYMorDPEnNdF1axAkUpF1Q,14785
-docling/models/vlm_models_inline/mlx_model.py,sha256=ae7hDMgBsMLkqulmbKDamGSSrLJcroYsP1HApJ90IZM,13471
-docling/models/vlm_models_inline/nuextract_transformers_model.py,sha256=jLNtlkMDheUyWot7Oqq-GHQIYzJ0fZrbReq5xCnYb9E,10506
-docling/models/vlm_models_inline/vllm_model.py,sha256=vXClayYxPGX1jzQ1Rvf3vvwtW9khgApGvcRz4Qbyu7I,10293
+docling/models/vlm_models_inline/hf_transformers_model.py,sha256=ylhdnY6A2nUkLQ2Ki-o-Jn8_kjO-JbYKdhnDXmGPB7Y,15047
+docling/models/vlm_models_inline/mlx_model.py,sha256=_q1fVmVaEfnKTVp78djO4MSUA7LrF0JtCnMjTKnotT8,13749
+docling/models/vlm_models_inline/nuextract_transformers_model.py,sha256=f-Djq2G6JLT-RE2LoEP3b2Q-LI33NsGM7Qxo4f6TkeA,10768
+docling/models/vlm_models_inline/vllm_model.py,sha256=gIGZha3YCPBlJGgbjtqpRkiNrOqQszsOT3ZZZu1xbYo,11671
 docling/pipeline/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-docling/pipeline/asr_pipeline.py,sha256=44lweVOCkFe8KikgXJjqDtfHewIotYvc242Xvgl9fV0,15744
+docling/pipeline/asr_pipeline.py,sha256=rzEMHkbZfTmCwl4mjMa2bWRlVmkajC5nKBaY0bT7qj0,16020
 docling/pipeline/base_extraction_pipeline.py,sha256=GYrEz83IXv-tdIHjtNWxMBNczFwL8SZyf9vnPJ3STaI,2627
 docling/pipeline/base_pipeline.py,sha256=NPMQDTyis-LgQ4SybY2f5AESZl5PxogF-FRQuCDckXg,12748
 docling/pipeline/extraction_vlm_pipeline.py,sha256=veUOTe8nGdnduZKaGn1RRb-NfU1H6t_EN4QAsb022Zg,8260
@@ -93,7 +93,7 @@ docling/pipeline/threaded_standard_pdf_pipeline.py,sha256=i67G5AOW7PIFCe5JS2sdBm
 docling/pipeline/vlm_pipeline.py,sha256=HSbSoGZyy4eIK8eOL2g_NymrHg8r-DrB2buggJQAqHU,16189
 docling/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 docling/utils/accelerator_utils.py,sha256=DSajLxVx1JEVT0zt5de26llciLNlVfIDfSa2zYCFJzQ,2909
-docling/utils/api_image_request.py,sha256=xrn4O8ax8wdQPkLgbRhX22qWUangCXwaOzIXy_86LCs,5367
+docling/utils/api_image_request.py,sha256=HO-FrZ8kOqMMRVJSIsH3apoNoDKM2l7xrC8NfWAEgFQ,5876
 docling/utils/export.py,sha256=VwVUnYDk3mhGmISDbVm306fwpGNnoojouStBD4UajXI,4673
 docling/utils/glm_utils.py,sha256=TKOWQqWAHsX_w4fvoAA7_2xCi_urhnp1DsmjY8_sk5w,12274
 docling/utils/layout_postprocessor.py,sha256=bwDIhgUg5rKianzccGPTotTjqjkWtIQSoZwgKio8YC4,25124
@@ -104,9 +104,9 @@ docling/utils/orientation.py,sha256=jTyLxyT31FlOodZoBMlADHNQK2lAWKYVs5z7pXd_6Cg,
 docling/utils/profiling.py,sha256=YaMGoB9MMZpagF9mb5ndoHj8Lpb9aIdb7El-Pl7IcFs,1753
 docling/utils/utils.py,sha256=kJtIYuzXeOyJHYlxmLAo7dGM5rEsDa1i84qEsUj1nio,1908
 docling/utils/visualization.py,sha256=tY2ylE2aiQKkmzlSLnFW-HTfFyqUUMguW18ldd1PLfo,2868
-docling-2.58.0.dist-info/licenses/LICENSE,sha256=mBb7ErEcM8VS9OhiGHnQ2kk75HwPhr54W1Oiz3965MY,1088
-docling-2.58.0.dist-info/METADATA,sha256=py9js2V38fIWft1SmMe_iD_trav0WEwojgwxlHMsNv4,11642
-docling-2.58.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-docling-2.58.0.dist-info/entry_points.txt,sha256=hzVlbeE0aMSTQ9S0-NTYN0Hmgsn6qL_EA2qX4UbkAuY,149
-docling-2.58.0.dist-info/top_level.txt,sha256=vkIywP-USjFyYo1AIRQbWQQaL3xB5jf8vkCYdTIfNic,8
-docling-2.58.0.dist-info/RECORD,,
+docling-2.59.0.dist-info/licenses/LICENSE,sha256=mBb7ErEcM8VS9OhiGHnQ2kk75HwPhr54W1Oiz3965MY,1088
+docling-2.59.0.dist-info/METADATA,sha256=cXUxVkPEnIzf56IGksKO5slZW2A1Nu0WSonasqsdwic,11805
+docling-2.59.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+docling-2.59.0.dist-info/entry_points.txt,sha256=hzVlbeE0aMSTQ9S0-NTYN0Hmgsn6qL_EA2qX4UbkAuY,149
+docling-2.59.0.dist-info/top_level.txt,sha256=vkIywP-USjFyYo1AIRQbWQQaL3xB5jf8vkCYdTIfNic,8
+docling-2.59.0.dist-info/RECORD,,