vlmparse 0.1.6__py3-none-any.whl → 0.1.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vlmparse/cli.py +16 -36
- vlmparse/clients/granite_docling.py +1 -0
- vlmparse/clients/openai_converter.py +9 -7
- vlmparse/constants.py +3 -0
- vlmparse/converter_with_server.py +64 -25
- vlmparse/servers/docker_server.py +6 -7
- vlmparse/servers/utils.py +39 -11
- {vlmparse-0.1.6.dist-info → vlmparse-0.1.7.dist-info}/METADATA +1 -1
- {vlmparse-0.1.6.dist-info → vlmparse-0.1.7.dist-info}/RECORD +13 -13
- {vlmparse-0.1.6.dist-info → vlmparse-0.1.7.dist-info}/WHEEL +0 -0
- {vlmparse-0.1.6.dist-info → vlmparse-0.1.7.dist-info}/entry_points.txt +0 -0
- {vlmparse-0.1.6.dist-info → vlmparse-0.1.7.dist-info}/licenses/LICENSE +0 -0
- {vlmparse-0.1.6.dist-info → vlmparse-0.1.7.dist-info}/top_level.txt +0 -0
vlmparse/cli.py
CHANGED
@@ -11,8 +11,8 @@ class DParseCLI:
         model: str,
         port: int | None = None,
         gpus: str | None = None,
-
-
+        vllm_args: list[str] | None = None,
+        forget_predefined_vllm_args: bool = False,
     ):
         """Deploy a VLLM server in a Docker container.

@@ -20,43 +20,26 @@ class DParseCLI:
             model: Model name
             port: VLLM server port (default: 8056)
             gpus: Comma-separated GPU device IDs (e.g., "0" or "0,1,2"). If not specified, all GPUs will be used.
-
-
+            vllm_args: Additional keyword arguments to pass to the VLLM server.
+            forget_predefined_vllm_args: If True, the predefined VLLM kwargs from the docker config are replaced by vllm_args; otherwise the predefined kwargs are updated with vllm_args, with a risk of collision between argument names.
         """
-        if port is None:
-            port = 8056

-        from vlmparse.
+        from vlmparse.converter_with_server import start_server

-
-
-
-
-
-
-
-
-
-        # Only override GPU configuration if explicitly specified
-        # This preserves CPU-only settings from the config
-        if gpus is not None:
-            docker_config.gpu_device_ids = [g.strip() for g in str(gpus).split(",")]
-        server = docker_config.get_server(auto_stop=False)
-
-        if server is None:
-            logger.error(f"Model server not found for model: {model}")
-            return
-
-        # Deploy server and leave it running (cleanup=False)
-        logger.info(
-            f"Deploying VLLM server for {docker_config.model_name} on port {port}..."
+        base_url, container, _, _ = start_server(
+            model=model,
+            gpus=gpus,
+            port=port,
+            with_vllm_server=True,
+            vllm_args=vllm_args,
+            forget_predefined_vllm_args=forget_predefined_vllm_args,
+            auto_stop=False,
         )

-        base_url, container = server.start()
-
         logger.info(f"✓ VLLM server ready at {base_url}")
-
-
+        if container is not None:
+            logger.info(f"✓ Container ID: {container.id}")
+            logger.info(f"✓ Container name: {container.name}")

     def convert(
         self,
@@ -69,7 +52,6 @@ class DParseCLI:
         with_vllm_server: bool = False,
         concurrency: int = 10,
         dpi: int | None = None,
-        vllm_kwargs: dict | None = None,
         debug: bool = False,
     ):
         """Parse PDF documents and save results.

@@ -84,7 +66,6 @@ class DParseCLI:
            mode: Output mode - "document" (save as JSON zip), "md" (save as markdown file), "md_page" (save as folder of markdown pages)
            with_vllm_server: If True, a local VLLM server will be deployed if the model is not found in the registry. Note that if the model is in the registry and the uri is None, the server will be deployed anyway.
            dpi: DPI to use for the conversion. If not specified, the default DPI will be used.
-           vllm_kwargs: Additional keyword arguments to pass to the VLLM server.
            debug: If True, run in debug mode (single-threaded, no concurrency)
            """
        from vlmparse.converter_with_server import ConverterWithServer
@@ -95,7 +76,6 @@ class DParseCLI:
            gpus=gpus,
            with_vllm_server=with_vllm_server,
            concurrency=concurrency,
-           vllm_kwargs=vllm_kwargs,
        ) as converter_with_server:
            return converter_with_server.parse(
                inputs=inputs, out_folder=out_folder, mode=mode, dpi=dpi, debug=debug
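The deploy path above now funnels through the new start_server helper instead of driving the docker config directly. A minimal sketch of the equivalent direct call, assuming a model name present in the registry (the values below are placeholders, and vllm_args is given as a list of CLI-style tokens to match the list[str] annotation in the new signature):

from vlmparse.converter_with_server import start_server

# Deploy and leave the container running, as `deploy` does (auto_stop=False).
base_url, container, server, docker_config = start_server(
    model="my-registered-model",            # hypothetical model name
    gpus="0,1",                             # or None to use all GPUs
    port=None,                              # None falls back to DEFAULT_SERVER_PORT
    with_vllm_server=True,
    vllm_args=["--max-model-len", "8192"],  # hypothetical extra vLLM flags
    forget_predefined_vllm_args=False,      # merge with the predefined args
    auto_stop=False,
)
print(f"VLLM server ready at {base_url}")
if container is not None:
    print(container.id, container.name)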
vlmparse/clients/granite_docling.py
CHANGED

@@ -34,6 +34,7 @@ class GraniteDoclingDockerServerConfig(VLLMDockerServerConfig):
 class GraniteDoclingConverterConfig(OpenAIConverterConfig):
     """Granite Docling converter configuration."""

+    model_name: str = "ibm-granite/granite-docling-258M"
     preprompt: str | None = None
     postprompt: str | None = "Convert this page to docling."
     completion_kwargs: dict | None = {
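The only change here is a model_name default on the converter config. A small sketch of what this implies, assuming the remaining fields of the pydantic config also have defaults so a bare construction works:

from vlmparse.clients.granite_docling import GraniteDoclingConverterConfig

cfg = GraniteDoclingConverterConfig()
assert cfg.model_name == "ibm-granite/granite-docling-258M"  # new in 0.1.7
assert cfg.postprompt == "Convert this page to docling."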
vlmparse/clients/openai_converter.py
CHANGED

@@ -1,5 +1,5 @@
 import os
-from typing import Literal
+from typing import Literal, Optional

 from loguru import logger
 from pydantic import Field
@@ -101,7 +101,7 @@ class OpenAIConverterClient(BaseConverter):

     async def _get_chat_completion(
         self, messages: list[dict], completion_kwargs: dict | None = None
-    ) -> tuple[str, "CompletionUsage"]:  # noqa: F821
+    ) -> tuple[str, Optional["CompletionUsage"]]:  # noqa: F821
         """Helper to handle chat completion with optional streaming."""
         if completion_kwargs is None:
             completion_kwargs = self.config.completion_kwargs
@@ -117,7 +117,8 @@ class OpenAIConverterClient(BaseConverter):
             async for chunk in response_stream:
                 if chunk.choices and chunk.choices[0].delta.content:
                     response_parts.append(chunk.choices[0].delta.content)
-
+
+            return "".join(response_parts), None
         else:
             response_obj = await self.model.chat.completions.create(
                 model=self.config.llm_params.model_name,
@@ -175,9 +176,10 @@ class OpenAIConverterClient(BaseConverter):

         text = html_to_md_keep_tables(text)
         page.text = text
-
-
-
-
+        if usage is not None:
+            page.prompt_tokens = usage.prompt_tokens
+            page.completion_tokens = usage.completion_tokens
+            if hasattr(usage, "reasoning_tokens"):
+                page.reasoning_tokens = usage.reasoning_tokens

         return page
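The Optional return type records that the streaming branch yields no CompletionUsage (the OpenAI SDK generally omits usage from streamed responses unless the request opts in, e.g. via stream_options={"include_usage": True}), so token accounting becomes conditional instead of failing on None. A sketch of the caller-side pattern the last hunk adopts (hypothetical call site):

text, usage = await converter._get_chat_completion(messages)
if usage is not None:
    prompt_tokens = usage.prompt_tokens
    completion_tokens = usage.completion_tokens
    # reasoning_tokens is provider-specific, hence the hasattr guard in the diff
    reasoning_tokens = getattr(usage, "reasoning_tokens", None)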
vlmparse/converter_with_server.py
CHANGED
@@ -5,10 +5,61 @@ from typing import Literal

 from loguru import logger

+from vlmparse.constants import DEFAULT_SERVER_PORT
 from vlmparse.servers.utils import get_model_from_uri
 from vlmparse.utils import get_file_paths


+def start_server(
+    model: str,
+    gpus: str,
+    port: None | int = None,
+    with_vllm_server: bool = True,
+    vllm_args: list[str] = {},
+    forget_predefined_vllm_args: bool = False,
+    auto_stop: bool = False,
+):
+    from vlmparse.registries import docker_config_registry
+
+    base_url = ""
+    container = None
+    docker_config = docker_config_registry.get(model, default=with_vllm_server)
+
+    if port is None:
+        port = DEFAULT_SERVER_PORT
+
+    if docker_config is None:
+        logger.warning(
+            f"No Docker configuration found for model: {model}, using default configuration"
+        )
+        return "", container, None, docker_config
+
+    gpu_device_ids = None
+    if gpus is not None:
+        gpu_device_ids = [g.strip() for g in str(gpus).split(",")]
+
+    if docker_config is not None:
+        if port is not None:
+            docker_config.docker_port = port
+        docker_config.gpu_device_ids = gpu_device_ids
+        docker_config.update_command_args(
+            vllm_args,
+            forget_predefined_vllm_args=forget_predefined_vllm_args,
+        )
+
+        logger.info(
+            f"Deploying VLLM server for {docker_config.model_name} on port {port}..."
+        )
+        server = docker_config.get_server(auto_stop=auto_stop)
+        if server is None:
+            logger.error(f"Model server not found for model: {model}")
+            return "", container, None, docker_config
+
+        base_url, container = server.start()
+
+    return base_url, container, server, docker_config
+
+
 class ConverterWithServer:
     def __init__(
         self,
@@ -18,8 +69,8 @@ class ConverterWithServer:
         port: int | None = None,
         with_vllm_server: bool = False,
         concurrency: int = 10,
-
-
+        vllm_args: dict | None = None,
+        forget_predefined_vllm_args: bool = False,
     ):
         self.model = model
         self.uri = uri
@@ -27,8 +78,8 @@ class ConverterWithServer:
         self.gpus = gpus
         self.with_vllm_server = with_vllm_server
         self.concurrency = concurrency
-        self.
-        self.
+        self.vllm_args = vllm_args
+        self.forget_predefined_vllm_args = forget_predefined_vllm_args
         self.server = None
         self.client = None

@@ -36,32 +87,20 @@ class ConverterWithServer:
             self.model = get_model_from_uri(self.uri)

     def start_server_and_client(self):
-        from vlmparse.registries import (
-            converter_config_registry,
-            docker_config_registry,
-        )
-
-        gpu_device_ids = None
-        if self.gpus is not None:
-            gpu_device_ids = [g.strip() for g in self.gpus.split(",")]
+        from vlmparse.registries import converter_config_registry

         if self.uri is None:
-            docker_config =
-                self.model,
+            _, _, self.server, docker_config = start_server(
+                model=self.model,
+                gpus=self.gpus,
+                port=self.port,
+                with_vllm_server=self.with_vllm_server,
+                vllm_args=self.vllm_args,
+                forget_predefined_vllm_args=self.forget_predefined_vllm_args,
+                auto_stop=True,
             )

             if docker_config is not None:
-                if self.port is not None:
-                    docker_config.docker_port = self.port
-                docker_config.gpu_device_ids = gpu_device_ids
-                docker_config.update_command_args(
-                    self.vllm_kwargs,
-                    forget_predefined_vllm_kwargs=self.forget_predefined_vllm_kwargs,
-                )
-                self.server = docker_config.get_server(auto_stop=True)
-
-                self.server.start()
-
                 self.client = docker_config.get_client()
             else:
                 self.client = converter_config_registry.get(self.model).get_client()
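ConverterWithServer keeps the context-manager usage shown in cli.py; only the keyword names change (vllm_kwargs becomes vllm_args, plus the new forget flag). Note the annotation here is dict | None while cli.py says list[str] | None; a token list matches how update_command_args extends command_args. A sketch with placeholder inputs:

from vlmparse.converter_with_server import ConverterWithServer

with ConverterWithServer(
    model="my-registered-model",            # hypothetical
    gpus=None,                              # use all GPUs
    with_vllm_server=True,                  # deploy locally when no uri is given
    concurrency=10,
    vllm_args=["--max-model-len", "8192"],  # hypothetical
    forget_predefined_vllm_args=False,
) as cws:
    cws.parse(inputs="docs/", out_folder="out/", mode="md", dpi=None, debug=False)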
vlmparse/servers/docker_server.py
CHANGED

@@ -49,15 +49,14 @@ class DockerServerConfig(BaseModel):

     def update_command_args(
         self,
-
-
+        vllm_args: dict | None = None,
+        forget_predefined_vllm_args: bool = False,
     ) -> list[str]:
-        if
-
-
-            self.command_args = new_kwargs
+        if vllm_args is not None:
+            if forget_predefined_vllm_args:
+                self.command_args = vllm_args
             else:
-                self.command_args.extend(
+                self.command_args.extend(vllm_args)

         return self.command_args

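The rename from *_kwargs to *_args makes the two merge modes explicit. A sketch of the semantics, assuming command_args holds vLLM CLI tokens:

config.command_args = ["--max-model-len", "4096"]   # predefined in the docker config

config.update_command_args(["--dtype", "bfloat16"])
# -> ["--max-model-len", "4096", "--dtype", "bfloat16"]   (extended; names may collide)

config.command_args = ["--max-model-len", "4096"]
config.update_command_args(["--dtype", "bfloat16"], forget_predefined_vllm_args=True)
# -> ["--dtype", "bfloat16"]   (predefined args replaced)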
vlmparse/servers/utils.py
CHANGED
@@ -2,6 +2,7 @@ import getpass
 import time
 from contextlib import contextmanager
 from pathlib import Path
+from urllib.parse import parse_qsl, urlparse

 import docker
 from loguru import logger
@@ -222,25 +223,52 @@ def docker_server(
     logger.info("Container stopped")


+def normalize_uri(uri: str) -> tuple:
+    u = urlparse(uri)
+
+    # --- Normalize scheme ---
+    scheme = (u.scheme or "http").lower()
+
+    # --- Normalize host ---
+    host = (u.hostname or "").lower()
+    if host in ("localhost", "0.0.0.0"):
+        host = "localhost"
+
+    # --- Normalize port (apply defaults) ---
+    if u.port:
+        port = u.port
+    else:
+        port = 443 if scheme == "https" else 80
+
+    # --- Normalize path ---
+    # Treat empty path as "/" and remove trailing slash (except root)
+    path = u.path or "/"
+    if path != "/" and path.endswith("/"):
+        path = path.rstrip("/")
+
+    # Collapse duplicate slashes
+    while "//" in path:
+        path = path.replace("//", "/")
+
+    # --- Normalize query parameters (sorted) ---
+    query_pairs = parse_qsl(u.query, keep_blank_values=True)
+    query = "&".join(f"{k}={v}" for k, v in sorted(query_pairs))
+
+    return (scheme, host, port, path, query)
+
+
 def get_model_from_uri(uri: str) -> str:
     model = None
     client = docker.from_env()
     containers = client.containers.list()
+
+    uri = normalize_uri(uri)
+
     for container in containers:
         c_uri = container.labels.get("vlmparse_uri")
         c_model = container.labels.get("vlmparse_model_name")
-        if c_uri is not None:
-            c_uri = c_uri.replace("localhost", "0.0.0.0")
-
-        # Check if user URI matches container URI (ignoring /v1 suffix if missing)
-        if c_uri and (
-            c_uri == uri or c_uri.startswith(uri.rstrip("/")) or uri.startswith(c_uri)
-        ):
-            # Update URI to the correct one from container (likely has /v1)
-            if len(c_uri) > len(uri.rstrip("/")):
-                logger.info(f"Updating URI from {uri} to {c_uri}")
-                uri = c_uri

+        if c_uri and uri == normalize_uri(c_uri):
             # Infer model if not provided
             if model is None and c_model:
                 logger.info(f"Inferred model {c_model} from container")
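normalize_uri replaces the old prefix-matching heuristic with structural equality: two URIs now match exactly when their normalized (scheme, host, port, path, query) tuples are equal. For example:

from vlmparse.servers.utils import normalize_uri

# localhost and 0.0.0.0 collapse to the same host, default ports are applied,
# trailing and duplicate slashes are stripped, and query parameters are sorted.
assert normalize_uri("http://0.0.0.0:8056//v1/") == ("http", "localhost", 8056, "/v1", "")
assert normalize_uri("http://localhost:8056/v1") == normalize_uri("HTTP://0.0.0.0:8056/v1/")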
{vlmparse-0.1.6.dist-info → vlmparse-0.1.7.dist-info}/RECORD
CHANGED

@@ -1,22 +1,22 @@
 vlmparse/base_model.py,sha256=4U4UPe8SNArliKnUf8pp8zQugWYsnhg9okylt7mrW1U,381
 vlmparse/build_doc.py,sha256=LAWrnFrqamN5PwJo57AUtQOPrMFGnCGw4gBjEKZ6pYo,2127
-vlmparse/cli.py,sha256=
-vlmparse/constants.py,sha256=
+vlmparse/cli.py,sha256=JfR6gk0pdYAavJgFTVx4OcgWdiLktGoKJ8TcVcD_IHw,12235
+vlmparse/constants.py,sha256=DYaK7KtTW8p9MPb3iPvoP5H1r7ICRuIFo89P01q4uCI,184
 vlmparse/converter.py,sha256=F0JSY9sFYUggCvaUCb27kKGJJpnZKW2FStMDVJoIOeQ,7383
-vlmparse/converter_with_server.py,sha256=
+vlmparse/converter_with_server.py,sha256=zpUHDpHbDBs4Cj7dcVjvUQw0-U_InRNDC5Ekb_gehRM,6022
 vlmparse/registries.py,sha256=yBVrrhy61rSoLwdNV-z0C4lqIpTbLoWab3V6u7aSyNM,5797
 vlmparse/utils.py,sha256=rcVrtPiQVj_8HAmFQOu___72uYIapp_X89yxrMNCBow,1236
 vlmparse/clients/chandra.py,sha256=EulsCZdwOtm0pQ6CDm320U96k8aWFN4wKqCm1Xo7VCE,9775
 vlmparse/clients/deepseekocr.py,sha256=Uw6tPvP2KVsPDlz1ZUgYdbgQSjmFPuYeFDrGMMOTBAo,6501
 vlmparse/clients/docling.py,sha256=SAkLsqseuWfkuiel8FWR1G0Z5s-SZU3dE2JbsOvF4SA,5328
 vlmparse/clients/dotsocr.py,sha256=uGJoYEiDkP3-rmfdkAnMeAX-T4RZyEPoh6jmow5_-J8,10336
-vlmparse/clients/granite_docling.py,sha256=
+vlmparse/clients/granite_docling.py,sha256=LMJAFjpSxcgLhsVxknSqrCC35MUTmklsE9PJZvMK2O8,4691
 vlmparse/clients/hunyuanocr.py,sha256=UFqaS4b8UM9EtizyrZIxlqcYlESmxm8xrQZP7lL6tkE,1857
 vlmparse/clients/lightonocr.py,sha256=wx1Im8Z3wlRWwYbPqnSd3LqTtdAU8CnX5mzu1BuCUY8,1314
 vlmparse/clients/mineru.py,sha256=6jZ1sKn2kGwUvD8gVs4PqEDH7uUXYK8pAB5Fr1JeqnY,3617
 vlmparse/clients/nanonetocr.py,sha256=BT5vaeerCsK5agvOaHK3NvLUqWd1FfDmrMmDYbp646I,1543
 vlmparse/clients/olmocr.py,sha256=A4Vl0meYpU5QPTML_OxyyRM07xCxtfrMZedgGMYEcuU,1851
-vlmparse/clients/openai_converter.py,sha256=
+vlmparse/clients/openai_converter.py,sha256=bIDpR7Yn70eEp0pmzFoG2dDwY-mxCj3kH1IZS9BvXVQ,6266
 vlmparse/clients/paddleocrvl.py,sha256=qFBDj_UQocyq3WCh24tUOx9Ud7S9DfSm-1n3ztikY2s,1402
 vlmparse/clients/prompts.py,sha256=-J60lqxgRzlkQ9VsQLxmWsIMaDt-gNqWqWoqHIw9CLc,4228
 vlmparse/clients/pipe_utils/cleaner.py,sha256=oxBkBTOkluN1lmeNbzajRIe0_D__ZGwUOBaI_Ph0uxE,2396
@@ -24,13 +24,13 @@ vlmparse/clients/pipe_utils/html_to_md_conversion.py,sha256=cFFqzD2jCNw_968_eu3W
 vlmparse/clients/pipe_utils/utils.py,sha256=935ecIO446I0pstszE_1nrIPHn1Ffrxunq7fVd0dsd8,315
 vlmparse/data_model/box.py,sha256=lJsh4qhjgYXZF5vTSJ1qMXD5GVlBi2_SBedBMlfJikU,16868
 vlmparse/data_model/document.py,sha256=xheaMeStOj2c9GZKmdtxcEl_Dj44V5JyVp6JnTrSpH0,4615
-vlmparse/servers/docker_server.py,sha256=
-vlmparse/servers/utils.py,sha256=
+vlmparse/servers/docker_server.py,sha256=UVU7VDloJ8Yfqj-WUv3Trti9AODcdC9JyTzW3sCM-l4,7032
+vlmparse/servers/utils.py,sha256=tIXhgbF9EVOJy2nYEguVq69gn9ATxtya_1F4wZSt68o,9454
 vlmparse/st_viewer/fs_nav.py,sha256=7GNH68h2Loh5pQ64Pe72-D2cs2BLhqRXevEmKdFmPX0,1616
 vlmparse/st_viewer/st_viewer.py,sha256=m2rQTtk5rlwErNmivNAg-4rkHkvNkvLhoJZxFQi7Dwk,2105
-vlmparse-0.1.
-vlmparse-0.1.
-vlmparse-0.1.
-vlmparse-0.1.
-vlmparse-0.1.
-vlmparse-0.1.
+vlmparse-0.1.7.dist-info/licenses/LICENSE,sha256=3TKJHk8hPBR5dbLWZ3IpfCftl-_m-iyBwpYQGZYxj14,1080
+vlmparse-0.1.7.dist-info/METADATA,sha256=DP--8aCeLxAgvo6vvaDog7xzzMzvZywVvCrMiAKhDbo,5597
+vlmparse-0.1.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+vlmparse-0.1.7.dist-info/entry_points.txt,sha256=gD5berP6HwE2wNIkls-Lw5goiceA8uMgPEd7ifnFJXs,47
+vlmparse-0.1.7.dist-info/top_level.txt,sha256=k4ni-GNH_iAX7liQEsk_KY_c3xgZgt8k9fsSs9IXLXs,9
+vlmparse-0.1.7.dist-info/RECORD,,
{vlmparse-0.1.6.dist-info → vlmparse-0.1.7.dist-info}/WHEEL
File without changes
{vlmparse-0.1.6.dist-info → vlmparse-0.1.7.dist-info}/entry_points.txt
File without changes
{vlmparse-0.1.6.dist-info → vlmparse-0.1.7.dist-info}/licenses/LICENSE
File without changes
{vlmparse-0.1.6.dist-info → vlmparse-0.1.7.dist-info}/top_level.txt
File without changes