vlmparse 0.1.7__py3-none-any.whl → 0.1.9__py3-none-any.whl

This diff shows the content of publicly available package versions as released to their public registries. It is provided for informational purposes only and reflects the changes between the two versions as published.
Files changed (36)
  1. vlmparse/build_doc.py +20 -19
  2. vlmparse/cli.py +439 -270
  3. vlmparse/clients/chandra.py +176 -60
  4. vlmparse/clients/deepseekocr.py +193 -12
  5. vlmparse/clients/docling.py +0 -1
  6. vlmparse/clients/dotsocr.py +34 -31
  7. vlmparse/clients/glmocr.py +243 -0
  8. vlmparse/clients/granite_docling.py +9 -36
  9. vlmparse/clients/hunyuanocr.py +5 -1
  10. vlmparse/clients/lightonocr.py +23 -1
  11. vlmparse/clients/mineru.py +0 -1
  12. vlmparse/clients/mistral_converter.py +85 -0
  13. vlmparse/clients/nanonetocr.py +5 -1
  14. vlmparse/clients/olmocr.py +6 -2
  15. vlmparse/clients/openai_converter.py +95 -60
  16. vlmparse/clients/paddleocrvl.py +195 -40
  17. vlmparse/converter.py +51 -11
  18. vlmparse/converter_with_server.py +92 -19
  19. vlmparse/registries.py +107 -89
  20. vlmparse/servers/base_server.py +127 -0
  21. vlmparse/servers/docker_compose_deployment.py +489 -0
  22. vlmparse/servers/docker_compose_server.py +39 -0
  23. vlmparse/servers/docker_run_deployment.py +226 -0
  24. vlmparse/servers/docker_server.py +17 -109
  25. vlmparse/servers/model_identity.py +48 -0
  26. vlmparse/servers/server_registry.py +42 -0
  27. vlmparse/servers/utils.py +83 -219
  28. vlmparse/st_viewer/st_viewer.py +1 -1
  29. vlmparse/utils.py +15 -2
  30. {vlmparse-0.1.7.dist-info → vlmparse-0.1.9.dist-info}/METADATA +13 -3
  31. vlmparse-0.1.9.dist-info/RECORD +44 -0
  32. {vlmparse-0.1.7.dist-info → vlmparse-0.1.9.dist-info}/WHEEL +1 -1
  33. vlmparse-0.1.7.dist-info/RECORD +0 -36
  34. {vlmparse-0.1.7.dist-info → vlmparse-0.1.9.dist-info}/entry_points.txt +0 -0
  35. {vlmparse-0.1.7.dist-info → vlmparse-0.1.9.dist-info}/licenses/LICENSE +0 -0
  36. {vlmparse-0.1.7.dist-info → vlmparse-0.1.9.dist-info}/top_level.txt +0 -0
vlmparse/converter_with_server.py CHANGED
@@ -14,25 +14,38 @@ def start_server(
     model: str,
     gpus: str,
     port: None | int = None,
-    with_vllm_server: bool = True,
+    server: Literal["registry", "hf"] = "registry",
     vllm_args: list[str] = {},
     forget_predefined_vllm_args: bool = False,
     auto_stop: bool = False,
 ):
     from vlmparse.registries import docker_config_registry
+    from vlmparse.servers.docker_server import (
+        DEFAULT_MODEL_NAME,
+        VLLMDockerServerConfig,
+    )
 
     base_url = ""
     container = None
-    docker_config = docker_config_registry.get(model, default=with_vllm_server)
+    docker_config = docker_config_registry.get(model)
 
     if port is None:
         port = DEFAULT_SERVER_PORT
 
     if docker_config is None:
-        logger.warning(
-            f"No Docker configuration found for model: {model}, using default configuration"
-        )
-        return "", container, None, docker_config
+        if server == "registry":
+            raise ValueError(
+                f"Model '{model}' not found in registry and server='registry'. Use server='hf' to serve arbitrary HuggingFace models."
+            )
+        elif server == "hf":
+            docker_config = VLLMDockerServerConfig(
+                model_name=model, default_model_name=DEFAULT_MODEL_NAME
+            )
+        else:
+            logger.warning(
+                f"No Docker configuration found for model: {model} and server type is undetermined."
+            )
+            return "", container, None, docker_config
 
     gpu_device_ids = None
     if gpus is not None:
@@ -42,13 +55,14 @@ def start_server(
     if port is not None:
         docker_config.docker_port = port
     docker_config.gpu_device_ids = gpu_device_ids
-    docker_config.update_command_args(
-        vllm_args,
-        forget_predefined_vllm_args=forget_predefined_vllm_args,
-    )
+    if hasattr(docker_config, "update_command_args"):
+        docker_config.update_command_args(
+            vllm_args,
+            forget_predefined_vllm_args=forget_predefined_vllm_args,
+        )
 
     logger.info(
-        f"Deploying VLLM server for {docker_config.model_name} on port {port}..."
+        f"Deploying server for {docker_config.model_name} on port {port}..."
     )
     server = docker_config.get_server(auto_stop=auto_stop)
     if server is None:
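Note on the behavior change above: start_server no longer falls back to a default configuration for unknown models; the new server argument decides between a strict registry lookup and a generic vLLM deployment. A minimal sketch of a call site, assuming start_server is importable from vlmparse.converter_with_server as in this file; the model id is a placeholder:

    from vlmparse.converter_with_server import start_server

    # server="registry" (the default) raises ValueError for models missing from
    # docker_config_registry; server="hf" instead wraps any HuggingFace model id
    # in a generic VLLMDockerServerConfig.
    base_url, container, srv, docker_config = start_server(
        model="my-org/my-vlm",  # placeholder HF model id
        gpus="0",
        server="hf",
        auto_stop=True,
    )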
@@ -67,19 +81,27 @@ class ConverterWithServer:
         uri: str | None = None,
         gpus: str | None = None,
         port: int | None = None,
-        with_vllm_server: bool = False,
+        server: Literal["registry", "hf", "google", "openai"] = "registry",
         concurrency: int = 10,
         vllm_args: dict | None = None,
         forget_predefined_vllm_args: bool = False,
+        return_documents: bool = False,
     ):
+        if model is None and uri is None:
+            raise ValueError("Either 'model' or 'uri' must be provided")
+
+        if concurrency < 1:
+            raise ValueError("concurrency must be at least 1")
+
         self.model = model
         self.uri = uri
         self.port = port
         self.gpus = gpus
-        self.with_vllm_server = with_vllm_server
+        self.server_type = server
         self.concurrency = concurrency
         self.vllm_args = vllm_args
         self.forget_predefined_vllm_args = forget_predefined_vllm_args
+        self.return_documents = return_documents
         self.server = None
         self.client = None
 
@@ -87,28 +109,56 @@ class ConverterWithServer:
             self.model = get_model_from_uri(self.uri)
 
     def start_server_and_client(self):
-        from vlmparse.registries import converter_config_registry
+        from vlmparse.clients.openai_converter import OpenAIConverterConfig
+        from vlmparse.registries import (
+            converter_config_registry,
+            docker_config_registry,
+        )
 
+        start_local_server = False
         if self.uri is None:
+            if self.server_type == "hf":
+                start_local_server = True
+            elif self.server_type == "registry":
+                if self.model in docker_config_registry.list_models():
+                    start_local_server = True
+
+        if start_local_server:
+            server_arg = "hf" if self.server_type == "hf" else "registry"
             _, _, self.server, docker_config = start_server(
                 model=self.model,
                 gpus=self.gpus,
                 port=self.port,
-                with_vllm_server=self.with_vllm_server,
+                server=server_arg,
                 vllm_args=self.vllm_args,
                 forget_predefined_vllm_args=self.forget_predefined_vllm_args,
                 auto_stop=True,
             )
 
             if docker_config is not None:
-                self.client = docker_config.get_client()
+                self.client = docker_config.get_client(
+                    return_documents_in_batch_mode=self.return_documents
+                )
             else:
-                self.client = converter_config_registry.get(self.model).get_client()
+                # Should not happen if start_server works as expected
+                self.client = converter_config_registry.get(self.model).get_client(
+                    return_documents_in_batch_mode=self.return_documents
+                )
+
+        elif self.server_type == "hf":
+            client_config = OpenAIConverterConfig(
+                model_name=self.model, base_url=self.uri
+            )
+            self.client = client_config.get_client(
+                return_documents_in_batch_mode=self.return_documents
+            )
 
         else:
             client_config = converter_config_registry.get(self.model, uri=self.uri)
 
-            self.client = client_config.get_client()
+            self.client = client_config.get_client(
+                return_documents_in_batch_mode=self.return_documents
+            )
 
     def stop_server(self):
         if self.server is not None and self.server.auto_stop:
@@ -119,16 +169,30 @@ class ConverterWithServer:
         return self
 
     def __exit__(self, exc_type, exc_value, traceback):
-        self.stop_server()
+        try:
+            self.stop_server()
+        except Exception as e:
+            logger.warning(f"Error stopping server during cleanup: {e}")
+        return False  # Don't suppress exceptions
 
     def parse(
         self,
         inputs: str | list[str],
         out_folder: str = ".",
         mode: Literal["document", "md", "md_page"] = "document",
+        conversion_mode: Literal[
+            "ocr",
+            "ocr_layout",
+            "table",
+            "image_description",
+            "formula",
+            "chart",
+        ]
+        | None = None,
         dpi: int | None = None,
         debug: bool = False,
         retrylast: bool = False,
+        completion_kwargs: dict | None = None,
     ):
         assert (
             self.client is not None
@@ -165,6 +229,14 @@ class ConverterWithServer:
         if dpi is not None:
             self.client.config.dpi = int(dpi)
 
+        if conversion_mode is not None:
+            self.client.config.conversion_mode = conversion_mode
+
+        if completion_kwargs is not None and hasattr(
+            self.client.config, "completion_kwargs"
+        ):
+            self.client.config.completion_kwargs |= completion_kwargs
+
         if debug:
             self.client.debug = debug
 
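Taken together, the ConverterWithServer changes add a server selector and a return_documents flag at construction time, plus conversion_mode and completion_kwargs arguments to parse. A hedged usage sketch based only on the signatures shown above; the model alias and file paths are illustrative:

    from vlmparse.converter_with_server import ConverterWithServer

    converter = ConverterWithServer(
        model="mineru",  # illustrative registry alias
        server="registry",
        return_documents=True,
    )
    converter.start_server_and_client()
    try:
        converter.parse(
            "paper.pdf",
            out_folder="out",
            mode="md",
            conversion_mode="ocr_layout",          # new in 0.1.9
            completion_kwargs={"temperature": 0},  # merged when the config supports it
        )
    finally:
        converter.stop_server()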
vlmparse/registries.py CHANGED
@@ -1,37 +1,30 @@
 import os
 from collections.abc import Callable
 
-from vlmparse.clients.chandra import ChandraConverterConfig, ChandraDockerServerConfig
+from vlmparse.clients.chandra import ChandraDockerServerConfig
 from vlmparse.clients.deepseekocr import (
-    DeepSeekOCRConverterConfig,
+    DeepSeekOCR2DockerServerConfig,
     DeepSeekOCRDockerServerConfig,
 )
-from vlmparse.clients.docling import DoclingConverterConfig, DoclingDockerServerConfig
-from vlmparse.clients.dotsocr import DotsOCRConverterConfig, DotsOCRDockerServerConfig
-from vlmparse.clients.granite_docling import (
-    GraniteDoclingConverterConfig,
-    GraniteDoclingDockerServerConfig,
-)
-from vlmparse.clients.hunyuanocr import (
-    HunyuanOCRConverterConfig,
-    HunyuanOCRDockerServerConfig,
-)
+from vlmparse.clients.docling import DoclingDockerServerConfig
+from vlmparse.clients.dotsocr import DotsOCRDockerServerConfig
+from vlmparse.clients.glmocr import GLMOCRDockerServerConfig
+from vlmparse.clients.granite_docling import GraniteDoclingDockerServerConfig
+from vlmparse.clients.hunyuanocr import HunyuanOCRDockerServerConfig
 from vlmparse.clients.lightonocr import (
-    LightOnOCRConverterConfig,
+    LightonOCR21BServerConfig,
     LightOnOCRDockerServerConfig,
 )
-from vlmparse.clients.mineru import MinerUConverterConfig, MinerUDockerServerConfig
-from vlmparse.clients.nanonetocr import (
-    NanonetOCR2ConverterConfig,
-    NanonetOCR2DockerServerConfig,
-)
-from vlmparse.clients.olmocr import OlmOCRConverterConfig, OlmOCRDockerServerConfig
-from vlmparse.clients.openai_converter import LLMParams, OpenAIConverterConfig
-from vlmparse.clients.paddleocrvl import (
-    PaddleOCRVLConverterConfig,
-    PaddleOCRVLDockerServerConfig,
-)
-from vlmparse.servers.docker_server import DEFAULT_MODEL_NAME, docker_config_registry
+from vlmparse.clients.mineru import MinerUDockerServerConfig
+from vlmparse.clients.mistral_converter import MistralOCRConverterConfig
+from vlmparse.clients.nanonetocr import NanonetOCR2DockerServerConfig
+from vlmparse.clients.olmocr import OlmOCRDockerServerConfig
+from vlmparse.clients.openai_converter import OpenAIConverterConfig
+from vlmparse.clients.paddleocrvl import PaddleOCRVLDockerServerConfig
+from vlmparse.converter import ConverterConfig
+from vlmparse.servers.docker_compose_server import DockerComposeServerConfig
+from vlmparse.servers.docker_server import DockerServerConfig
+from vlmparse.servers.server_registry import docker_config_registry
 
 
 def get_default(cls, field_name):
@@ -43,19 +36,26 @@ def get_default(cls, field_name):
     return field_info.default
 
 
-for server_config_cls in [
+# All server configs - single source of truth
+SERVER_CONFIGS: list[type[DockerServerConfig | DockerComposeServerConfig]] = [
     ChandraDockerServerConfig,
     LightOnOCRDockerServerConfig,
     DotsOCRDockerServerConfig,
     PaddleOCRVLDockerServerConfig,
+    GLMOCRDockerServerConfig,
     NanonetOCR2DockerServerConfig,
     HunyuanOCRDockerServerConfig,
     DoclingDockerServerConfig,
     OlmOCRDockerServerConfig,
     MinerUDockerServerConfig,
     DeepSeekOCRDockerServerConfig,
+    DeepSeekOCR2DockerServerConfig,
     GraniteDoclingDockerServerConfig,
-]:
+    LightonOCR21BServerConfig,
+]
+
+# Register docker server configs
+for server_config_cls in SERVER_CONFIGS:
     aliases = get_default(server_config_cls, "aliases") or []
     model_name = get_default(server_config_cls, "model_name")
     names = [n for n in aliases + [model_name] if isinstance(n, str)]
@@ -64,37 +64,83 @@ for server_config_cls in [
 
 
 class ConverterConfigRegistry:
-    """Registry for mapping model names to their Docker configurations."""
+    """Registry for mapping model names to their converter configurations.
+
+    Thread-safe registry that maps model names to their converter configuration factories.
+    """
 
     def __init__(self):
-        self._registry = dict()
+        import threading
+
+        self._registry: dict[str, Callable[[str | None], ConverterConfig]] = {}
+        self._lock = threading.RLock()
 
     def register(
         self,
         model_name: str,
-        config_factory: Callable[[str], OpenAIConverterConfig | None],
+        config_factory: Callable[[str | None], ConverterConfig],
     ):
-        """Register a config factory for a model name."""
-        self._registry[model_name] = config_factory
-
-    def get(self, model_name: str, uri: str | None = None) -> OpenAIConverterConfig:
-        """Get config for a model name. Returns default if not registered."""
-        if model_name in self._registry:
-            return self._registry[model_name](uri=uri)
-        # Fallback to OpenAIConverterConfig for unregistered models
-        if uri is not None:
-            return OpenAIConverterConfig(
-                llm_params=LLMParams(base_url=uri, model_name=model_name)
-            )
-        return OpenAIConverterConfig(llm_params=LLMParams(model_name=model_name))
+        """Register a config factory for a model name (thread-safe)."""
+        with self._lock:
+            self._registry[model_name] = config_factory
+
+    def register_from_server(
+        self,
+        server_config_cls: type[DockerServerConfig | DockerComposeServerConfig],
+    ):
+        """Register converter config derived from a server config class.
+
+        This ensures model_name and default_model_name are consistently
+        passed from server to client config via _create_client_kwargs.
+        """
+        aliases = get_default(server_config_cls, "aliases") or []
+        model_name = get_default(server_config_cls, "model_name")
+        names = [n for n in aliases + [model_name] if isinstance(n, str)]
+        # Also register short name (after last /)
+        if model_name and "/" in model_name:
+            names.append(model_name.split("/")[-1])
+
+        def factory(uri: str | None, cls=server_config_cls) -> ConverterConfig:
+            server = cls()
+            client_config = server.client_config
+            # Override base_url if provided
+            if uri is not None:
+                client_config = client_config.model_copy(update={"base_url": uri})
+            return client_config
+
+        with self._lock:
+            for name in names:
+                self._registry[name] = factory
+
+    def get(
+        self,
+        model_name: str,
+        uri: str | None = None,
+    ) -> ConverterConfig:
+        """Get config for a model name (thread-safe). Raises ValueError if not registered."""
+        with self._lock:
+            factory = self._registry.get(model_name)
+
+        if factory is not None:
+            return factory(uri)
+
+        raise ValueError(f"Model '{model_name}' not found in registry.")
 
     def list_models(self) -> list[str]:
-        """List all registered model names."""
-        return list(self._registry.keys())
+        """List all registered model names (thread-safe)."""
+        with self._lock:
+            return list(self._registry.keys())
 
 
 # Global registry instance
 converter_config_registry = ConverterConfigRegistry()
+
+# Register all server-backed converters through the server config
+# This ensures model_name and default_model_name are consistently passed
+for server_config_cls in SERVER_CONFIGS:
+    converter_config_registry.register_from_server(server_config_cls)
+
+# External API configs (no server config - these are cloud APIs)
 GOOGLE_API_BASE_URL = (
     os.getenv("GOOGLE_API_BASE_URL")
     or "https://generativelanguage.googleapis.com/v1beta/openai/"
@@ -111,11 +157,10 @@ for gemini_model in [
     converter_config_registry.register(
         gemini_model,
         lambda uri=None, model=gemini_model: OpenAIConverterConfig(
-            llm_params=LLMParams(
-                model_name=model,
-                base_url=GOOGLE_API_BASE_URL if uri is None else uri,
-                api_key=os.getenv("GOOGLE_API_KEY"),
-            )
+            model_name=model,
+            base_url=GOOGLE_API_BASE_URL if uri is None else uri,
+            api_key=os.getenv("GOOGLE_API_KEY"),
+            default_model_name=model,
         ),
     )
 for openai_model in [
@@ -126,45 +171,18 @@ for openai_model in [
     converter_config_registry.register(
         openai_model,
         lambda uri=None, model=openai_model: OpenAIConverterConfig(
-            llm_params=LLMParams(
-                model_name=model,
-                base_url=None,
-                api_key=os.getenv("OPENAI_API_KEY"),
-            )
+            model_name=model,
+            base_url=None,
+            api_key=os.getenv("OPENAI_API_KEY"),
+            default_model_name=model,
         ),
     )
 
-for converter_config_cls in [
-    ChandraConverterConfig,
-    LightOnOCRConverterConfig,
-    DotsOCRConverterConfig,
-    PaddleOCRVLConverterConfig,
-    NanonetOCR2ConverterConfig,
-    HunyuanOCRConverterConfig,
-    DeepSeekOCRConverterConfig,
-    GraniteDoclingConverterConfig,
-    OlmOCRConverterConfig,
-]:
-    aliases = get_default(converter_config_cls, "aliases") or []
-    model_name = get_default(converter_config_cls, "model_name")
-    names = [n for n in aliases + [model_name] if isinstance(n, str)]
-    for name in names:
-        converter_config_registry.register(
-            name,
-            lambda uri, cls=converter_config_cls: cls(
-                llm_params=LLMParams(
-                    base_url=uri,
-                    model_name=DEFAULT_MODEL_NAME,
-                    api_key="",
-                )
-            ),
-        )
-for converter_config_cls in [MinerUConverterConfig, DoclingConverterConfig]:
-    aliases = get_default(converter_config_cls, "aliases") or []
-    model_name = get_default(converter_config_cls, "model_name")
-    names = [n for n in aliases + [model_name] if isinstance(n, str)]
-    for name in names:
-        converter_config_registry.register(
-            name,
-            lambda uri, cls=converter_config_cls: cls(base_url=uri),
-        )
+for mistral_model in ["mistral-ocr-latest", "mistral-ocr"]:
+    converter_config_registry.register(
+        mistral_model,
+        lambda uri=None, model=mistral_model: MistralOCRConverterConfig(
+            base_url="https://api.mistral.ai/v1" if uri is None else uri,
+            api_key=os.getenv("MISTRAL_API_KEY"),
+        ),
+    )
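With the fallback loop removed, lookup failures are now loud: an unregistered name raises instead of silently becoming a default OpenAI config. A short sketch of both paths (the unknown name is a placeholder and the key below is fake):

    import os

    from vlmparse.registries import converter_config_registry

    os.environ.setdefault("MISTRAL_API_KEY", "placeholder-key")

    config = converter_config_registry.get("mistral-ocr")  # registered above
    try:
        converter_config_registry.get("no-such-model")
    except ValueError as err:
        print(err)  # Model 'no-such-model' not found in registry.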
vlmparse/servers/base_server.py ADDED
@@ -0,0 +1,127 @@
+"""Base classes for server configurations and server lifecycle management."""
+
+from abc import ABC, abstractmethod
+
+from loguru import logger
+from pydantic import Field
+
+from .model_identity import ModelIdentityMixin
+
+
+class BaseServerConfig(ModelIdentityMixin, ABC):
+    """Base configuration for deploying a server.
+
+    Inherits from ModelIdentityMixin which provides:
+    - model_name: str
+    - default_model_name: str | None
+    - aliases: list[str]
+    - _create_client_kwargs(base_url): Helper for creating client configs
+    - get_all_names(): All names this model can be referenced by
+
+    All server configs should inherit from this base class.
+    """
+
+    docker_port: int = 8056
+    container_port: int = 8000
+    gpu_device_ids: list[str] | None = None
+    environment: dict[str, str] = Field(default_factory=dict)
+    server_ready_indicators: list[str] = Field(
+        default_factory=lambda: [
+            "Application startup complete",
+            "Uvicorn running",
+            "Starting vLLM API server",
+        ]
+    )
+
+    class Config:
+        extra = "allow"
+
+    @property
+    @abstractmethod
+    def client_config(self):
+        """Override in subclasses to return appropriate client config."""
+        raise NotImplementedError
+
+    def get_client(self, **kwargs):
+        """Get a client instance configured for this server."""
+        return self.client_config.get_client(**kwargs)
+
+    @abstractmethod
+    def get_server(self, auto_stop: bool = True):
+        """Get a server instance for this configuration."""
+        raise NotImplementedError
+
+    def get_environment(self) -> dict | None:
+        """Setup environment variables. Override in subclasses for specific logic."""
+        return self.environment if self.environment else None
+
+    def get_base_url_suffix(self) -> str:
+        """Return URL suffix (e.g., '/v1' for OpenAI-compatible APIs). Override in subclasses."""
+        return ""
+
+    def update_command_args(
+        self,
+        vllm_args: dict | None = None,
+        forget_predefined_vllm_args: bool = False,
+    ) -> list[str]:
+        """Update command arguments. Override in subclasses that support this."""
+        _ = vllm_args, forget_predefined_vllm_args
+        return []
+
+
+class BaseServer(ABC):
+    """Base class for managing server lifecycle with start/stop methods.
+
+    All server implementations should inherit from this class.
+    """
+
+    def __init__(self, config: BaseServerConfig, auto_stop: bool = True):
+        self.config = config
+        self.auto_stop = auto_stop
+        self._server_context = None
+        self._container = None
+        self.base_url = None
+
+    @abstractmethod
+    def _create_server_context(self):
+        """Create the appropriate server context. Override in subclasses."""
+        raise NotImplementedError
+
+    def start(self):
+        """Start the server."""
+        if self._server_context is not None:
+            logger.warning("Server already started")
+            return self.base_url, self._container
+
+        self._server_context = self._create_server_context()
+        self.base_url, self._container = self._server_context.__enter__()
+        logger.info(f"Server started at {self.base_url}")
+        if self._container is not None:
+            logger.info(f"Container ID: {self._container.id}")
+            logger.info(f"Container name: {self._container.name}")
+        return self.base_url, self._container
+
+    def stop(self):
+        """Stop the server."""
+        if self._server_context is not None:
+            try:
+                self._server_context.__exit__(None, None, None)
+            except Exception as e:
+                logger.warning(f"Error during server cleanup: {e}")
+            finally:
+                self._server_context = None
+                self._container = None
+                self.base_url = None
+                logger.info("Server stopped")
+
+    def __del__(self):
+        """Automatically stop server when object is destroyed if auto_stop is True.
+
+        Note: This is a fallback mechanism. Prefer using the context manager
+        or explicitly calling stop() for reliable cleanup.
+        """
+        try:
+            if self.auto_stop and self._server_context is not None:
+                self.stop()
+        except Exception:
+            pass  # Suppress errors during garbage collection
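For orientation, the contract these base classes impose on concrete servers can be sketched with a toy subclass; EchoServerConfig and EchoServer are hypothetical and exist only to show which members must be provided (a real subclass returns a usable client config and a Docker-backed context manager yielding (base_url, container)):

    from contextlib import contextmanager

    from vlmparse.servers.base_server import BaseServer, BaseServerConfig


    class EchoServerConfig(BaseServerConfig):
        model_name: str = "example/echo-model"  # hypothetical model id

        @property
        def client_config(self):
            raise NotImplementedError  # real configs return an OpenAI-style client config

        def get_server(self, auto_stop: bool = True):
            return EchoServer(self, auto_stop=auto_stop)


    class EchoServer(BaseServer):
        def _create_server_context(self):
            @contextmanager
            def ctx():
                # A real implementation would start a container here and tear it
                # down on exit; this sketch yields a static URL and no container.
                yield f"http://localhost:{self.config.docker_port}", None

            return ctx()


    server = EchoServerConfig().get_server()
    base_url, container = server.start()  # logs "Server started at http://localhost:8056"
    server.stop()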