prompture 0.0.29.dev8__py3-none-any.whl → 0.0.35__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. prompture/__init__.py +146 -23
  2. prompture/_version.py +34 -0
  3. prompture/aio/__init__.py +74 -0
  4. prompture/async_conversation.py +607 -0
  5. prompture/async_core.py +803 -0
  6. prompture/async_driver.py +169 -0
  7. prompture/cache.py +469 -0
  8. prompture/callbacks.py +55 -0
  9. prompture/cli.py +63 -4
  10. prompture/conversation.py +631 -0
  11. prompture/core.py +876 -263
  12. prompture/cost_mixin.py +51 -0
  13. prompture/discovery.py +164 -0
  14. prompture/driver.py +168 -5
  15. prompture/drivers/__init__.py +173 -69
  16. prompture/drivers/airllm_driver.py +109 -0
  17. prompture/drivers/async_airllm_driver.py +26 -0
  18. prompture/drivers/async_azure_driver.py +117 -0
  19. prompture/drivers/async_claude_driver.py +107 -0
  20. prompture/drivers/async_google_driver.py +132 -0
  21. prompture/drivers/async_grok_driver.py +91 -0
  22. prompture/drivers/async_groq_driver.py +84 -0
  23. prompture/drivers/async_hugging_driver.py +61 -0
  24. prompture/drivers/async_lmstudio_driver.py +79 -0
  25. prompture/drivers/async_local_http_driver.py +44 -0
  26. prompture/drivers/async_ollama_driver.py +125 -0
  27. prompture/drivers/async_openai_driver.py +96 -0
  28. prompture/drivers/async_openrouter_driver.py +96 -0
  29. prompture/drivers/async_registry.py +129 -0
  30. prompture/drivers/azure_driver.py +36 -9
  31. prompture/drivers/claude_driver.py +251 -34
  32. prompture/drivers/google_driver.py +107 -38
  33. prompture/drivers/grok_driver.py +29 -32
  34. prompture/drivers/groq_driver.py +27 -26
  35. prompture/drivers/hugging_driver.py +6 -6
  36. prompture/drivers/lmstudio_driver.py +26 -13
  37. prompture/drivers/local_http_driver.py +6 -6
  38. prompture/drivers/ollama_driver.py +157 -23
  39. prompture/drivers/openai_driver.py +178 -9
  40. prompture/drivers/openrouter_driver.py +31 -25
  41. prompture/drivers/registry.py +306 -0
  42. prompture/field_definitions.py +106 -96
  43. prompture/logging.py +80 -0
  44. prompture/model_rates.py +217 -0
  45. prompture/runner.py +49 -47
  46. prompture/scaffold/__init__.py +1 -0
  47. prompture/scaffold/generator.py +84 -0
  48. prompture/scaffold/templates/Dockerfile.j2 +12 -0
  49. prompture/scaffold/templates/README.md.j2 +41 -0
  50. prompture/scaffold/templates/config.py.j2 +21 -0
  51. prompture/scaffold/templates/env.example.j2 +8 -0
  52. prompture/scaffold/templates/main.py.j2 +86 -0
  53. prompture/scaffold/templates/models.py.j2 +40 -0
  54. prompture/scaffold/templates/requirements.txt.j2 +5 -0
  55. prompture/server.py +183 -0
  56. prompture/session.py +117 -0
  57. prompture/settings.py +18 -1
  58. prompture/tools.py +219 -267
  59. prompture/tools_schema.py +254 -0
  60. prompture/validator.py +3 -3
  61. {prompture-0.0.29.dev8.dist-info → prompture-0.0.35.dist-info}/METADATA +117 -21
  62. prompture-0.0.35.dist-info/RECORD +66 -0
  63. {prompture-0.0.29.dev8.dist-info → prompture-0.0.35.dist-info}/WHEEL +1 -1
  64. prompture-0.0.29.dev8.dist-info/RECORD +0 -27
  65. {prompture-0.0.29.dev8.dist-info → prompture-0.0.35.dist-info}/entry_points.txt +0 -0
  66. {prompture-0.0.29.dev8.dist-info → prompture-0.0.35.dist-info}/licenses/LICENSE +0 -0
  67. {prompture-0.0.29.dev8.dist-info → prompture-0.0.35.dist-info}/top_level.txt +0 -0
@@ -1,17 +1,27 @@
1
1
  """Minimal OpenAI driver (migrated to openai>=1.0.0).
2
2
  Requires the `openai` package. Uses OPENAI_API_KEY env var.
3
3
  """
4
+
5
+ import json
4
6
  import os
5
- from typing import Any, Dict
7
+ from collections.abc import Iterator
8
+ from typing import Any
9
+
6
10
  try:
7
11
  from openai import OpenAI
8
12
  except Exception:
9
13
  OpenAI = None
10
14
 
15
+ from ..cost_mixin import CostMixin
11
16
  from ..driver import Driver
12
17
 
13
18
 
14
- class OpenAIDriver(Driver):
19
+ class OpenAIDriver(CostMixin, Driver):
20
+ supports_json_mode = True
21
+ supports_json_schema = True
22
+ supports_tool_use = True
23
+ supports_streaming = True
24
+
15
25
  # Approximate pricing per 1K tokens (keep updated with OpenAI's official pricing)
16
26
  # Each model entry also defines which token parameter it supports and
17
27
  # whether it accepts temperature.
@@ -62,7 +72,16 @@ class OpenAIDriver(Driver):
62
72
  else:
63
73
  self.client = None
64
74
 
65
- def generate(self, prompt: str, options: Dict[str, Any]) -> Dict[str, Any]:
75
+ supports_messages = True
76
+
77
+ def generate(self, prompt: str, options: dict[str, Any]) -> dict[str, Any]:
78
+ messages = [{"role": "user", "content": prompt}]
79
+ return self._do_generate(messages, options)
80
+
81
+ def generate_messages(self, messages: list[dict[str, Any]], options: dict[str, Any]) -> dict[str, Any]:
82
+ return self._do_generate(messages, options)
83
+
84
+ def _do_generate(self, messages: list[dict[str, Any]], options: dict[str, Any]) -> dict[str, Any]:
66
85
  if self.client is None:
67
86
  raise RuntimeError("openai package (>=1.0.0) is not installed")
68
87
 
@@ -79,7 +98,7 @@ class OpenAIDriver(Driver):
79
98
  # Base kwargs
80
99
  kwargs = {
81
100
  "model": model,
82
- "messages": [{"role": "user", "content": prompt}],
101
+ "messages": messages,
83
102
  }
84
103
 
85
104
  # Assign token limit with the correct parameter name
@@ -89,6 +108,21 @@ class OpenAIDriver(Driver):
89
108
  if supports_temperature and "temperature" in opts:
90
109
  kwargs["temperature"] = opts["temperature"]
91
110
 
111
+ # Native JSON mode support
112
+ if options.get("json_mode"):
113
+ json_schema = options.get("json_schema")
114
+ if json_schema:
115
+ kwargs["response_format"] = {
116
+ "type": "json_schema",
117
+ "json_schema": {
118
+ "name": "extraction",
119
+ "strict": True,
120
+ "schema": json_schema,
121
+ },
122
+ }
123
+ else:
124
+ kwargs["response_format"] = {"type": "json_object"}
125
+
92
126
  resp = self.client.chat.completions.create(**kwargs)
93
127
 
94
128
  # Extract usage info
@@ -97,11 +131,8 @@ class OpenAIDriver(Driver):
97
131
  completion_tokens = getattr(usage, "completion_tokens", 0)
98
132
  total_tokens = getattr(usage, "total_tokens", 0)
99
133
 
100
- # Calculate cost
101
- model_pricing = self.MODEL_PRICING.get(model, {"prompt": 0, "completion": 0})
102
- prompt_cost = (prompt_tokens / 1000) * model_pricing["prompt"]
103
- completion_cost = (completion_tokens / 1000) * model_pricing["completion"]
104
- total_cost = prompt_cost + completion_cost
134
+ # Calculate cost via shared mixin
135
+ total_cost = self._calculate_cost("openai", model, prompt_tokens, completion_tokens)
105
136
 
106
137
  # Standardized meta object
107
138
  meta = {
@@ -115,3 +146,141 @@ class OpenAIDriver(Driver):
115
146
 
116
147
  text = resp.choices[0].message.content
117
148
  return {"text": text, "meta": meta}
149
+
150
+ # ------------------------------------------------------------------
151
+ # Tool use
152
+ # ------------------------------------------------------------------
153
+
154
+ def generate_messages_with_tools(
155
+ self,
156
+ messages: list[dict[str, Any]],
157
+ tools: list[dict[str, Any]],
158
+ options: dict[str, Any],
159
+ ) -> dict[str, Any]:
160
+ """Generate a response that may include tool calls."""
161
+ if self.client is None:
162
+ raise RuntimeError("openai package (>=1.0.0) is not installed")
163
+
164
+ model = options.get("model", self.model)
165
+ model_info = self.MODEL_PRICING.get(model, {})
166
+ tokens_param = model_info.get("tokens_param", "max_tokens")
167
+ supports_temperature = model_info.get("supports_temperature", True)
168
+
169
+ opts = {"temperature": 1.0, "max_tokens": 512, **options}
170
+
171
+ kwargs: dict[str, Any] = {
172
+ "model": model,
173
+ "messages": messages,
174
+ "tools": tools,
175
+ }
176
+ kwargs[tokens_param] = opts.get("max_tokens", 512)
177
+
178
+ if supports_temperature and "temperature" in opts:
179
+ kwargs["temperature"] = opts["temperature"]
180
+
181
+ resp = self.client.chat.completions.create(**kwargs)
182
+
183
+ usage = getattr(resp, "usage", None)
184
+ prompt_tokens = getattr(usage, "prompt_tokens", 0)
185
+ completion_tokens = getattr(usage, "completion_tokens", 0)
186
+ total_tokens = getattr(usage, "total_tokens", 0)
187
+ total_cost = self._calculate_cost("openai", model, prompt_tokens, completion_tokens)
188
+
189
+ meta = {
190
+ "prompt_tokens": prompt_tokens,
191
+ "completion_tokens": completion_tokens,
192
+ "total_tokens": total_tokens,
193
+ "cost": round(total_cost, 6),
194
+ "raw_response": resp.model_dump(),
195
+ "model_name": model,
196
+ }
197
+
198
+ choice = resp.choices[0]
199
+ text = choice.message.content or ""
200
+ stop_reason = choice.finish_reason
201
+
202
+ tool_calls_out: list[dict[str, Any]] = []
203
+ if choice.message.tool_calls:
204
+ for tc in choice.message.tool_calls:
205
+ try:
206
+ args = json.loads(tc.function.arguments)
207
+ except (json.JSONDecodeError, TypeError):
208
+ args = {}
209
+ tool_calls_out.append({
210
+ "id": tc.id,
211
+ "name": tc.function.name,
212
+ "arguments": args,
213
+ })
214
+
215
+ return {
216
+ "text": text,
217
+ "meta": meta,
218
+ "tool_calls": tool_calls_out,
219
+ "stop_reason": stop_reason,
220
+ }
221
+
222
+ # ------------------------------------------------------------------
223
+ # Streaming
224
+ # ------------------------------------------------------------------
225
+
226
+ def generate_messages_stream(
227
+ self,
228
+ messages: list[dict[str, Any]],
229
+ options: dict[str, Any],
230
+ ) -> Iterator[dict[str, Any]]:
231
+ """Yield response chunks via OpenAI streaming API."""
232
+ if self.client is None:
233
+ raise RuntimeError("openai package (>=1.0.0) is not installed")
234
+
235
+ model = options.get("model", self.model)
236
+ model_info = self.MODEL_PRICING.get(model, {})
237
+ tokens_param = model_info.get("tokens_param", "max_tokens")
238
+ supports_temperature = model_info.get("supports_temperature", True)
239
+
240
+ opts = {"temperature": 1.0, "max_tokens": 512, **options}
241
+
242
+ kwargs: dict[str, Any] = {
243
+ "model": model,
244
+ "messages": messages,
245
+ "stream": True,
246
+ "stream_options": {"include_usage": True},
247
+ }
248
+ kwargs[tokens_param] = opts.get("max_tokens", 512)
249
+
250
+ if supports_temperature and "temperature" in opts:
251
+ kwargs["temperature"] = opts["temperature"]
252
+
253
+ stream = self.client.chat.completions.create(**kwargs)
254
+
255
+ full_text = ""
256
+ prompt_tokens = 0
257
+ completion_tokens = 0
258
+
259
+ for chunk in stream:
260
+ # Usage comes in the final chunk
261
+ if getattr(chunk, "usage", None):
262
+ prompt_tokens = chunk.usage.prompt_tokens or 0
263
+ completion_tokens = chunk.usage.completion_tokens or 0
264
+
265
+ if chunk.choices:
266
+ delta = chunk.choices[0].delta
267
+ content = getattr(delta, "content", None) or ""
268
+ if content:
269
+ full_text += content
270
+ yield {"type": "delta", "text": content}
271
+
272
+ total_tokens = prompt_tokens + completion_tokens
273
+ total_cost = self._calculate_cost("openai", model, prompt_tokens, completion_tokens)
274
+
275
+ yield {
276
+ "type": "done",
277
+ "text": full_text,
278
+ "meta": {
279
+ "prompt_tokens": prompt_tokens,
280
+ "completion_tokens": completion_tokens,
281
+ "total_tokens": total_tokens,
282
+ "cost": round(total_cost, 6),
283
+ "raw_response": {},
284
+ "model_name": model,
285
+ },
286
+ }
@@ -1,14 +1,19 @@
1
1
  """OpenRouter driver implementation.
2
2
  Requires the `requests` package. Uses OPENROUTER_API_KEY env var.
3
3
  """
4
+
4
5
  import os
5
- from typing import Any, Dict
6
+ from typing import Any
7
+
6
8
  import requests
7
9
 
10
+ from ..cost_mixin import CostMixin
8
11
  from ..driver import Driver
9
12
 
10
13
 
11
- class OpenRouterDriver(Driver):
14
+ class OpenRouterDriver(CostMixin, Driver):
15
+ supports_json_mode = True
16
+
12
17
  # Approximate pricing per 1K tokens based on OpenRouter's pricing
13
18
  # https://openrouter.ai/docs#pricing
14
19
  MODEL_PRICING = {
@@ -40,7 +45,7 @@ class OpenRouterDriver(Driver):
40
45
 
41
46
  def __init__(self, api_key: str | None = None, model: str = "openai/gpt-3.5-turbo"):
42
47
  """Initialize OpenRouter driver.
43
-
48
+
44
49
  Args:
45
50
  api_key: OpenRouter API key. If not provided, will look for OPENROUTER_API_KEY env var
46
51
  model: Model to use. Defaults to openai/gpt-3.5-turbo
@@ -48,10 +53,10 @@ class OpenRouterDriver(Driver):
48
53
  self.api_key = api_key or os.getenv("OPENROUTER_API_KEY")
49
54
  if not self.api_key:
50
55
  raise ValueError("OpenRouter API key not found. Set OPENROUTER_API_KEY env var.")
51
-
56
+
52
57
  self.model = model
53
58
  self.base_url = "https://openrouter.ai/api/v1"
54
-
59
+
55
60
  # Required headers for OpenRouter
56
61
  self.headers = {
57
62
  "Authorization": f"Bearer {self.api_key}",
@@ -59,21 +64,21 @@ class OpenRouterDriver(Driver):
59
64
  "Content-Type": "application/json",
60
65
  }
61
66
 
62
- def generate(self, prompt: str, options: Dict[str, Any]) -> Dict[str, Any]:
63
- """Generate completion using OpenRouter API.
64
-
65
- Args:
66
- prompt: The prompt text
67
- options: Generation options
68
-
69
- Returns:
70
- Dict containing generated text and metadata
71
- """
67
+ supports_messages = True
68
+
69
+ def generate(self, prompt: str, options: dict[str, Any]) -> dict[str, Any]:
70
+ messages = [{"role": "user", "content": prompt}]
71
+ return self._do_generate(messages, options)
72
+
73
+ def generate_messages(self, messages: list[dict[str, str]], options: dict[str, Any]) -> dict[str, Any]:
74
+ return self._do_generate(messages, options)
75
+
76
+ def _do_generate(self, messages: list[dict[str, str]], options: dict[str, Any]) -> dict[str, Any]:
72
77
  if not self.api_key:
73
78
  raise RuntimeError("OpenRouter API key not found")
74
79
 
75
80
  model = options.get("model", self.model)
76
-
81
+
77
82
  # Lookup model-specific config
78
83
  model_info = self.MODEL_PRICING.get(model, {})
79
84
  tokens_param = model_info.get("tokens_param", "max_tokens")
@@ -85,7 +90,7 @@ class OpenRouterDriver(Driver):
85
90
  # Base request data
86
91
  data = {
87
92
  "model": model,
88
- "messages": [{"role": "user", "content": prompt}],
93
+ "messages": messages,
89
94
  }
90
95
 
91
96
  # Add token limit with correct parameter name
@@ -95,6 +100,10 @@ class OpenRouterDriver(Driver):
95
100
  if supports_temperature and "temperature" in opts:
96
101
  data["temperature"] = opts["temperature"]
97
102
 
103
+ # Native JSON mode support
104
+ if options.get("json_mode"):
105
+ data["response_format"] = {"type": "json_object"}
106
+
98
107
  try:
99
108
  response = requests.post(
100
109
  f"{self.base_url}/chat/completions",
@@ -110,11 +119,8 @@ class OpenRouterDriver(Driver):
110
119
  completion_tokens = usage.get("completion_tokens", 0)
111
120
  total_tokens = usage.get("total_tokens", 0)
112
121
 
113
- # Calculate cost
114
- model_pricing = self.MODEL_PRICING.get(model, {"prompt": 0, "completion": 0})
115
- prompt_cost = (prompt_tokens / 1000) * model_pricing["prompt"]
116
- completion_cost = (completion_tokens / 1000) * model_pricing["completion"]
117
- total_cost = prompt_cost + completion_cost
122
+ # Calculate cost via shared mixin
123
+ total_cost = self._calculate_cost("openrouter", model, prompt_tokens, completion_tokens)
118
124
 
119
125
  # Standardized meta object
120
126
  meta = {
@@ -130,11 +136,11 @@ class OpenRouterDriver(Driver):
130
136
  return {"text": text, "meta": meta}
131
137
 
132
138
  except requests.exceptions.RequestException as e:
133
- error_msg = f"OpenRouter API request failed: {str(e)}"
134
- if hasattr(e.response, 'json'):
139
+ error_msg = f"OpenRouter API request failed: {e!s}"
140
+ if hasattr(e.response, "json"):
135
141
  try:
136
142
  error_details = e.response.json()
137
143
  error_msg = f"{error_msg} - {error_details.get('error', {}).get('message', '')}"
138
144
  except Exception:
139
145
  pass
140
- raise RuntimeError(error_msg) from e
146
+ raise RuntimeError(error_msg) from e
@@ -0,0 +1,306 @@
1
+ """Driver registry with plugin support.
2
+
3
+ This module provides a public API for registering custom drivers and
4
+ supports auto-discovery of drivers via Python entry points.
5
+
6
+ Example usage:
7
+ # Register a custom driver
8
+ from prompture import register_driver
9
+
10
+ def my_driver_factory(model=None):
11
+ return MyCustomDriver(model=model)
12
+
13
+ register_driver("my_provider", my_driver_factory)
14
+
15
+ # Now you can use it
16
+ driver = get_driver_for_model("my_provider/my-model")
17
+
18
+ For entry point discovery, add to your package's pyproject.toml:
19
+ [project.entry-points."prompture.drivers"]
20
+ my_provider = "my_package.drivers:my_driver_factory"
21
+
22
+ [project.entry-points."prompture.async_drivers"]
23
+ my_provider = "my_package.drivers:my_async_driver_factory"
24
+ """
25
+
26
+ from __future__ import annotations
27
+
28
+ import logging
29
+ import sys
30
+ from typing import Callable
31
+
32
+ logger = logging.getLogger("prompture.drivers.registry")
33
+
34
+ # Type alias for driver factory functions
35
+ # A factory takes an optional model name and returns a driver instance
36
+ DriverFactory = Callable[[str | None], object]
37
+
38
+ # Internal registries - populated by built-in drivers and plugins
39
+ _SYNC_REGISTRY: dict[str, DriverFactory] = {}
40
+ _ASYNC_REGISTRY: dict[str, DriverFactory] = {}
41
+
42
+ # Track whether entry points have been loaded
43
+ _entry_points_loaded = False
44
+
45
+
46
def register_driver(name: str, factory: DriverFactory, *, overwrite: bool = False) -> None:
    """Register a custom sync driver factory under a provider name.

    Args:
        name: Provider name (e.g., "my_provider"); lowercased before storing.
        factory: Callable taking an optional model name and returning a
            driver instance implementing the ``Driver`` interface
            (specifically ``generate()``).
        overwrite: When True, an existing registration may be replaced.

    Raises:
        ValueError: If the name is already registered and ``overwrite``
            is False.

    Example:
        >>> def my_factory(model=None):
        ...     return MyDriver(model=model or "default-model")
        >>> register_driver("my_provider", my_factory)
        >>> driver = get_driver_for_model("my_provider/custom-model")
    """
    key = name.lower()
    if key in _SYNC_REGISTRY and not overwrite:
        raise ValueError(f"Driver '{key}' is already registered. Use overwrite=True to replace it.")
    _SYNC_REGISTRY[key] = factory
    logger.debug("Registered sync driver: %s", key)
72
+
73
+
74
def register_async_driver(name: str, factory: DriverFactory, *, overwrite: bool = False) -> None:
    """Register a custom async driver factory under a provider name.

    Args:
        name: Provider name (e.g., "my_provider"); lowercased before storing.
        factory: Callable taking an optional model name and returning an
            async driver instance implementing the ``AsyncDriver`` interface
            (specifically ``async generate()``).
        overwrite: When True, an existing registration may be replaced.

    Raises:
        ValueError: If the name is already registered and ``overwrite``
            is False.

    Example:
        >>> def my_async_factory(model=None):
        ...     return MyAsyncDriver(model=model or "default-model")
        >>> register_async_driver("my_provider", my_async_factory)
        >>> driver = get_async_driver_for_model("my_provider/custom-model")
    """
    key = name.lower()
    if key in _ASYNC_REGISTRY and not overwrite:
        raise ValueError(f"Async driver '{key}' is already registered. Use overwrite=True to replace it.")
    _ASYNC_REGISTRY[key] = factory
    logger.debug("Registered async driver: %s", key)
100
+
101
+
102
def unregister_driver(name: str) -> bool:
    """Remove a sync driver registration.

    Args:
        name: Provider name to unregister (case-insensitive).

    Returns:
        True when a registration was removed, False when none existed.
    """
    key = name.lower()
    if key not in _SYNC_REGISTRY:
        return False
    del _SYNC_REGISTRY[key]
    logger.debug("Unregistered sync driver: %s", key)
    return True
117
+
118
+
119
def unregister_async_driver(name: str) -> bool:
    """Remove an async driver registration.

    Args:
        name: Provider name to unregister (case-insensitive).

    Returns:
        True when a registration was removed, False when none existed.
    """
    key = name.lower()
    if key not in _ASYNC_REGISTRY:
        return False
    del _ASYNC_REGISTRY[key]
    logger.debug("Unregistered async driver: %s", key)
    return True
134
+
135
+
136
def list_registered_drivers() -> list[str]:
    """Return all registered sync provider names, sorted alphabetically."""
    _ensure_entry_points_loaded()
    return sorted(_SYNC_REGISTRY)
140
+
141
+
142
def list_registered_async_drivers() -> list[str]:
    """Return all registered async provider names, sorted alphabetically."""
    _ensure_entry_points_loaded()
    return sorted(_ASYNC_REGISTRY)
146
+
147
+
148
def is_driver_registered(name: str) -> bool:
    """Report whether a sync driver exists for *name* (case-insensitive).

    Args:
        name: Provider name to check.

    Returns:
        True if a sync driver is registered under the lowercased name.
    """
    _ensure_entry_points_loaded()
    key = name.lower()
    return key in _SYNC_REGISTRY
159
+
160
+
161
def is_async_driver_registered(name: str) -> bool:
    """Report whether an async driver exists for *name* (case-insensitive).

    Args:
        name: Provider name to check.

    Returns:
        True if an async driver is registered under the lowercased name.
    """
    _ensure_entry_points_loaded()
    key = name.lower()
    return key in _ASYNC_REGISTRY
172
+
173
+
174
def get_driver_factory(name: str) -> DriverFactory:
    """Look up the sync driver factory registered for *name*.

    Args:
        name: Provider name (case-insensitive).

    Returns:
        The registered factory callable.

    Raises:
        ValueError: If no sync driver is registered under *name*.
    """
    _ensure_entry_points_loaded()
    key = name.lower()
    if key in _SYNC_REGISTRY:
        return _SYNC_REGISTRY[key]
    raise ValueError(f"Unsupported provider '{key}'")
191
+
192
+
193
def get_async_driver_factory(name: str) -> DriverFactory:
    """Look up the async driver factory registered for *name*.

    Args:
        name: Provider name (case-insensitive).

    Returns:
        The registered factory callable.

    Raises:
        ValueError: If no async driver is registered under *name*.
    """
    _ensure_entry_points_loaded()
    key = name.lower()
    if key in _ASYNC_REGISTRY:
        return _ASYNC_REGISTRY[key]
    raise ValueError(f"Unsupported provider '{key}'")
210
+
211
+
212
def load_entry_point_drivers() -> tuple[int, int]:
    """Load drivers from installed packages via entry points.

    Scans the ``prompture.drivers`` and ``prompture.async_drivers`` entry
    point groups and registers each factory that is not already present.
    Built-in or explicitly registered drivers always take precedence, and
    a plugin that fails to load is logged and skipped rather than breaking
    the remaining plugins.

    Returns:
        A tuple ``(sync_count, async_count)`` with the number of drivers
        loaded from entry points into each registry.

    Example pyproject.toml for a plugin package:
        [project.entry-points."prompture.drivers"]
        my_provider = "my_package.drivers:create_my_driver"

        [project.entry-points."prompture.async_drivers"]
        my_provider = "my_package.drivers:create_my_async_driver"
    """
    global _entry_points_loaded

    # The keyword-filtered entry_points(group=...) API exists from
    # Python 3.10; on 3.9 entry_points() returns a plain dict keyed by
    # group name, so fall back to .get() there.
    if sys.version_info >= (3, 10):
        from importlib.metadata import entry_points

        sync_eps = entry_points(group="prompture.drivers")
        async_eps = entry_points(group="prompture.async_drivers")
    else:
        from importlib.metadata import entry_points

        all_eps = entry_points()
        sync_eps = all_eps.get("prompture.drivers", [])
        async_eps = all_eps.get("prompture.async_drivers", [])

    def _load_group(eps, registry, skip_msg, loaded_msg, failed_msg):
        # One loader shared by both groups; log templates are passed in so
        # the emitted messages stay identical for sync and async loads.
        count = 0
        for ep in eps:
            try:
                # Skip if already registered (built-in drivers take precedence)
                if ep.name.lower() in registry:
                    logger.debug(skip_msg, ep.name)
                    continue

                registry[ep.name.lower()] = ep.load()
                count += 1
                logger.info(loaded_msg, ep.name)
            except Exception:
                logger.exception(failed_msg, ep.name)
        return count

    sync_count = _load_group(
        sync_eps,
        _SYNC_REGISTRY,
        "Skipping entry point driver '%s' (already registered)",
        "Loaded sync driver from entry point: %s",
        "Failed to load sync driver entry point: %s",
    )
    async_count = _load_group(
        async_eps,
        _ASYNC_REGISTRY,
        "Skipping entry point async driver '%s' (already registered)",
        "Loaded async driver from entry point: %s",
        "Failed to load async driver entry point: %s",
    )

    _entry_points_loaded = True
    return (sync_count, async_count)
280
+
281
+
282
def _ensure_entry_points_loaded() -> None:
    """Load entry point drivers on first use (lazy initialization).

    ``load_entry_point_drivers()`` is responsible for flipping the
    ``_entry_points_loaded`` flag; this function only reads the module
    global, so no ``global`` declaration is needed here.
    """
    if not _entry_points_loaded:
        load_entry_point_drivers()
287
+
288
+
289
def _get_sync_registry() -> dict[str, DriverFactory]:
    """Return the live sync registry dict, entry points loaded.

    Internal accessor for drivers/__init__.py; the dict is returned by
    reference, not copied.
    """
    _ensure_entry_points_loaded()
    return _SYNC_REGISTRY
293
+
294
+
295
def _get_async_registry() -> dict[str, DriverFactory]:
    """Return the live async registry dict, entry points loaded.

    Internal accessor for drivers/async_registry.py; the dict is returned
    by reference, not copied.
    """
    _ensure_entry_points_loaded()
    return _ASYNC_REGISTRY
299
+
300
+
301
def _reset_registries() -> None:
    """Testing helper: empty both registries and clear the loaded flag."""
    global _entry_points_loaded
    for registry in (_SYNC_REGISTRY, _ASYNC_REGISTRY):
        registry.clear()
    _entry_points_loaded = False