prompture 0.0.38.dev3-py3-none-any.whl → 0.0.39.dev1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
prompture/__init__.py CHANGED
@@ -111,7 +111,13 @@ from .image import (
     make_image,
 )
 from .logging import JSONFormatter, configure_logging
-from .model_rates import get_model_info, get_model_rates, refresh_rates_cache
+from .model_rates import (
+    ModelCapabilities,
+    get_model_capabilities,
+    get_model_info,
+    get_model_rates,
+    refresh_rates_cache,
+)
 from .persistence import ConversationStore
 from .persona import (
     PERSONAS,
@@ -213,6 +219,7 @@ __all__ = [
     "LocalHTTPDriver",
     "LoopGroup",
     "MemoryCacheBackend",
+    "ModelCapabilities",
     "ModelRetry",
     "OllamaDriver",
     "OpenAIDriver",
@@ -255,6 +262,7 @@ __all__ = [
     "get_driver_for_model",
     "get_field_definition",
     "get_field_names",
+    "get_model_capabilities",
     "get_model_info",
     "get_model_rates",
     "get_persona",
prompture/_version.py CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID
 
-__version__ = version = '0.0.38.dev3'
-__version_tuple__ = version_tuple = (0, 0, 38, 'dev3')
+__version__ = version = '0.0.39.dev1'
+__version_tuple__ = version_tuple = (0, 0, 39, 'dev1')
 
 __commit_id__ = commit_id = None
prompture/async_driver.py CHANGED
@@ -166,6 +166,45 @@ class AsyncDriver:
         except Exception:
             logger.exception("Callback %s raised an exception", event)
 
+    def _validate_model_capabilities(
+        self,
+        provider: str,
+        model: str,
+        *,
+        using_tool_use: bool = False,
+        using_json_schema: bool = False,
+        using_vision: bool = False,
+    ) -> None:
+        """Log warnings when the model may not support a requested feature.
+
+        Uses models.dev metadata as a secondary signal. Warnings only — the
+        API is the final authority and models.dev data may be stale.
+        """
+        from .model_rates import get_model_capabilities
+
+        caps = get_model_capabilities(provider, model)
+        if caps is None:
+            return
+
+        if using_tool_use and caps.supports_tool_use is False:
+            logger.warning(
+                "Model %s/%s may not support tool use according to models.dev metadata",
+                provider,
+                model,
+            )
+        if using_json_schema and caps.supports_structured_output is False:
+            logger.warning(
+                "Model %s/%s may not support structured output / JSON schema according to models.dev metadata",
+                provider,
+                model,
+            )
+        if using_vision and caps.supports_vision is False:
+            logger.warning(
+                "Model %s/%s may not support vision/image inputs according to models.dev metadata",
+                provider,
+                model,
+            )
+
     def _check_vision_support(self, messages: list[dict[str, Any]]) -> None:
         """Raise if messages contain image blocks and the driver lacks vision support."""
         if self.supports_vision:
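
The checks above are deliberately tri-state: a warning fires only when models.dev explicitly reports False, while an unknown capability (None) stays silent. A minimal sketch of the distinction, using the same `is False` test the method applies:

    from prompture.model_rates import ModelCapabilities

    explicit_no = ModelCapabilities(supports_tool_use=False)  # would trigger the warning
    unknown = ModelCapabilities()                             # all fields None: no warning

    for caps in (explicit_no, unknown):
        if caps.supports_tool_use is False:
            print("warn: tool use reported unsupported")
        else:
            print("silent: supported or unknown")
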
prompture/cost_mixin.py CHANGED
@@ -49,3 +49,40 @@ class CostMixin:
         completion_cost = (completion_tokens / unit) * model_pricing["completion"]
 
         return round(prompt_cost + completion_cost, 6)
+
+    def _get_model_config(self, provider: str, model: str) -> dict[str, Any]:
+        """Merge live models.dev capabilities with hardcoded ``MODEL_PRICING``.
+
+        Returns a dict with:
+        - ``tokens_param`` — always from hardcoded ``MODEL_PRICING`` (API-specific)
+        - ``supports_temperature`` — prefers live data, falls back to hardcoded, default ``True``
+        - ``context_window`` — from live data only (``None`` if unavailable)
+        - ``max_output_tokens`` — from live data only (``None`` if unavailable)
+        """
+        from .model_rates import get_model_capabilities
+
+        hardcoded = self.MODEL_PRICING.get(model, {})
+
+        # tokens_param is always from hardcoded config (API-specific, not in models.dev)
+        tokens_param = hardcoded.get("tokens_param", "max_tokens")
+
+        # Start with hardcoded supports_temperature, default True
+        supports_temperature = hardcoded.get("supports_temperature", True)
+
+        context_window: int | None = None
+        max_output_tokens: int | None = None
+
+        # Override with live data when available
+        caps = get_model_capabilities(provider, model)
+        if caps is not None:
+            if caps.supports_temperature is not None:
+                supports_temperature = caps.supports_temperature
+            context_window = caps.context_window
+            max_output_tokens = caps.max_output_tokens
+
+        return {
+            "tokens_param": tokens_param,
+            "supports_temperature": supports_temperature,
+            "context_window": context_window,
+            "max_output_tokens": max_output_tokens,
+        }
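
A worked example of the precedence rules (all values illustrative): tokens_param always comes from the hardcoded table, while a live models.dev value for supports_temperature overrides the hardcoded one when present:

    hardcoded = {"tokens_param": "max_completion_tokens", "supports_temperature": False}

    supports_temperature = hardcoded.get("supports_temperature", True)  # False so far
    live_value = True  # stand-in for caps.supports_temperature from models.dev
    if live_value is not None:
        supports_temperature = live_value  # live data wins -> True

    tokens_param = hardcoded.get("tokens_param", "max_tokens")  # stays hardcoded
    print(tokens_param, supports_temperature)  # max_completion_tokens True
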
prompture/discovery.py CHANGED
@@ -1,7 +1,11 @@
 """Discovery module for auto-detecting available models."""
 
+from __future__ import annotations
+
+import dataclasses
 import logging
 import os
+from typing import Any, overload
 
 import requests
 
@@ -22,23 +26,34 @@ from .settings import settings
 logger = logging.getLogger(__name__)
 
 
-def get_available_models() -> list[str]:
-    """
-    Auto-detects all available models based on configured drivers and environment variables.
+@overload
+def get_available_models(*, include_capabilities: bool = False) -> list[str]: ...
+
+
+@overload
+def get_available_models(*, include_capabilities: bool = True) -> list[dict[str, Any]]: ...
 
-    Iterates through supported providers and checks if they are configured (e.g. API key present).
-    For static drivers, returns models from their MODEL_PRICING keys.
-    For dynamic drivers (like Ollama), attempts to fetch available models from the endpoint.
+
+def get_available_models(*, include_capabilities: bool = False) -> list[str] | list[dict[str, Any]]:
+    """Auto-detect available models based on configured drivers and environment variables.
+
+    Iterates through supported providers and checks if they are configured
+    (e.g. API key present). For static drivers, returns models from their
+    ``MODEL_PRICING`` keys. For dynamic drivers (like Ollama), attempts to
+    fetch available models from the endpoint.
+
+    Args:
+        include_capabilities: When ``True``, return enriched dicts with
+            ``model``, ``provider``, ``model_id``, and ``capabilities``
+            fields instead of plain ``"provider/model_id"`` strings.
 
     Returns:
-        A list of unique model strings in the format "provider/model_id".
+        A sorted list of unique model strings (default) or enriched dicts.
     """
     available_models: set[str] = set()
     configured_providers: set[str] = set()
 
     # Map of provider name to driver class
-    # We need to map the registry keys to the actual classes to check MODEL_PRICING
-    # and instantiate for dynamic checks if needed.
     provider_classes = {
         "openai": OpenAIDriver,
         "azure": AzureDriver,
@@ -54,11 +69,6 @@ def get_available_models() -> list[str]:
 
     for provider, driver_cls in provider_classes.items():
         try:
-            # 1. Check if the provider is configured (has API key or endpoint)
-            # We can check this by looking at the settings or env vars that the driver uses.
-            # A simple way is to try to instantiate it with defaults, but that might fail if keys are missing.
-            # Instead, let's check the specific requirements for each known provider.
-
             is_configured = False
 
             if provider == "openai":
@@ -87,13 +97,10 @@ def get_available_models() -> list[str]:
                 if settings.grok_api_key or os.getenv("GROK_API_KEY"):
                     is_configured = True
             elif provider == "ollama":
-                # Ollama is always considered "configured" as it defaults to localhost
-                # We will check connectivity later
                 is_configured = True
             elif provider == "lmstudio":
-                # LM Studio is similar to Ollama, defaults to localhost
                 is_configured = True
-            elif provider == "local_http" and (settings.local_http_endpoint or os.getenv("LOCAL_HTTP_ENDPOINT")):
+            elif provider == "local_http" and os.getenv("LOCAL_HTTP_ENDPOINT"):
                 is_configured = True
 
             if not is_configured:
@@ -101,36 +108,20 @@ def get_available_models() -> list[str]:
 
             configured_providers.add(provider)
 
-            # 2. Static Detection: Get models from MODEL_PRICING
+            # Static Detection: Get models from MODEL_PRICING
             if hasattr(driver_cls, "MODEL_PRICING"):
                 pricing = driver_cls.MODEL_PRICING
                 for model_id in pricing:
-                    # Skip "default" or generic keys if they exist
                    if model_id == "default":
                        continue
-
-                    # For Azure, the model_id in pricing is usually the base model name,
-                    # but the user needs to use the deployment ID.
-                    # However, our Azure driver implementation uses the deployment_id from init
-                    # as the "model" for the request, but expects the user to pass a model name
-                    # that maps to pricing?
-                    # Looking at AzureDriver:
-                    # kwargs = {"model": self.deployment_id, ...}
-                    # model = options.get("model", self.model) -> used for pricing lookup
-                    # So we should list the keys in MODEL_PRICING as available "models"
-                    # even though for Azure specifically it's a bit weird because of deployment IDs.
-                    # But for general discovery, listing supported models is correct.
-
                     available_models.add(f"{provider}/{model_id}")
 
-            # 3. Dynamic Detection: Specific logic for Ollama
+            # Dynamic Detection: Specific logic for Ollama
             if provider == "ollama":
                 try:
                     endpoint = settings.ollama_endpoint or os.getenv(
                         "OLLAMA_ENDPOINT", "http://localhost:11434/api/generate"
                     )
-                    # We need the base URL for tags, usually http://localhost:11434/api/tags
-                    # The configured endpoint might be .../api/generate or .../api/chat
                     base_url = endpoint.split("/api/")[0]
                     tags_url = f"{base_url}/api/tags"
@@ -141,8 +132,6 @@ def get_available_models() -> list[str]:
                     for model in models:
                         name = model.get("name")
                         if name:
-                            # Ollama model names often include tags like "llama3:latest"
-                            # We can keep them as is.
                             available_models.add(f"ollama/{name}")
                 except Exception as e:
                     logger.debug(f"Failed to fetch Ollama models: {e}")
@@ -184,4 +173,30 @@ def get_available_models() -> list[str]:
         for model_id in get_all_provider_models(api_name):
             available_models.add(f"{prompture_name}/{model_id}")
 
-    return sorted(list(available_models))
+    sorted_models = sorted(available_models)
+
+    if not include_capabilities:
+        return sorted_models
+
+    # Build enriched dicts with capabilities from models.dev
+    from .model_rates import get_model_capabilities
+
+    enriched: list[dict[str, Any]] = []
+    for model_str in sorted_models:
+        parts = model_str.split("/", 1)
+        provider = parts[0]
+        model_id = parts[1] if len(parts) > 1 else parts[0]
+
+        caps = get_model_capabilities(provider, model_id)
+        caps_dict = dataclasses.asdict(caps) if caps is not None else None
+
+        enriched.append(
+            {
+                "model": model_str,
+                "provider": provider,
+                "model_id": model_id,
+                "capabilities": caps_dict,
+            }
+        )
+
+    return enriched
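
A usage sketch of the new flag (which models appear depends on the configured API keys, and "capabilities" is None for models absent from models.dev):

    from prompture.discovery import get_available_models

    plain = get_available_models()                          # ["provider/model_id", ...]
    rich = get_available_models(include_capabilities=True)  # list of dicts

    for entry in rich[:3]:
        # Each dict carries "model", "provider", "model_id", and "capabilities"
        print(entry["model"], (entry["capabilities"] or {}).get("supports_tool_use"))
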
prompture/driver.py CHANGED
@@ -173,6 +173,45 @@ class Driver:
         except Exception:
             logger.exception("Callback %s raised an exception", event)
 
+    def _validate_model_capabilities(
+        self,
+        provider: str,
+        model: str,
+        *,
+        using_tool_use: bool = False,
+        using_json_schema: bool = False,
+        using_vision: bool = False,
+    ) -> None:
+        """Log warnings when the model may not support a requested feature.
+
+        Uses models.dev metadata as a secondary signal. Warnings only — the
+        API is the final authority and models.dev data may be stale.
+        """
+        from .model_rates import get_model_capabilities
+
+        caps = get_model_capabilities(provider, model)
+        if caps is None:
+            return
+
+        if using_tool_use and caps.supports_tool_use is False:
+            logger.warning(
+                "Model %s/%s may not support tool use according to models.dev metadata",
+                provider,
+                model,
+            )
+        if using_json_schema and caps.supports_structured_output is False:
+            logger.warning(
+                "Model %s/%s may not support structured output / JSON schema according to models.dev metadata",
+                provider,
+                model,
+            )
+        if using_vision and caps.supports_vision is False:
+            logger.warning(
+                "Model %s/%s may not support vision/image inputs according to models.dev metadata",
+                provider,
+                model,
+            )
+
     def _check_vision_support(self, messages: list[dict[str, Any]]) -> None:
         """Raise if messages contain image blocks and the driver lacks vision support."""
         if self.supports_vision:
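
The sync Driver gains the identical hook. Because the signal surfaces only through the logging module, callers need warnings enabled to see it; a minimal sketch:

    import logging

    # The drivers log through module-level loggers, so enabling WARNING
    # output is enough to surface capability mismatches, e.g.:
    #   Model openai/some-model may not support tool use according to models.dev metadata
    logging.basicConfig(level=logging.WARNING)
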
prompture/drivers/async_azure_driver.py CHANGED
@@ -70,9 +70,9 @@ class AsyncAzureDriver(CostMixin, AsyncDriver):
             raise RuntimeError("openai package (>=1.0.0) with AsyncAzureOpenAI not installed")
 
         model = options.get("model", self.model)
-        model_info = self.MODEL_PRICING.get(model, {})
-        tokens_param = model_info.get("tokens_param", "max_tokens")
-        supports_temperature = model_info.get("supports_temperature", True)
+        model_config = self._get_model_config("azure", model)
+        tokens_param = model_config["tokens_param"]
+        supports_temperature = model_config["supports_temperature"]
 
         opts = {"temperature": 1.0, "max_tokens": 512, **options}
prompture/drivers/async_claude_driver.py CHANGED
@@ -51,6 +51,13 @@ class AsyncClaudeDriver(CostMixin, AsyncDriver):
         opts = {**{"temperature": 0.0, "max_tokens": 512}, **options}
         model = options.get("model", self.model)
 
+        # Validate capabilities against models.dev metadata
+        self._validate_model_capabilities(
+            "claude",
+            model,
+            using_json_schema=bool(options.get("json_schema")),
+        )
+
         client = anthropic.AsyncAnthropic(api_key=self.api_key)
 
         # Anthropic requires system messages as a top-level parameter
@@ -142,6 +149,9 @@ class AsyncClaudeDriver(CostMixin, AsyncDriver):
 
         opts = {**{"temperature": 0.0, "max_tokens": 512}, **options}
         model = options.get("model", self.model)
+
+        self._validate_model_capabilities("claude", model, using_tool_use=True)
+
         client = anthropic.AsyncAnthropic(api_key=self.api_key)
 
         system_content, api_messages = self._extract_system_and_messages(messages)
prompture/drivers/async_google_driver.py CHANGED
@@ -169,6 +169,13 @@ class AsyncGoogleDriver(CostMixin, AsyncDriver):
     ) -> dict[str, Any]:
         gen_input, gen_kwargs, model_kwargs = self._build_generation_args(messages, options)
 
+        # Validate capabilities against models.dev metadata
+        self._validate_model_capabilities(
+            "google",
+            self.model,
+            using_json_schema=bool((options or {}).get("json_schema")),
+        )
+
         try:
             model = genai.GenerativeModel(self.model, **model_kwargs)
             response = await model.generate_content_async(gen_input, **gen_kwargs)
@@ -201,6 +208,9 @@ class AsyncGoogleDriver(CostMixin, AsyncDriver):
         options: dict[str, Any],
     ) -> dict[str, Any]:
         """Generate a response that may include tool/function calls (async)."""
+        model = options.get("model", self.model)
+        self._validate_model_capabilities("google", model, using_tool_use=True)
+
         gen_input, gen_kwargs, model_kwargs = self._build_generation_args(
             self._prepare_messages(messages), options
         )
prompture/drivers/async_grok_driver.py CHANGED
@@ -44,9 +44,9 @@ class AsyncGrokDriver(CostMixin, AsyncDriver):
 
         model = options.get("model", self.model)
 
-        model_info = self.MODEL_PRICING.get(model, {})
-        tokens_param = model_info.get("tokens_param", "max_tokens")
-        supports_temperature = model_info.get("supports_temperature", True)
+        model_config = self._get_model_config("grok", model)
+        tokens_param = model_config["tokens_param"]
+        supports_temperature = model_config["supports_temperature"]
 
         opts = {"temperature": 1.0, "max_tokens": 512, **options}
prompture/drivers/async_groq_driver.py CHANGED
@@ -49,9 +49,9 @@ class AsyncGroqDriver(CostMixin, AsyncDriver):
 
         model = options.get("model", self.model)
 
-        model_info = self.MODEL_PRICING.get(model, {})
-        tokens_param = model_info.get("tokens_param", "max_tokens")
-        supports_temperature = model_info.get("supports_temperature", True)
+        model_config = self._get_model_config("groq", model)
+        tokens_param = model_config["tokens_param"]
+        supports_temperature = model_config["supports_temperature"]
 
         opts = {"temperature": 0.7, "max_tokens": 512, **options}
prompture/drivers/async_openai_driver.py CHANGED
@@ -54,9 +54,16 @@ class AsyncOpenAIDriver(CostMixin, AsyncDriver):
 
         model = options.get("model", self.model)
 
-        model_info = self.MODEL_PRICING.get(model, {})
-        tokens_param = model_info.get("tokens_param", "max_tokens")
-        supports_temperature = model_info.get("supports_temperature", True)
+        model_config = self._get_model_config("openai", model)
+        tokens_param = model_config["tokens_param"]
+        supports_temperature = model_config["supports_temperature"]
+
+        # Validate capabilities against models.dev metadata
+        self._validate_model_capabilities(
+            "openai",
+            model,
+            using_json_schema=bool(options.get("json_schema")),
+        )
 
         opts = {"temperature": 1.0, "max_tokens": 512, **options}
 
@@ -120,9 +127,11 @@ class AsyncOpenAIDriver(CostMixin, AsyncDriver):
             raise RuntimeError("openai package (>=1.0.0) is not installed")
 
         model = options.get("model", self.model)
-        model_info = self.MODEL_PRICING.get(model, {})
-        tokens_param = model_info.get("tokens_param", "max_tokens")
-        supports_temperature = model_info.get("supports_temperature", True)
+        model_config = self._get_model_config("openai", model)
+        tokens_param = model_config["tokens_param"]
+        supports_temperature = model_config["supports_temperature"]
+
+        self._validate_model_capabilities("openai", model, using_tool_use=True)
 
         opts = {"temperature": 1.0, "max_tokens": 512, **options}
 
@@ -191,9 +200,9 @@ class AsyncOpenAIDriver(CostMixin, AsyncDriver):
             raise RuntimeError("openai package (>=1.0.0) is not installed")
 
         model = options.get("model", self.model)
-        model_info = self.MODEL_PRICING.get(model, {})
-        tokens_param = model_info.get("tokens_param", "max_tokens")
-        supports_temperature = model_info.get("supports_temperature", True)
+        model_config = self._get_model_config("openai", model)
+        tokens_param = model_config["tokens_param"]
+        supports_temperature = model_config["supports_temperature"]
 
         opts = {"temperature": 1.0, "max_tokens": 512, **options}
prompture/drivers/async_openrouter_driver.py CHANGED
@@ -47,9 +47,9 @@ class AsyncOpenRouterDriver(CostMixin, AsyncDriver):
     async def _do_generate(self, messages: list[dict[str, str]], options: dict[str, Any]) -> dict[str, Any]:
         model = options.get("model", self.model)
 
-        model_info = self.MODEL_PRICING.get(model, {})
-        tokens_param = model_info.get("tokens_param", "max_tokens")
-        supports_temperature = model_info.get("supports_temperature", True)
+        model_config = self._get_model_config("openrouter", model)
+        tokens_param = model_config["tokens_param"]
+        supports_temperature = model_config["supports_temperature"]
 
         opts = {"temperature": 1.0, "max_tokens": 512, **options}
prompture/drivers/azure_driver.py CHANGED
@@ -108,9 +108,9 @@ class AzureDriver(CostMixin, Driver):
             raise RuntimeError("openai package (>=1.0.0) with AzureOpenAI not installed")
 
         model = options.get("model", self.model)
-        model_info = self.MODEL_PRICING.get(model, {})
-        tokens_param = model_info.get("tokens_param", "max_tokens")
-        supports_temperature = model_info.get("supports_temperature", True)
+        model_config = self._get_model_config("azure", model)
+        tokens_param = model_config["tokens_param"]
+        supports_temperature = model_config["supports_temperature"]
 
         opts = {"temperature": 1.0, "max_tokens": 512, **options}
prompture/drivers/claude_driver.py CHANGED
@@ -77,6 +77,13 @@ class ClaudeDriver(CostMixin, Driver):
         opts = {**{"temperature": 0.0, "max_tokens": 512}, **options}
         model = options.get("model", self.model)
 
+        # Validate capabilities against models.dev metadata
+        self._validate_model_capabilities(
+            "claude",
+            model,
+            using_json_schema=bool(options.get("json_schema")),
+        )
+
         client = anthropic.Anthropic(api_key=self.api_key)
 
         # Anthropic requires system messages as a top-level parameter
@@ -177,6 +184,9 @@ class ClaudeDriver(CostMixin, Driver):
 
         opts = {**{"temperature": 0.0, "max_tokens": 512}, **options}
         model = options.get("model", self.model)
+
+        self._validate_model_capabilities("claude", model, using_tool_use=True)
+
         client = anthropic.Anthropic(api_key=self.api_key)
 
         system_content, api_messages = self._extract_system_and_messages(messages)
prompture/drivers/google_driver.py CHANGED
@@ -228,6 +228,13 @@ class GoogleDriver(CostMixin, Driver):
     def _do_generate(self, messages: list[dict[str, str]], options: Optional[dict[str, Any]] = None) -> dict[str, Any]:
         gen_input, gen_kwargs, model_kwargs = self._build_generation_args(messages, options)
 
+        # Validate capabilities against models.dev metadata
+        self._validate_model_capabilities(
+            "google",
+            self.model,
+            using_json_schema=bool((options or {}).get("json_schema")),
+        )
+
         try:
             logger.debug(f"Initializing {self.model} for generation")
             model = genai.GenerativeModel(self.model, **model_kwargs)
@@ -263,6 +270,9 @@ class GoogleDriver(CostMixin, Driver):
         options: dict[str, Any],
     ) -> dict[str, Any]:
         """Generate a response that may include tool/function calls."""
+        model = options.get("model", self.model)
+        self._validate_model_capabilities("google", model, using_tool_use=True)
+
         gen_input, gen_kwargs, model_kwargs = self._build_generation_args(
             self._prepare_messages(messages), options
         )
prompture/drivers/grok_driver.py CHANGED
@@ -99,10 +99,10 @@ class GrokDriver(CostMixin, Driver):
 
         model = options.get("model", self.model)
 
-        # Lookup model-specific config
-        model_info = self.MODEL_PRICING.get(model, {})
-        tokens_param = model_info.get("tokens_param", "max_tokens")
-        supports_temperature = model_info.get("supports_temperature", True)
+        # Lookup model-specific config (live models.dev data + hardcoded fallback)
+        model_config = self._get_model_config("grok", model)
+        tokens_param = model_config["tokens_param"]
+        supports_temperature = model_config["supports_temperature"]
 
         # Defaults
         opts = {"temperature": 1.0, "max_tokens": 512, **options}
prompture/drivers/groq_driver.py CHANGED
@@ -69,10 +69,10 @@ class GroqDriver(CostMixin, Driver):
 
         model = options.get("model", self.model)
 
-        # Lookup model-specific config
-        model_info = self.MODEL_PRICING.get(model, {})
-        tokens_param = model_info.get("tokens_param", "max_tokens")
-        supports_temperature = model_info.get("supports_temperature", True)
+        # Lookup model-specific config (live models.dev data + hardcoded fallback)
+        model_config = self._get_model_config("groq", model)
+        tokens_param = model_config["tokens_param"]
+        supports_temperature = model_config["supports_temperature"]
 
         # Base configuration
         opts = {"temperature": 0.7, "max_tokens": 512, **options}
prompture/drivers/openai_driver.py CHANGED
@@ -93,10 +93,17 @@ class OpenAIDriver(CostMixin, Driver):
 
         model = options.get("model", self.model)
 
-        # Lookup model-specific config
-        model_info = self.MODEL_PRICING.get(model, {})
-        tokens_param = model_info.get("tokens_param", "max_tokens")
-        supports_temperature = model_info.get("supports_temperature", True)
+        # Lookup model-specific config (live models.dev data + hardcoded fallback)
+        model_config = self._get_model_config("openai", model)
+        tokens_param = model_config["tokens_param"]
+        supports_temperature = model_config["supports_temperature"]
+
+        # Validate capabilities against models.dev metadata
+        self._validate_model_capabilities(
+            "openai",
+            model,
+            using_json_schema=bool(options.get("json_schema")),
+        )
 
         # Defaults
         opts = {"temperature": 1.0, "max_tokens": 512, **options}
@@ -168,9 +175,11 @@ class OpenAIDriver(CostMixin, Driver):
             raise RuntimeError("openai package (>=1.0.0) is not installed")
 
         model = options.get("model", self.model)
-        model_info = self.MODEL_PRICING.get(model, {})
-        tokens_param = model_info.get("tokens_param", "max_tokens")
-        supports_temperature = model_info.get("supports_temperature", True)
+        model_config = self._get_model_config("openai", model)
+        tokens_param = model_config["tokens_param"]
+        supports_temperature = model_config["supports_temperature"]
+
+        self._validate_model_capabilities("openai", model, using_tool_use=True)
 
         opts = {"temperature": 1.0, "max_tokens": 512, **options}
 
@@ -239,9 +248,9 @@ class OpenAIDriver(CostMixin, Driver):
             raise RuntimeError("openai package (>=1.0.0) is not installed")
 
         model = options.get("model", self.model)
-        model_info = self.MODEL_PRICING.get(model, {})
-        tokens_param = model_info.get("tokens_param", "max_tokens")
-        supports_temperature = model_info.get("supports_temperature", True)
+        model_config = self._get_model_config("openai", model)
+        tokens_param = model_config["tokens_param"]
+        supports_temperature = model_config["supports_temperature"]
 
         opts = {"temperature": 1.0, "max_tokens": 512, **options}
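
Whether the structured-output check fires in these hunks is driven purely by the truthiness of the json_schema key in the options dict; an absent or empty schema skips it. An illustrative options dict:

    options = {
        "model": "gpt-4o",  # illustrative
        "json_schema": {
            "type": "object",
            "properties": {"name": {"type": "string"}},
        },
    }
    # bool(options.get("json_schema")) -> True, so _do_generate calls
    # _validate_model_capabilities("openai", model, using_json_schema=True);
    # options without "json_schema" (or with an empty dict) skip the check.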
 
prompture/drivers/openrouter_driver.py CHANGED
@@ -85,10 +85,10 @@ class OpenRouterDriver(CostMixin, Driver):
 
         model = options.get("model", self.model)
 
-        # Lookup model-specific config
-        model_info = self.MODEL_PRICING.get(model, {})
-        tokens_param = model_info.get("tokens_param", "max_tokens")
-        supports_temperature = model_info.get("supports_temperature", True)
+        # Lookup model-specific config (live models.dev data + hardcoded fallback)
+        model_config = self._get_model_config("openrouter", model)
+        tokens_param = model_config["tokens_param"]
+        supports_temperature = model_config["supports_temperature"]
 
         # Defaults
         opts = {"temperature": 1.0, "max_tokens": 512, **options}
prompture/model_rates.py CHANGED
@@ -9,6 +9,7 @@ import contextlib
 import json
 import logging
 import threading
+from dataclasses import dataclass
 from datetime import datetime, timezone
 from pathlib import Path
 from typing import Any, Optional
@@ -139,7 +140,12 @@ def _lookup_model(provider: str, model_id: str) -> Optional[dict[str, Any]]:
     if not isinstance(provider_data, dict):
         return None
 
-    return provider_data.get(model_id)
+    # models.dev nests actual models under a "models" key
+    models = provider_data.get("models", provider_data)
+    if not isinstance(models, dict):
+        return None
+
+    return models.get(model_id)
 
 
 # ── Public API ──────────────────────────────────────────────────────────────
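
The shape assumed by this fix matches the models.dev api.json layout, where each provider object keeps its models one level down under "models"; the .get("models", provider_data) fallback also tolerates a flat layout. An illustrative trimmed entry:

    provider_data = {
        "id": "openai",  # provider-level metadata lives at the top
        "models": {      # actual model entries are nested here
            "gpt-4o": {"tool_call": True, "limit": {"context": 128000}},
        },
    }

    models = provider_data.get("models", provider_data)
    print(models.get("gpt-4o"))  # found via the nested "models" dict
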
@@ -189,7 +195,12 @@ def get_all_provider_models(provider: str) -> list[str]:
     if not isinstance(provider_data, dict):
         return []
 
-    return list(provider_data.keys())
+    # models.dev nests actual models under a "models" key
+    models = provider_data.get("models", provider_data)
+    if not isinstance(models, dict):
+        return []
+
+    return list(models.keys())
 
 
 def refresh_rates_cache(force: bool = False) -> bool:
@@ -215,3 +226,102 @@ def refresh_rates_cache(force: bool = False) -> bool:
             return True
 
     return False
+
+
+# ── Model Capabilities ─────────────────────────────────────────────────────
+
+
+@dataclass(frozen=True)
+class ModelCapabilities:
+    """Normalized capability metadata for an LLM model from models.dev.
+
+    All fields default to ``None`` (unknown) so callers can distinguish
+    "the model doesn't support X" from "we have no data about X".
+    """
+
+    supports_temperature: Optional[bool] = None
+    supports_tool_use: Optional[bool] = None
+    supports_structured_output: Optional[bool] = None
+    supports_vision: Optional[bool] = None
+    is_reasoning: Optional[bool] = None
+    context_window: Optional[int] = None
+    max_output_tokens: Optional[int] = None
+    modalities_input: tuple[str, ...] = ()
+    modalities_output: tuple[str, ...] = ()
+
+
+def get_model_capabilities(provider: str, model_id: str) -> Optional[ModelCapabilities]:
+    """Return capability metadata for a model, or ``None`` if unavailable.
+
+    Maps models.dev fields to a :class:`ModelCapabilities` instance:
+
+    - ``temperature`` → ``supports_temperature``
+    - ``tool_call`` → ``supports_tool_use``
+    - ``structured_output`` → ``supports_structured_output``
+    - ``"image" in modalities.input`` → ``supports_vision``
+    - ``reasoning`` → ``is_reasoning``
+    - ``limit.context`` → ``context_window``
+    - ``limit.output`` → ``max_output_tokens``
+    """
+    entry = _lookup_model(provider, model_id)
+    if entry is None:
+        return None
+
+    # Boolean capabilities (True/False/None)
+    supports_temperature: Optional[bool] = None
+    if "temperature" in entry:
+        supports_temperature = bool(entry["temperature"])
+
+    supports_tool_use: Optional[bool] = None
+    if "tool_call" in entry:
+        supports_tool_use = bool(entry["tool_call"])
+
+    supports_structured_output: Optional[bool] = None
+    if "structured_output" in entry:
+        supports_structured_output = bool(entry["structured_output"])
+
+    is_reasoning: Optional[bool] = None
+    if "reasoning" in entry:
+        is_reasoning = bool(entry["reasoning"])
+
+    # Modalities
+    modalities = entry.get("modalities", {})
+    modalities_input: tuple[str, ...] = ()
+    modalities_output: tuple[str, ...] = ()
+    if isinstance(modalities, dict):
+        raw_in = modalities.get("input")
+        if isinstance(raw_in, (list, tuple)):
+            modalities_input = tuple(str(m) for m in raw_in)
+        raw_out = modalities.get("output")
+        if isinstance(raw_out, (list, tuple)):
+            modalities_output = tuple(str(m) for m in raw_out)
+
+    supports_vision: Optional[bool] = None
+    if modalities_input:
+        supports_vision = "image" in modalities_input
+
+    # Limits
+    context_window: Optional[int] = None
+    max_output_tokens: Optional[int] = None
+    limits = entry.get("limit", {})
+    if isinstance(limits, dict):
+        ctx = limits.get("context")
+        if ctx is not None:
+            with contextlib.suppress(TypeError, ValueError):
+                context_window = int(ctx)
+        out = limits.get("output")
+        if out is not None:
+            with contextlib.suppress(TypeError, ValueError):
+                max_output_tokens = int(out)
+
+    return ModelCapabilities(
+        supports_temperature=supports_temperature,
+        supports_tool_use=supports_tool_use,
+        supports_structured_output=supports_structured_output,
+        supports_vision=supports_vision,
+        is_reasoning=is_reasoning,
+        context_window=context_window,
+        max_output_tokens=max_output_tokens,
+        modalities_input=modalities_input,
+        modalities_output=modalities_output,
+    )
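
End to end, an entry shaped like the models.dev data maps onto the dataclass as follows (values illustrative; keys missing from the entry stay None):

    entry = {
        "tool_call": True,
        "reasoning": False,
        "modalities": {"input": ["text", "image"], "output": ["text"]},
        "limit": {"context": 128000, "output": 16384},
    }
    # get_model_capabilities would normalize this to roughly:
    #   ModelCapabilities(
    #       supports_temperature=None,        # key absent -> unknown
    #       supports_tool_use=True,
    #       supports_structured_output=None,  # key absent -> unknown
    #       supports_vision=True,             # "image" in modalities.input
    #       is_reasoning=False,
    #       context_window=128000,
    #       max_output_tokens=16384,
    #       modalities_input=("text", "image"),
    #       modalities_output=("text",),
    #   )
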
prompture-0.0.39.dev1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: prompture
-Version: 0.0.38.dev3
+Version: 0.0.39.dev1
 Summary: Ask LLMs to return structured JSON and run cross-model tests. API-first.
 Author-email: Juan Denis <juan@vene.co>
 License-Expression: MIT
prompture-0.0.39.dev1.dist-info/RECORD CHANGED
@@ -1,26 +1,26 @@
-prompture/__init__.py,sha256=RrpHZlLPpzntUOp2tL2II2DdVxQRoCxY6JBF_b4k3s0,7213
-prompture/_version.py,sha256=e1uep7-PEqCFbKHaF3uTPcu4UaXdHJjkYrnGcuFmFZM,719
+prompture/__init__.py,sha256=pRD1BjR9wHMmbX_8gqBYfQrZk6aOH9kJfsd7klOEyzA,7336
+prompture/_version.py,sha256=vmkhULu1sKz5TZsHnh6Ft106Bg-CVqwMF0zt2_28S5U,719
 prompture/agent.py,sha256=xe_yFHGDzTxaU4tmaLt5AQnzrN0I72hBGwGVrCxg2D0,34704
 prompture/agent_types.py,sha256=Icl16PQI-ThGLMFCU43adtQA6cqETbsPn4KssKBI4xc,4664
 prompture/async_agent.py,sha256=nOLOQCNkg0sKKTpryIiidmIcAAlA3FR2NfnZwrNBuCg,33066
 prompture/async_conversation.py,sha256=7NOkXdT5LnUNPVzTammtHeiOV2vB6IfZGuysbtNfcHQ,30861
 prompture/async_core.py,sha256=s8G0nGUGR1Bf_BQG9_FcQRpveSnJKkEwcWNfbAJaSkg,29208
-prompture/async_driver.py,sha256=99WZNlfEpPH9kVykRH6Qa2ZooeVw6pXa23_UUQj0Tag,7179
+prompture/async_driver.py,sha256=4VQ9Q_tI6Ufw6W1CYJ5j8hVtgVdqFGuk6e2tLaSceWE,8581
 prompture/async_groups.py,sha256=8B383EF_qI9NzcG9zljLKjIZ_37bpNivvsmfJQoOGRk,19894
 prompture/cache.py,sha256=4dfQDMsEZ9JMQDXLOkiugPmmMJQIfKVE8rTAKDH4oL8,14401
 prompture/callbacks.py,sha256=JPDqWGzPIzv44l54ocmezlYVBnbKPDEEXRrLdluWGAo,1731
 prompture/cli.py,sha256=tNiIddRmgC1BomjY5O1VVVAwvqHVzF8IHmQrM-cG2wQ,2902
 prompture/conversation.py,sha256=jPwp5m5ZYqLHvD3zYVWJEnegnGPgexVZanZeGVOWrjE,32462
 prompture/core.py,sha256=ZCKhqXI7msI-r4zy_Y_Lx8Sz--OJ6qb4b6jchdb5Boo,56671
-prompture/cost_mixin.py,sha256=_spz84i8Qsplh6V3GkWyXXSUE4EwGy2IsbcsU2LEBxs,1918
-prompture/discovery.py,sha256=T0GbiTSrCnvAEMzQiLj6ZI-sYBOQgcnLtwtlLSkIb3Q,8228
-prompture/driver.py,sha256=2yVGw8vE5xko7HQlP6HfJKFyJLfc8cffdXwmdp5o3Uo,9008
+prompture/cost_mixin.py,sha256=BR-zd42Tj4K865iRIntXlJEfryUcrd5Tuwcfx89QknE,3547
+prompture/discovery.py,sha256=bkGyeDp7reHS8ih5jN20g-qxsdfpQ4Lg1aVG06j6Qnk,7532
+prompture/driver.py,sha256=wE7K3vnqeCVT5pEEBP-3uZ6e-YyU6TXtnEKRSB25eOc,10410
 prompture/field_definitions.py,sha256=PLvxq2ot-ngJ8JbWkkZ-XLtM1wvjUQ3TL01vSEo-a6E,21368
 prompture/group_types.py,sha256=BxeFV1tI4PTH3xPOie7q3-35ivkTdB9lJUPLH0kPH7A,4731
 prompture/groups.py,sha256=q9lpD57VWw6iQgK9S0nLVidItJZmusJkmpblM4EX9Sc,18349
 prompture/image.py,sha256=3uBxC6blXRNyY5KAJ5MkG6ow8KGAslX8WxM8Is8S8cw,5620
 prompture/logging.py,sha256=SkFO26_56Zai05vW8kTq3jvJudfLG2ipI5qNHaXKH3g,2574
-prompture/model_rates.py,sha256=qtZUjsCVskA9LyG73JklG_kjKJHABA6ldBmBX0UzlSQ,6415
+prompture/model_rates.py,sha256=w2syZCbYM3DGP978Wopgy0AbmvSQcDm-6ALLBLLrGkg,10482
 prompture/persistence.py,sha256=stcsH9Onth3BlK0QTWDKtXFp3FBmwUS5PI5R1glsIQc,9293
 prompture/persona.py,sha256=SpLW-XPdG0avvJx8uGqJvMRZy65OjzfmJck7qbd28gc,17526
 prompture/runner.py,sha256=lHe2L2jqY1pDXoKNPJALN9lAm-Q8QOY8C8gw-vM9VrM,4213
@@ -35,29 +35,29 @@ prompture/aio/__init__.py,sha256=bKqTu4Jxld16aP_7SP9wU5au45UBIb041ORo4E4HzVo,181
 prompture/drivers/__init__.py,sha256=VuEBZPqaQzXLl_Lvn_c5mRlJJrrlObZCLeHaR8n2eJ4,7050
 prompture/drivers/airllm_driver.py,sha256=SaTh7e7Plvuct_TfRqQvsJsKHvvM_3iVqhBtlciM-Kw,3858
 prompture/drivers/async_airllm_driver.py,sha256=1hIWLXfyyIg9tXaOE22tLJvFyNwHnOi1M5BIKnV8ysk,908
-prompture/drivers/async_azure_driver.py,sha256=lGZICROspP2_o2XlwIZZvrCDenSJZPNYTu7clCgRD68,4473
-prompture/drivers/async_claude_driver.py,sha256=dbUHH2EEotxUWz8cTXVCWtf4ExtiLv3FzzNenvHSVVI,10275
-prompture/drivers/async_google_driver.py,sha256=MIemYcE0ppSWfvVaxv4V-Tqjmy6BKO7sRG6UfZqtdV8,13349
-prompture/drivers/async_grok_driver.py,sha256=fvqEK-mrAx4U4_0C1RePGdZ-TUmQI9Qvj-x1f_uGI5c,3556
-prompture/drivers/async_groq_driver.py,sha256=PEAAj7QHjVqT9UtLfnFY4i__Mk-QpngmHGvbaBNEUrE,3085
+prompture/drivers/async_azure_driver.py,sha256=CFYh4TsI16m7KgAQ_jThJCRw60e_MlHEejDhm7klGH4,4456
+prompture/drivers/async_claude_driver.py,sha256=oawbFVVMtRlikQOmu3jRjbdpoeu95JqTF1YHLKO3ybE,10576
+prompture/drivers/async_google_driver.py,sha256=LTUgCXJjzuTDGzsCsmY2-xH2KdTLJD7htwO49ZNFOdE,13711
+prompture/drivers/async_grok_driver.py,sha256=s3bXEGhVrMyw10CowkBhs5522mhipWJyWWu-xVixzyg,3538
+prompture/drivers/async_groq_driver.py,sha256=pjAh_bgZWSWaNSm5XrU-u3gRV6YSGwNG5NfAbkYeJ84,3067
 prompture/drivers/async_hugging_driver.py,sha256=IblxqU6TpNUiigZ0BCgNkAgzpUr2FtPHJOZnOZMnHF0,2152
 prompture/drivers/async_lmstudio_driver.py,sha256=rPn2qVPm6UE2APzAn7ZHYTELUwr0dQMi8XHv6gAhyH8,5782
 prompture/drivers/async_local_http_driver.py,sha256=qoigIf-w3_c2dbVdM6m1e2RMAWP4Gk4VzVs5hM3lPvQ,1609
 prompture/drivers/async_ollama_driver.py,sha256=FaSXtFXrgeVHIe0b90Vg6rGeSTWLpPnjaThh9Ai7qQo,5042
-prompture/drivers/async_openai_driver.py,sha256=6p538rPlfAWhsTZ5HKAg8KEW1xM4WEFzXVPZsigz_P4,8704
-prompture/drivers/async_openrouter_driver.py,sha256=qvvwJADjnEj6J9f8m0eGlfWTBEm6oXTjwrgt_Im4K7w,3793
+prompture/drivers/async_openai_driver.py,sha256=mv0_H2ZQFm96xfDL1oFz3qRhB9v-whv48dwvE0b02dA,8956
+prompture/drivers/async_openrouter_driver.py,sha256=pMenRxnRnJlx5lR25qejlsAzt6wGPBr10L85wLYKncI,3781
 prompture/drivers/async_registry.py,sha256=syervbb7THneJ-NUVSuxy4cnxGW6VuNzKv-Aqqn2ysU,4329
-prompture/drivers/azure_driver.py,sha256=QZr7HEvgSKT9LOTCtCjuBdHl57yvrnWmeTHtmewuJQY,5727
-prompture/drivers/claude_driver.py,sha256=8XnCBHtk6N_PzHStwxIUlcvekdPN896BqOLShmgxU9k,11536
-prompture/drivers/google_driver.py,sha256=8bnAcve1xtgpUXrCdVzWpU_yAqwaeuiBWk8-PbG1cmM,15956
-prompture/drivers/grok_driver.py,sha256=AIwuzNAQyOhmVDA07ISWt2e-rsv5aYk3I5AM4HkLM7o,5294
-prompture/drivers/groq_driver.py,sha256=9cZI21RsgYJTjnrtX2fVA0AadDL-VklhY4ugjDCutwM,4195
+prompture/drivers/azure_driver.py,sha256=bcfYxfkIbfxqopr_O6sbhdtk4PLl7t-4gbUL0OoMeM0,5710
+prompture/drivers/claude_driver.py,sha256=C8Av3DXP2x3f35jEv8BRwEM_4vh0cfmLsy3t5dsR6aM,11837
+prompture/drivers/google_driver.py,sha256=Zck5VUsW37kDgohXz3cUWRmZ88OfhmTpVD-qzAVMp-8,16318
+prompture/drivers/grok_driver.py,sha256=CzAXKAbbWmbE8qLFZxxoEhf4Qzbtc9YqDX7kkCsE4dk,5320
+prompture/drivers/groq_driver.py,sha256=61LKHhYyRiFkHKbLKFYX10fqjpL_INtPY_Zeb55AV0o,4221
 prompture/drivers/hugging_driver.py,sha256=gZir3XnM77VfYIdnu3S1pRftlZJM6G3L8bgGn5esg-Q,2346
 prompture/drivers/lmstudio_driver.py,sha256=9ZnJ1l5LuWAjkH2WKfFjZprNMVIXoSC7qXDNDTxm-tA,6748
 prompture/drivers/local_http_driver.py,sha256=QJgEf9kAmy8YZ5fb8FHnWuhoDoZYNd8at4jegzNVJH0,1658
 prompture/drivers/ollama_driver.py,sha256=k9xeUwFp91OrDbjkbYI-F8CDFy5ew-zQ0btXqwbXXWM,10220
-prompture/drivers/openai_driver.py,sha256=BykJ3Z16BaWREVnAGaTYFwK2ZCI2aGOjo2YdsR8m_6w,10164
-prompture/drivers/openrouter_driver.py,sha256=OAVmvCQ1ZW1ApJHsXJa8i1Dst9EUsZAt6uEDqF9aIQw,5408
+prompture/drivers/openai_driver.py,sha256=WJ2LnSttq0FvrRzEeweAxzigv3qu_BYvpXv7PSVRZSI,10460
+prompture/drivers/openrouter_driver.py,sha256=J7SMZXH-nK_J9H-GVuYMtJMYuK_2kZcDSmOpBipieNI,5440
 prompture/drivers/registry.py,sha256=Dg_5w9alnIPKhOnsR9Xspuf5T7roBGu0r_L2Cf-UhXs,9926
 prompture/drivers/vision_helpers.py,sha256=l5iYXHJLR_vLFvqDPPPK1QqK7YPKh5GwocpbSyt0R04,5403
 prompture/scaffold/__init__.py,sha256=aitUxBV0MpjC7Od3iG8WUzcC7tGPXSt3oMzUBX8UDwQ,60
@@ -69,9 +69,9 @@ prompture/scaffold/templates/env.example.j2,sha256=eESKr1KWgyrczO6d-nwAhQwSpf_G-
 prompture/scaffold/templates/main.py.j2,sha256=TEgc5OvsZOEX0JthkSW1NI_yLwgoeVN_x97Ibg-vyWY,2632
 prompture/scaffold/templates/models.py.j2,sha256=JrZ99GCVK6TKWapskVRSwCssGrTu5cGZ_r46fOhY2GE,858
 prompture/scaffold/templates/requirements.txt.j2,sha256=m3S5fi1hq9KG9l_9j317rjwWww0a43WMKd8VnUWv2A4,102
-prompture-0.0.38.dev3.dist-info/licenses/LICENSE,sha256=0HgDepH7aaHNFhHF-iXuW6_GqDfYPnVkjtiCAZ4yS8I,1060
-prompture-0.0.38.dev3.dist-info/METADATA,sha256=ejIH91dOyVKrmJ4nKEbsutiI5Gb2xMRiqKuhzgz04Kw,10842
-prompture-0.0.38.dev3.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
-prompture-0.0.38.dev3.dist-info/entry_points.txt,sha256=AFPG3lJR86g4IJMoWQUW5Ph7G6MLNWG3A2u2Tp9zkp8,48
-prompture-0.0.38.dev3.dist-info/top_level.txt,sha256=to86zq_kjfdoLeAxQNr420UWqT0WzkKoZ509J7Qr2t4,10
-prompture-0.0.38.dev3.dist-info/RECORD,,
+prompture-0.0.39.dev1.dist-info/licenses/LICENSE,sha256=0HgDepH7aaHNFhHF-iXuW6_GqDfYPnVkjtiCAZ4yS8I,1060
+prompture-0.0.39.dev1.dist-info/METADATA,sha256=tU2YOuZDS573rWDVQPyYWAxtysF0OFWGHdr_9dmNFoQ,10842
+prompture-0.0.39.dev1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+prompture-0.0.39.dev1.dist-info/entry_points.txt,sha256=AFPG3lJR86g4IJMoWQUW5Ph7G6MLNWG3A2u2Tp9zkp8,48
+prompture-0.0.39.dev1.dist-info/top_level.txt,sha256=to86zq_kjfdoLeAxQNr420UWqT0WzkKoZ509J7Qr2t4,10
+prompture-0.0.39.dev1.dist-info/RECORD,,