prompture 0.0.38.dev2__py3-none-any.whl → 0.0.40.dev1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prompture/__init__.py +12 -1
- prompture/_version.py +2 -2
- prompture/async_conversation.py +9 -0
- prompture/async_core.py +16 -0
- prompture/async_driver.py +39 -0
- prompture/conversation.py +9 -0
- prompture/core.py +16 -0
- prompture/cost_mixin.py +37 -0
- prompture/discovery.py +108 -43
- prompture/driver.py +39 -0
- prompture/drivers/async_azure_driver.py +4 -4
- prompture/drivers/async_claude_driver.py +177 -8
- prompture/drivers/async_google_driver.py +10 -0
- prompture/drivers/async_grok_driver.py +4 -4
- prompture/drivers/async_groq_driver.py +4 -4
- prompture/drivers/async_openai_driver.py +155 -4
- prompture/drivers/async_openrouter_driver.py +4 -4
- prompture/drivers/azure_driver.py +3 -3
- prompture/drivers/claude_driver.py +10 -0
- prompture/drivers/google_driver.py +10 -0
- prompture/drivers/grok_driver.py +4 -4
- prompture/drivers/groq_driver.py +4 -4
- prompture/drivers/openai_driver.py +19 -10
- prompture/drivers/openrouter_driver.py +4 -4
- prompture/ledger.py +252 -0
- prompture/model_rates.py +112 -2
- {prompture-0.0.38.dev2.dist-info → prompture-0.0.40.dev1.dist-info}/METADATA +1 -1
- {prompture-0.0.38.dev2.dist-info → prompture-0.0.40.dev1.dist-info}/RECORD +32 -31
- {prompture-0.0.38.dev2.dist-info → prompture-0.0.40.dev1.dist-info}/WHEEL +0 -0
- {prompture-0.0.38.dev2.dist-info → prompture-0.0.40.dev1.dist-info}/entry_points.txt +0 -0
- {prompture-0.0.38.dev2.dist-info → prompture-0.0.40.dev1.dist-info}/licenses/LICENSE +0 -0
- {prompture-0.0.38.dev2.dist-info → prompture-0.0.40.dev1.dist-info}/top_level.txt +0 -0
prompture/ledger.py
ADDED
|
@@ -0,0 +1,252 @@
|
|
|
1
|
+
"""Persistent model usage ledger — tracks which LLM models have been used.
|
|
2
|
+
|
|
3
|
+
Stores per-model usage stats (call count, tokens, cost, timestamps) in a
|
|
4
|
+
SQLite database at ``~/.prompture/usage/model_ledger.db``. The public
|
|
5
|
+
convenience functions are fire-and-forget: they never raise exceptions so
|
|
6
|
+
they cannot break existing extraction/conversation flows.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
import hashlib
|
|
12
|
+
import logging
|
|
13
|
+
import sqlite3
|
|
14
|
+
import threading
|
|
15
|
+
from datetime import datetime, timezone
|
|
16
|
+
from pathlib import Path
|
|
17
|
+
from typing import Any
|
|
18
|
+
|
|
19
|
+
logger = logging.getLogger("prompture.ledger")
|
|
20
|
+
|
|
21
|
+
_DEFAULT_DB_DIR = Path.home() / ".prompture" / "usage"
_DEFAULT_DB_PATH = _DEFAULT_DB_DIR / "model_ledger.db"

# Keyed on (model, key-hash) so the same model used with different
# credentials is tracked as separate rows.
_SCHEMA_SQL = """
CREATE TABLE IF NOT EXISTS model_usage (
    model_name TEXT NOT NULL,
    api_key_hash TEXT NOT NULL,
    use_count INTEGER NOT NULL DEFAULT 1,
    total_tokens INTEGER NOT NULL DEFAULT 0,
    total_cost REAL NOT NULL DEFAULT 0.0,
    first_used TEXT NOT NULL,
    last_used TEXT NOT NULL,
    last_status TEXT NOT NULL DEFAULT 'success',
    PRIMARY KEY (model_name, api_key_hash)
);
"""


class ModelUsageLedger:
    """SQLite-backed model usage tracker.

    Thread-safe via an internal :class:`threading.Lock`; every operation
    opens a short-lived connection under the lock, so no connection is
    ever shared between threads.

    Args:
        db_path: Path to the SQLite database file. Defaults to
            ``~/.prompture/usage/model_ledger.db``.
    """

    def __init__(self, db_path: str | Path | None = None) -> None:
        self._db_path = Path(db_path) if db_path else _DEFAULT_DB_PATH
        self._db_path.parent.mkdir(parents=True, exist_ok=True)
        self._lock = threading.Lock()
        self._init_db()

    def _connect(self) -> sqlite3.Connection:
        """Open a connection with dict-like (``sqlite3.Row``) row access."""
        conn = sqlite3.connect(str(self._db_path))
        conn.row_factory = sqlite3.Row
        return conn

    def _run(self, operation):
        """Execute ``operation(conn)`` under the lock, always closing the connection.

        Centralizes the lock/connect/try/finally/close lifecycle that every
        public method previously duplicated.
        """
        with self._lock:
            conn = self._connect()
            try:
                return operation(conn)
            finally:
                conn.close()

    def _init_db(self) -> None:
        """Create the schema if it does not exist yet (idempotent)."""

        def create(conn: sqlite3.Connection) -> None:
            conn.executescript(_SCHEMA_SQL)
            conn.commit()

        self._run(create)

    # ------------------------------------------------------------------ #
    # Recording
    # ------------------------------------------------------------------ #

    def record_usage(
        self,
        model_name: str,
        *,
        api_key_hash: str = "",
        tokens: int = 0,
        cost: float = 0.0,
        status: str = "success",
    ) -> None:
        """Record a model usage event (upsert).

        On conflict the row's counters are incremented, ``last_used`` and
        ``last_status`` are updated, and ``first_used`` is preserved.
        """
        now = datetime.now(timezone.utc).isoformat()

        def upsert(conn: sqlite3.Connection) -> None:
            conn.execute(
                """
                INSERT INTO model_usage
                    (model_name, api_key_hash, use_count, total_tokens, total_cost,
                     first_used, last_used, last_status)
                VALUES (?, ?, 1, ?, ?, ?, ?, ?)
                ON CONFLICT(model_name, api_key_hash) DO UPDATE SET
                    use_count = use_count + 1,
                    total_tokens = total_tokens + excluded.total_tokens,
                    total_cost = total_cost + excluded.total_cost,
                    last_used = excluded.last_used,
                    last_status = excluded.last_status
                """,
                (model_name, api_key_hash, tokens, cost, now, now, status),
            )
            conn.commit()

        self._run(upsert)

    # ------------------------------------------------------------------ #
    # Queries
    # ------------------------------------------------------------------ #

    def get_model_stats(self, model_name: str, api_key_hash: str = "") -> dict[str, Any] | None:
        """Return stats for a specific model + key combination, or ``None``."""

        def query(conn: sqlite3.Connection) -> dict[str, Any] | None:
            row = conn.execute(
                "SELECT * FROM model_usage WHERE model_name = ? AND api_key_hash = ?",
                (model_name, api_key_hash),
            ).fetchone()
            return dict(row) if row is not None else None

        return self._run(query)

    def get_verified_models(self) -> set[str]:
        """Return model names whose most recent recorded usage succeeded.

        NOTE: the filter is on ``last_status``, so a model with an earlier
        success but a failing latest call is excluded.
        """

        def query(conn: sqlite3.Connection) -> set[str]:
            rows = conn.execute(
                "SELECT DISTINCT model_name FROM model_usage WHERE last_status = 'success'"
            ).fetchall()
            return {r["model_name"] for r in rows}

        return self._run(query)

    def get_recently_used(self, limit: int = 10) -> list[dict[str, Any]]:
        """Return recent model usage rows ordered by ``last_used`` descending."""

        def query(conn: sqlite3.Connection) -> list[dict[str, Any]]:
            rows = conn.execute(
                "SELECT * FROM model_usage ORDER BY last_used DESC LIMIT ?",
                (limit,),
            ).fetchall()
            return [dict(r) for r in rows]

        return self._run(query)

    def get_all_stats(self) -> list[dict[str, Any]]:
        """Return all usage rows, most recently used first."""

        def query(conn: sqlite3.Connection) -> list[dict[str, Any]]:
            rows = conn.execute("SELECT * FROM model_usage ORDER BY last_used DESC").fetchall()
            return [dict(r) for r in rows]

        return self._run(query)
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
# ------------------------------------------------------------------
|
|
166
|
+
# Module-level singleton
|
|
167
|
+
# ------------------------------------------------------------------
|
|
168
|
+
|
|
169
|
+
_ledger: ModelUsageLedger | None = None
_ledger_lock = threading.Lock()


def _get_ledger() -> ModelUsageLedger:
    """Lazily build and return the process-wide singleton ledger."""
    global _ledger
    # Fast path: already initialized, no locking needed.
    if _ledger is not None:
        return _ledger
    with _ledger_lock:
        # Re-check under the lock: another thread may have won the race.
        if _ledger is None:
            _ledger = ModelUsageLedger()
    return _ledger
|
|
181
|
+
|
|
182
|
+
|
|
183
|
+
# ------------------------------------------------------------------
|
|
184
|
+
# Public convenience functions (fire-and-forget)
|
|
185
|
+
# ------------------------------------------------------------------
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
def record_model_usage(
    model_name: str,
    *,
    api_key_hash: str = "",
    tokens: int = 0,
    cost: float = 0.0,
    status: str = "success",
) -> None:
    """Best-effort recording of a model usage event.

    Never raises: any failure is logged at DEBUG level and swallowed so
    this call cannot break an extraction/conversation flow.
    """
    try:
        ledger = _get_ledger()
        ledger.record_usage(
            model_name,
            api_key_hash=api_key_hash,
            tokens=tokens,
            cost=cost,
            status=status,
        )
    except Exception:
        logger.debug("Failed to record model usage for %s", model_name, exc_info=True)
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
def get_recently_used_models(limit: int = 10) -> list[dict[str, Any]]:
    """Best-effort lookup of recently used models.

    Returns an empty list when the ledger cannot be read.
    """
    try:
        ledger = _get_ledger()
        return ledger.get_recently_used(limit)
    except Exception:
        logger.debug("Failed to get recently used models", exc_info=True)
        return []
|
|
216
|
+
|
|
217
|
+
|
|
218
|
+
# ------------------------------------------------------------------
|
|
219
|
+
# API key hash helper
|
|
220
|
+
# ------------------------------------------------------------------
|
|
221
|
+
|
|
222
|
+
_LOCAL_PROVIDERS = frozenset({"ollama", "lmstudio", "local_http", "airllm"})
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
def _resolve_api_key_hash(model_name: str) -> str:
|
|
226
|
+
"""Derive an 8-char hex hash of the API key for the given model's provider.
|
|
227
|
+
|
|
228
|
+
Local providers (ollama, lmstudio, etc.) return ``""``.
|
|
229
|
+
"""
|
|
230
|
+
try:
|
|
231
|
+
provider = model_name.split("/", 1)[0].lower() if "/" in model_name else model_name.lower()
|
|
232
|
+
if provider in _LOCAL_PROVIDERS:
|
|
233
|
+
return ""
|
|
234
|
+
|
|
235
|
+
from .settings import settings
|
|
236
|
+
|
|
237
|
+
key_map: dict[str, str | None] = {
|
|
238
|
+
"openai": settings.openai_api_key,
|
|
239
|
+
"claude": settings.claude_api_key,
|
|
240
|
+
"google": settings.google_api_key,
|
|
241
|
+
"groq": settings.groq_api_key,
|
|
242
|
+
"grok": settings.grok_api_key,
|
|
243
|
+
"openrouter": settings.openrouter_api_key,
|
|
244
|
+
"azure": settings.azure_api_key,
|
|
245
|
+
"huggingface": settings.hf_token,
|
|
246
|
+
}
|
|
247
|
+
api_key = key_map.get(provider)
|
|
248
|
+
if not api_key:
|
|
249
|
+
return ""
|
|
250
|
+
return hashlib.sha256(api_key.encode()).hexdigest()[:8]
|
|
251
|
+
except Exception:
|
|
252
|
+
return ""
|
prompture/model_rates.py
CHANGED
|
@@ -9,6 +9,7 @@ import contextlib
|
|
|
9
9
|
import json
|
|
10
10
|
import logging
|
|
11
11
|
import threading
|
|
12
|
+
from dataclasses import dataclass
|
|
12
13
|
from datetime import datetime, timezone
|
|
13
14
|
from pathlib import Path
|
|
14
15
|
from typing import Any, Optional
|
|
@@ -139,7 +140,12 @@ def _lookup_model(provider: str, model_id: str) -> Optional[dict[str, Any]]:
|
|
|
139
140
|
if not isinstance(provider_data, dict):
|
|
140
141
|
return None
|
|
141
142
|
|
|
142
|
-
|
|
143
|
+
# models.dev nests actual models under a "models" key
|
|
144
|
+
models = provider_data.get("models", provider_data)
|
|
145
|
+
if not isinstance(models, dict):
|
|
146
|
+
return None
|
|
147
|
+
|
|
148
|
+
return models.get(model_id)
|
|
143
149
|
|
|
144
150
|
|
|
145
151
|
# ── Public API ──────────────────────────────────────────────────────────────
|
|
@@ -189,7 +195,12 @@ def get_all_provider_models(provider: str) -> list[str]:
|
|
|
189
195
|
if not isinstance(provider_data, dict):
|
|
190
196
|
return []
|
|
191
197
|
|
|
192
|
-
|
|
198
|
+
# models.dev nests actual models under a "models" key
|
|
199
|
+
models = provider_data.get("models", provider_data)
|
|
200
|
+
if not isinstance(models, dict):
|
|
201
|
+
return []
|
|
202
|
+
|
|
203
|
+
return list(models.keys())
|
|
193
204
|
|
|
194
205
|
|
|
195
206
|
def refresh_rates_cache(force: bool = False) -> bool:
|
|
@@ -215,3 +226,102 @@ def refresh_rates_cache(force: bool = False) -> bool:
|
|
|
215
226
|
return True
|
|
216
227
|
|
|
217
228
|
return False
|
|
229
|
+
|
|
230
|
+
|
|
231
|
+
# ── Model Capabilities ─────────────────────────────────────────────────────
|
|
232
|
+
|
|
233
|
+
|
|
234
|
+
@dataclass(frozen=True)
class ModelCapabilities:
    """Normalized capability metadata for an LLM model from models.dev.

    All fields default to ``None`` (unknown) so callers can distinguish
    "the model doesn't support X" from "we have no data about X".
    Instances are frozen, so they are hashable and safe to share/cache.
    """

    # Mapped from the models.dev "temperature" flag; None when absent.
    supports_temperature: Optional[bool] = None
    # Mapped from the models.dev "tool_call" flag; None when absent.
    supports_tool_use: Optional[bool] = None
    # Mapped from the models.dev "structured_output" flag; None when absent.
    supports_structured_output: Optional[bool] = None
    # Derived: True when "image" appears in the input modalities;
    # None when no modality data is available.
    supports_vision: Optional[bool] = None
    # Mapped from the models.dev "reasoning" flag; None when absent.
    is_reasoning: Optional[bool] = None
    # Context window in tokens (models.dev "limit.context"); None if unknown.
    context_window: Optional[int] = None
    # Max output tokens (models.dev "limit.output"); None if unknown.
    max_output_tokens: Optional[int] = None
    # Accepted input / produced output modalities; empty tuple when unknown.
    modalities_input: tuple[str, ...] = ()
    modalities_output: tuple[str, ...] = ()
|
|
251
|
+
|
|
252
|
+
|
|
253
|
+
def get_model_capabilities(provider: str, model_id: str) -> Optional[ModelCapabilities]:
    """Return capability metadata for a model, or ``None`` if unavailable.

    Maps models.dev fields to a :class:`ModelCapabilities` instance:

    - ``temperature`` → ``supports_temperature``
    - ``tool_call`` → ``supports_tool_use``
    - ``structured_output`` → ``supports_structured_output``
    - ``"image" in modalities.input`` → ``supports_vision``
    - ``reasoning`` → ``is_reasoning``
    - ``limit.context`` → ``context_window``
    - ``limit.output`` → ``max_output_tokens``
    """
    entry = _lookup_model(provider, model_id)
    if entry is None:
        return None

    def _flag(key: str) -> Optional[bool]:
        # Tri-state boolean: True/False when present, None when absent.
        return bool(entry[key]) if key in entry else None

    modalities = entry.get("modalities", {})

    def _modality(key: str) -> tuple[str, ...]:
        # Normalize a modality list to a tuple of strings; () when missing
        # or malformed.
        raw = modalities.get(key) if isinstance(modalities, dict) else None
        if isinstance(raw, (list, tuple)):
            return tuple(str(m) for m in raw)
        return ()

    modalities_input = _modality("input")
    modalities_output = _modality("output")

    # Vision is inferred from input modalities; unknown when we have none.
    supports_vision: Optional[bool] = None
    if modalities_input:
        supports_vision = "image" in modalities_input

    limits = entry.get("limit", {})

    def _int_limit(key: str) -> Optional[int]:
        # Coerce a limit value to int, tolerating missing/non-numeric data.
        value = limits.get(key) if isinstance(limits, dict) else None
        if value is None:
            return None
        with contextlib.suppress(TypeError, ValueError):
            return int(value)
        return None

    return ModelCapabilities(
        supports_temperature=_flag("temperature"),
        supports_tool_use=_flag("tool_call"),
        supports_structured_output=_flag("structured_output"),
        supports_vision=supports_vision,
        is_reasoning=_flag("reasoning"),
        context_window=_int_limit("context"),
        max_output_tokens=_int_limit("output"),
        modalities_input=modalities_input,
        modalities_output=modalities_output,
    )
|
|
@@ -1,26 +1,27 @@
|
|
|
1
|
-
prompture/__init__.py,sha256=
|
|
2
|
-
prompture/_version.py,sha256=
|
|
1
|
+
prompture/__init__.py,sha256=cJnkefDpiyFbU77juw4tXPdKJQWoJ-c6XBFt2v-e5Q4,7455
|
|
2
|
+
prompture/_version.py,sha256=0--ZUdBaCZlj76yYZsjkHfNkwola4VJybJ6Yx9HtHNA,719
|
|
3
3
|
prompture/agent.py,sha256=xe_yFHGDzTxaU4tmaLt5AQnzrN0I72hBGwGVrCxg2D0,34704
|
|
4
4
|
prompture/agent_types.py,sha256=Icl16PQI-ThGLMFCU43adtQA6cqETbsPn4KssKBI4xc,4664
|
|
5
5
|
prompture/async_agent.py,sha256=nOLOQCNkg0sKKTpryIiidmIcAAlA3FR2NfnZwrNBuCg,33066
|
|
6
|
-
prompture/async_conversation.py,sha256=
|
|
7
|
-
prompture/async_core.py,sha256=
|
|
8
|
-
prompture/async_driver.py,sha256=
|
|
6
|
+
prompture/async_conversation.py,sha256=m9sdKBu1wxo5veGwO6g6Zvf1sBzpuxP-mSIEeNKlBjQ,31155
|
|
7
|
+
prompture/async_core.py,sha256=hbRXLvsBJv3JAnUwGZbazsL6x022FrsJU6swmZolgxY,29745
|
|
8
|
+
prompture/async_driver.py,sha256=4VQ9Q_tI6Ufw6W1CYJ5j8hVtgVdqFGuk6e2tLaSceWE,8581
|
|
9
9
|
prompture/async_groups.py,sha256=8B383EF_qI9NzcG9zljLKjIZ_37bpNivvsmfJQoOGRk,19894
|
|
10
10
|
prompture/cache.py,sha256=4dfQDMsEZ9JMQDXLOkiugPmmMJQIfKVE8rTAKDH4oL8,14401
|
|
11
11
|
prompture/callbacks.py,sha256=JPDqWGzPIzv44l54ocmezlYVBnbKPDEEXRrLdluWGAo,1731
|
|
12
12
|
prompture/cli.py,sha256=tNiIddRmgC1BomjY5O1VVVAwvqHVzF8IHmQrM-cG2wQ,2902
|
|
13
|
-
prompture/conversation.py,sha256=
|
|
14
|
-
prompture/core.py,sha256=
|
|
15
|
-
prompture/cost_mixin.py,sha256=
|
|
16
|
-
prompture/discovery.py,sha256=
|
|
17
|
-
prompture/driver.py,sha256=
|
|
13
|
+
prompture/conversation.py,sha256=kBflwh7Qmw1I_jcUGyV36oskdVz4SYDSw_dCjemRRRc,32756
|
|
14
|
+
prompture/core.py,sha256=5FHwX7fNPwFHMbFCMvV-RH7LpPpTToLAmcyDnKbrN0E,57202
|
|
15
|
+
prompture/cost_mixin.py,sha256=BR-zd42Tj4K865iRIntXlJEfryUcrd5Tuwcfx89QknE,3547
|
|
16
|
+
prompture/discovery.py,sha256=EWx2d-LJHmlDpm8dlpOicey6XZdDx70ZEetIlOOIlxw,9464
|
|
17
|
+
prompture/driver.py,sha256=wE7K3vnqeCVT5pEEBP-3uZ6e-YyU6TXtnEKRSB25eOc,10410
|
|
18
18
|
prompture/field_definitions.py,sha256=PLvxq2ot-ngJ8JbWkkZ-XLtM1wvjUQ3TL01vSEo-a6E,21368
|
|
19
19
|
prompture/group_types.py,sha256=BxeFV1tI4PTH3xPOie7q3-35ivkTdB9lJUPLH0kPH7A,4731
|
|
20
20
|
prompture/groups.py,sha256=q9lpD57VWw6iQgK9S0nLVidItJZmusJkmpblM4EX9Sc,18349
|
|
21
21
|
prompture/image.py,sha256=3uBxC6blXRNyY5KAJ5MkG6ow8KGAslX8WxM8Is8S8cw,5620
|
|
22
|
+
prompture/ledger.py,sha256=2iXkd9PWiM9WpRCxvnHG1-nwh_IM4mCbxjF4LE92Gzs,8576
|
|
22
23
|
prompture/logging.py,sha256=SkFO26_56Zai05vW8kTq3jvJudfLG2ipI5qNHaXKH3g,2574
|
|
23
|
-
prompture/model_rates.py,sha256=
|
|
24
|
+
prompture/model_rates.py,sha256=w2syZCbYM3DGP978Wopgy0AbmvSQcDm-6ALLBLLrGkg,10482
|
|
24
25
|
prompture/persistence.py,sha256=stcsH9Onth3BlK0QTWDKtXFp3FBmwUS5PI5R1glsIQc,9293
|
|
25
26
|
prompture/persona.py,sha256=SpLW-XPdG0avvJx8uGqJvMRZy65OjzfmJck7qbd28gc,17526
|
|
26
27
|
prompture/runner.py,sha256=lHe2L2jqY1pDXoKNPJALN9lAm-Q8QOY8C8gw-vM9VrM,4213
|
|
@@ -35,29 +36,29 @@ prompture/aio/__init__.py,sha256=bKqTu4Jxld16aP_7SP9wU5au45UBIb041ORo4E4HzVo,181
|
|
|
35
36
|
prompture/drivers/__init__.py,sha256=VuEBZPqaQzXLl_Lvn_c5mRlJJrrlObZCLeHaR8n2eJ4,7050
|
|
36
37
|
prompture/drivers/airllm_driver.py,sha256=SaTh7e7Plvuct_TfRqQvsJsKHvvM_3iVqhBtlciM-Kw,3858
|
|
37
38
|
prompture/drivers/async_airllm_driver.py,sha256=1hIWLXfyyIg9tXaOE22tLJvFyNwHnOi1M5BIKnV8ysk,908
|
|
38
|
-
prompture/drivers/async_azure_driver.py,sha256=
|
|
39
|
-
prompture/drivers/async_claude_driver.py,sha256=
|
|
40
|
-
prompture/drivers/async_google_driver.py,sha256=
|
|
41
|
-
prompture/drivers/async_grok_driver.py,sha256=
|
|
42
|
-
prompture/drivers/async_groq_driver.py,sha256=
|
|
39
|
+
prompture/drivers/async_azure_driver.py,sha256=CFYh4TsI16m7KgAQ_jThJCRw60e_MlHEejDhm7klGH4,4456
|
|
40
|
+
prompture/drivers/async_claude_driver.py,sha256=oawbFVVMtRlikQOmu3jRjbdpoeu95JqTF1YHLKO3ybE,10576
|
|
41
|
+
prompture/drivers/async_google_driver.py,sha256=LTUgCXJjzuTDGzsCsmY2-xH2KdTLJD7htwO49ZNFOdE,13711
|
|
42
|
+
prompture/drivers/async_grok_driver.py,sha256=s3bXEGhVrMyw10CowkBhs5522mhipWJyWWu-xVixzyg,3538
|
|
43
|
+
prompture/drivers/async_groq_driver.py,sha256=pjAh_bgZWSWaNSm5XrU-u3gRV6YSGwNG5NfAbkYeJ84,3067
|
|
43
44
|
prompture/drivers/async_hugging_driver.py,sha256=IblxqU6TpNUiigZ0BCgNkAgzpUr2FtPHJOZnOZMnHF0,2152
|
|
44
45
|
prompture/drivers/async_lmstudio_driver.py,sha256=rPn2qVPm6UE2APzAn7ZHYTELUwr0dQMi8XHv6gAhyH8,5782
|
|
45
46
|
prompture/drivers/async_local_http_driver.py,sha256=qoigIf-w3_c2dbVdM6m1e2RMAWP4Gk4VzVs5hM3lPvQ,1609
|
|
46
47
|
prompture/drivers/async_ollama_driver.py,sha256=FaSXtFXrgeVHIe0b90Vg6rGeSTWLpPnjaThh9Ai7qQo,5042
|
|
47
|
-
prompture/drivers/async_openai_driver.py,sha256=
|
|
48
|
-
prompture/drivers/async_openrouter_driver.py,sha256=
|
|
48
|
+
prompture/drivers/async_openai_driver.py,sha256=mv0_H2ZQFm96xfDL1oFz3qRhB9v-whv48dwvE0b02dA,8956
|
|
49
|
+
prompture/drivers/async_openrouter_driver.py,sha256=pMenRxnRnJlx5lR25qejlsAzt6wGPBr10L85wLYKncI,3781
|
|
49
50
|
prompture/drivers/async_registry.py,sha256=syervbb7THneJ-NUVSuxy4cnxGW6VuNzKv-Aqqn2ysU,4329
|
|
50
|
-
prompture/drivers/azure_driver.py,sha256=
|
|
51
|
-
prompture/drivers/claude_driver.py,sha256=
|
|
52
|
-
prompture/drivers/google_driver.py,sha256=
|
|
53
|
-
prompture/drivers/grok_driver.py,sha256=
|
|
54
|
-
prompture/drivers/groq_driver.py,sha256=
|
|
51
|
+
prompture/drivers/azure_driver.py,sha256=bcfYxfkIbfxqopr_O6sbhdtk4PLl7t-4gbUL0OoMeM0,5710
|
|
52
|
+
prompture/drivers/claude_driver.py,sha256=C8Av3DXP2x3f35jEv8BRwEM_4vh0cfmLsy3t5dsR6aM,11837
|
|
53
|
+
prompture/drivers/google_driver.py,sha256=Zck5VUsW37kDgohXz3cUWRmZ88OfhmTpVD-qzAVMp-8,16318
|
|
54
|
+
prompture/drivers/grok_driver.py,sha256=CzAXKAbbWmbE8qLFZxxoEhf4Qzbtc9YqDX7kkCsE4dk,5320
|
|
55
|
+
prompture/drivers/groq_driver.py,sha256=61LKHhYyRiFkHKbLKFYX10fqjpL_INtPY_Zeb55AV0o,4221
|
|
55
56
|
prompture/drivers/hugging_driver.py,sha256=gZir3XnM77VfYIdnu3S1pRftlZJM6G3L8bgGn5esg-Q,2346
|
|
56
57
|
prompture/drivers/lmstudio_driver.py,sha256=9ZnJ1l5LuWAjkH2WKfFjZprNMVIXoSC7qXDNDTxm-tA,6748
|
|
57
58
|
prompture/drivers/local_http_driver.py,sha256=QJgEf9kAmy8YZ5fb8FHnWuhoDoZYNd8at4jegzNVJH0,1658
|
|
58
59
|
prompture/drivers/ollama_driver.py,sha256=k9xeUwFp91OrDbjkbYI-F8CDFy5ew-zQ0btXqwbXXWM,10220
|
|
59
|
-
prompture/drivers/openai_driver.py,sha256=
|
|
60
|
-
prompture/drivers/openrouter_driver.py,sha256=
|
|
60
|
+
prompture/drivers/openai_driver.py,sha256=WJ2LnSttq0FvrRzEeweAxzigv3qu_BYvpXv7PSVRZSI,10460
|
|
61
|
+
prompture/drivers/openrouter_driver.py,sha256=J7SMZXH-nK_J9H-GVuYMtJMYuK_2kZcDSmOpBipieNI,5440
|
|
61
62
|
prompture/drivers/registry.py,sha256=Dg_5w9alnIPKhOnsR9Xspuf5T7roBGu0r_L2Cf-UhXs,9926
|
|
62
63
|
prompture/drivers/vision_helpers.py,sha256=l5iYXHJLR_vLFvqDPPPK1QqK7YPKh5GwocpbSyt0R04,5403
|
|
63
64
|
prompture/scaffold/__init__.py,sha256=aitUxBV0MpjC7Od3iG8WUzcC7tGPXSt3oMzUBX8UDwQ,60
|
|
@@ -69,9 +70,9 @@ prompture/scaffold/templates/env.example.j2,sha256=eESKr1KWgyrczO6d-nwAhQwSpf_G-
|
|
|
69
70
|
prompture/scaffold/templates/main.py.j2,sha256=TEgc5OvsZOEX0JthkSW1NI_yLwgoeVN_x97Ibg-vyWY,2632
|
|
70
71
|
prompture/scaffold/templates/models.py.j2,sha256=JrZ99GCVK6TKWapskVRSwCssGrTu5cGZ_r46fOhY2GE,858
|
|
71
72
|
prompture/scaffold/templates/requirements.txt.j2,sha256=m3S5fi1hq9KG9l_9j317rjwWww0a43WMKd8VnUWv2A4,102
|
|
72
|
-
prompture-0.0.
|
|
73
|
-
prompture-0.0.
|
|
74
|
-
prompture-0.0.
|
|
75
|
-
prompture-0.0.
|
|
76
|
-
prompture-0.0.
|
|
77
|
-
prompture-0.0.
|
|
73
|
+
prompture-0.0.40.dev1.dist-info/licenses/LICENSE,sha256=0HgDepH7aaHNFhHF-iXuW6_GqDfYPnVkjtiCAZ4yS8I,1060
|
|
74
|
+
prompture-0.0.40.dev1.dist-info/METADATA,sha256=0GXp_XMAxefYsTblXEtrbwi_HaKMKHDBeqZzN9gcQW4,10842
|
|
75
|
+
prompture-0.0.40.dev1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
|
|
76
|
+
prompture-0.0.40.dev1.dist-info/entry_points.txt,sha256=AFPG3lJR86g4IJMoWQUW5Ph7G6MLNWG3A2u2Tp9zkp8,48
|
|
77
|
+
prompture-0.0.40.dev1.dist-info/top_level.txt,sha256=to86zq_kjfdoLeAxQNr420UWqT0WzkKoZ509J7Qr2t4,10
|
|
78
|
+
prompture-0.0.40.dev1.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|
|
File without changes
|