prompture-0.0.35-py3-none-any.whl → prompture-0.0.40.dev1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. prompture/__init__.py +132 -3
  2. prompture/_version.py +2 -2
  3. prompture/agent.py +924 -0
  4. prompture/agent_types.py +156 -0
  5. prompture/async_agent.py +880 -0
  6. prompture/async_conversation.py +208 -17
  7. prompture/async_core.py +16 -0
  8. prompture/async_driver.py +63 -0
  9. prompture/async_groups.py +551 -0
  10. prompture/conversation.py +222 -18
  11. prompture/core.py +46 -12
  12. prompture/cost_mixin.py +37 -0
  13. prompture/discovery.py +132 -44
  14. prompture/driver.py +77 -0
  15. prompture/drivers/__init__.py +5 -1
  16. prompture/drivers/async_azure_driver.py +11 -5
  17. prompture/drivers/async_claude_driver.py +184 -9
  18. prompture/drivers/async_google_driver.py +222 -28
  19. prompture/drivers/async_grok_driver.py +11 -5
  20. prompture/drivers/async_groq_driver.py +11 -5
  21. prompture/drivers/async_lmstudio_driver.py +74 -5
  22. prompture/drivers/async_ollama_driver.py +13 -3
  23. prompture/drivers/async_openai_driver.py +162 -5
  24. prompture/drivers/async_openrouter_driver.py +11 -5
  25. prompture/drivers/async_registry.py +5 -1
  26. prompture/drivers/azure_driver.py +10 -4
  27. prompture/drivers/claude_driver.py +17 -1
  28. prompture/drivers/google_driver.py +227 -33
  29. prompture/drivers/grok_driver.py +11 -5
  30. prompture/drivers/groq_driver.py +11 -5
  31. prompture/drivers/lmstudio_driver.py +73 -8
  32. prompture/drivers/ollama_driver.py +16 -5
  33. prompture/drivers/openai_driver.py +26 -11
  34. prompture/drivers/openrouter_driver.py +11 -5
  35. prompture/drivers/vision_helpers.py +153 -0
  36. prompture/group_types.py +147 -0
  37. prompture/groups.py +530 -0
  38. prompture/image.py +180 -0
  39. prompture/ledger.py +252 -0
  40. prompture/model_rates.py +112 -2
  41. prompture/persistence.py +254 -0
  42. prompture/persona.py +482 -0
  43. prompture/serialization.py +218 -0
  44. prompture/settings.py +1 -0
  45. prompture-0.0.40.dev1.dist-info/METADATA +369 -0
  46. prompture-0.0.40.dev1.dist-info/RECORD +78 -0
  47. prompture-0.0.35.dist-info/METADATA +0 -464
  48. prompture-0.0.35.dist-info/RECORD +0 -66
  49. {prompture-0.0.35.dist-info → prompture-0.0.40.dev1.dist-info}/WHEEL +0 -0
  50. {prompture-0.0.35.dist-info → prompture-0.0.40.dev1.dist-info}/entry_points.txt +0 -0
  51. {prompture-0.0.35.dist-info → prompture-0.0.40.dev1.dist-info}/licenses/LICENSE +0 -0
  52. {prompture-0.0.35.dist-info → prompture-0.0.40.dev1.dist-info}/top_level.txt +0 -0
prompture/settings.py CHANGED
@@ -33,6 +33,7 @@ class Settings(BaseSettings):
     # LM Studio
     lmstudio_endpoint: str = "http://127.0.0.1:1234/v1/chat/completions"
     lmstudio_model: str = "deepseek/deepseek-r1-0528-qwen3-8b"
+    lmstudio_api_key: Optional[str] = None

     # Google
     google_api_key: Optional[str] = None
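The only change to `settings.py` is the new `lmstudio_api_key` field, for LM Studio servers that sit behind authentication. A minimal sketch of setting it explicitly, assuming `Settings` is importable from `prompture.settings` as the file list suggests, and that pydantic-settings also reads it from an `LMSTUDIO_API_KEY` environment variable (both assumptions, not confirmed by this diff):

```python
from prompture.settings import Settings  # module path from the file list; class name from the hunk header

# The field defaults to None, so existing local LM Studio setups keep working without a key.
settings = Settings(lmstudio_api_key="example-key")  # hypothetical key value
print(settings.lmstudio_endpoint)  # "http://127.0.0.1:1234/v1/chat/completions" by default
print(settings.lmstudio_api_key)   # "example-key"
```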
prompture-0.0.40.dev1.dist-info/METADATA ADDED
@@ -0,0 +1,369 @@
+ Metadata-Version: 2.4
+ Name: prompture
+ Version: 0.0.40.dev1
+ Summary: Ask LLMs to return structured JSON and run cross-model tests. API-first.
+ Author-email: Juan Denis <juan@vene.co>
+ License-Expression: MIT
+ Project-URL: Homepage, https://github.com/jhd3197/prompture
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Operating System :: OS Independent
+ Requires-Python: >=3.9
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: anthropic>=0.8.0
+ Requires-Dist: click>=8.0
+ Requires-Dist: google-generativeai>=0.3.0
+ Requires-Dist: groq>=0.4.0
+ Requires-Dist: httpx>=0.25.0
+ Requires-Dist: jsonschema>=4.0
+ Requires-Dist: openai>=1.0.0
+ Requires-Dist: pandas>=1.3.0
+ Requires-Dist: pydantic>=1.10
+ Requires-Dist: pydantic-settings>=2.0
+ Requires-Dist: python-dotenv>=0.19.0
+ Requires-Dist: python-toon>=0.1.0
+ Requires-Dist: requests>=2.28
+ Requires-Dist: python-dateutil>=2.9.0
+ Requires-Dist: tukuy>=0.0.6
+ Requires-Dist: pyyaml>=6.0
+ Provides-Extra: test
+ Requires-Dist: pytest>=7.0; extra == "test"
+ Requires-Dist: pytest-asyncio>=0.23.0; extra == "test"
+ Provides-Extra: dev
+ Requires-Dist: pytest>=7.0; extra == "dev"
+ Requires-Dist: pytest-asyncio>=0.23.0; extra == "dev"
+ Requires-Dist: ruff>=0.8.0; extra == "dev"
+ Provides-Extra: airllm
+ Requires-Dist: airllm>=2.8.0; extra == "airllm"
+ Provides-Extra: redis
+ Requires-Dist: redis>=4.0; extra == "redis"
+ Provides-Extra: serve
+ Requires-Dist: fastapi>=0.100; extra == "serve"
+ Requires-Dist: uvicorn[standard]>=0.20; extra == "serve"
+ Requires-Dist: sse-starlette>=1.6; extra == "serve"
+ Provides-Extra: scaffold
+ Requires-Dist: jinja2>=3.0; extra == "scaffold"
+ Dynamic: license-file
+
+ <p align="center">
+ <h1 align="center">Prompture</h1>
+ <p align="center">Structured JSON extraction from any LLM. Schema-enforced, Pydantic-native, multi-provider.</p>
+ </p>
+
+ <p align="center">
+ <a href="https://pypi.org/project/prompture/"><img src="https://badge.fury.io/py/prompture.svg" alt="PyPI version"></a>
+ <a href="https://pypi.org/project/prompture/"><img src="https://img.shields.io/pypi/pyversions/prompture.svg" alt="Python versions"></a>
+ <a href="https://opensource.org/licenses/MIT"><img src="https://img.shields.io/badge/License-MIT-blue.svg" alt="License: MIT"></a>
+ <a href="https://pepy.tech/project/prompture"><img src="https://static.pepy.tech/badge/prompture" alt="Downloads"></a>
+ <a href="https://github.com/jhd3197/prompture"><img src="https://img.shields.io/github/stars/jhd3197/prompture?style=social" alt="GitHub stars"></a>
+ </p>
+
+ ---
+
+ **Prompture** is a Python library that turns LLM responses into validated, structured data. Define a schema or Pydantic model, point it at any provider, and get typed output back — with token tracking, cost calculation, and automatic JSON repair built in.
+
+ ```python
+ from pydantic import BaseModel
+ from prompture import extract_with_model
+
+ class Person(BaseModel):
+     name: str
+     age: int
+     profession: str
+
+ person = extract_with_model(Person, "Maria is 32, a developer in NYC.", model_name="openai/gpt-4")
+ print(person.name) # Maria
+ ```
+
+ ## Key Features
+
+ - **Structured output** — JSON schema enforcement and direct Pydantic model population
+ - **12 providers** — OpenAI, Claude, Google, Groq, Grok, Azure, Ollama, LM Studio, OpenRouter, HuggingFace, AirLLM, and generic HTTP
+ - **TOON input conversion** — 45-60% token savings when sending structured data via [Token-Oriented Object Notation](https://github.com/jhd3197/python-toon)
+ - **Stepwise extraction** — Per-field prompts with smart type coercion (shorthand numbers, multilingual booleans, dates)
+ - **Field registry** — 50+ predefined extraction fields with template variables and Pydantic integration
+ - **Conversations** — Stateful multi-turn sessions with sync and async support
+ - **Tool use** — Function calling and streaming across supported providers
+ - **Caching** — Built-in response cache with memory, SQLite, and Redis backends
+ - **Plugin system** — Register custom drivers via entry points
+ - **Usage tracking** — Token counts and cost calculation on every call
+ - **Auto-repair** — Optional second LLM pass to fix malformed JSON
+ - **Batch testing** — Spec-driven suites to compare models side by side
+
+ ## Installation
+
+ ```bash
+ pip install prompture
+ ```
+
+ Optional extras:
+
+ ```bash
+ pip install prompture[redis] # Redis cache backend
+ pip install prompture[serve] # FastAPI server mode
+ pip install prompture[airllm] # AirLLM local inference
+ ```
+
+ ## Configuration
+
+ Set API keys for the providers you use. Prompture reads from environment variables or a `.env` file:
+
+ ```bash
+ OPENAI_API_KEY=sk-...
+ ANTHROPIC_API_KEY=sk-ant-...
+ GOOGLE_API_KEY=...
+ GROQ_API_KEY=...
+ GROK_API_KEY=...
+ OPENROUTER_API_KEY=...
+ AZURE_OPENAI_ENDPOINT=...
+ AZURE_OPENAI_API_KEY=...
+ ```
+
+ Local providers (Ollama, LM Studio) work out of the box with no keys required.
+
+ ## Providers
+
+ Model strings use `"provider/model"` format. The provider prefix routes to the correct driver automatically.
+
+ | Provider | Example Model | Cost |
+ |---|---|---|
+ | `openai` | `openai/gpt-4` | Automatic |
+ | `claude` | `claude/claude-3` | Automatic |
+ | `google` | `google/gemini-1.5-pro` | Automatic |
+ | `groq` | `groq/llama2-70b-4096` | Automatic |
+ | `grok` | `grok/grok-4-fast-reasoning` | Automatic |
+ | `azure` | `azure/deployed-name` | Automatic |
+ | `openrouter` | `openrouter/anthropic/claude-2` | Automatic |
+ | `ollama` | `ollama/llama3.1:8b` | Free (local) |
+ | `lmstudio` | `lmstudio/local-model` | Free (local) |
+ | `huggingface` | `hf/model-name` | Free (local) |
+ | `http` | `http/self-hosted` | Free |
+
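Because the prefix alone selects the driver, the same extraction call can be pointed at a hosted or a local backend just by changing the model string. A minimal sketch reusing `extract_with_model` from the quickstart with two model strings taken from the table; running it requires the corresponding API key or a local Ollama server:

```python
from pydantic import BaseModel
from prompture import extract_with_model

class Person(BaseModel):
    name: str
    age: int
    profession: str

text = "Maria is 32, a developer in NYC."

# Same call, different backends: only the provider prefix in model_name changes.
hosted = extract_with_model(Person, text, model_name="openai/gpt-4")       # paid API
local = extract_with_model(Person, text, model_name="ollama/llama3.1:8b")  # free, local
print(hosted.name, local.name)
```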
+ ## Usage
+
+ ### One-Shot Pydantic Extraction
+
+ Single LLM call, returns a validated Pydantic instance:
+
+ ```python
+ from typing import List, Optional
+ from pydantic import BaseModel
+ from prompture import extract_with_model
+
+ class Person(BaseModel):
+     name: str
+     age: int
+     profession: str
+     city: str
+     hobbies: List[str]
+     education: Optional[str] = None
+
+ person = extract_with_model(
+     Person,
+     "Maria is 32, a software developer in New York. She loves hiking and photography.",
+     model_name="openai/gpt-4"
+ )
+ print(person.model_dump())
+ ```
+
+ ### Stepwise Extraction
+
+ One LLM call per field. Higher accuracy, per-field error recovery:
+
+ ```python
+ from prompture import stepwise_extract_with_model
+
+ result = stepwise_extract_with_model(
+     Person,
+     "Maria is 32, a software developer in New York. She loves hiking and photography.",
+     model_name="openai/gpt-4"
+ )
+ print(result["model"].model_dump())
+ print(result["usage"]) # per-field and total token usage
+ ```
+
+ | Aspect | `extract_with_model` | `stepwise_extract_with_model` |
+ |---|---|---|
+ | LLM calls | 1 | N (one per field) |
+ | Speed / cost | Faster, cheaper | Slower, higher |
+ | Accuracy | Good global coherence | Higher per-field accuracy |
+ | Error handling | All-or-nothing | Per-field recovery |
+
+ ### JSON Schema Extraction
+
+ For raw JSON output with full control:
+
+ ```python
+ from prompture import ask_for_json
+
+ schema = {
+     "type": "object",
+     "required": ["name", "age"],
+     "properties": {
+         "name": {"type": "string"},
+         "age": {"type": "integer"}
+     }
+ }
+
+ result = ask_for_json(
+     content_prompt="Extract the person's info from: John is 28 and lives in Miami.",
+     json_schema=schema,
+     model_name="openai/gpt-4"
+ )
+ print(result["json_object"]) # {"name": "John", "age": 28}
+ print(result["usage"]) # token counts and cost
+ ```
+
+ ### TOON Input — Token Savings
+
+ Analyze structured data with automatic TOON conversion for 45-60% fewer tokens:
+
+ ```python
+ from prompture import extract_from_data
+
+ products = [
+     {"id": 1, "name": "Laptop", "price": 999.99, "rating": 4.5},
+     {"id": 2, "name": "Book", "price": 19.99, "rating": 4.2},
+     {"id": 3, "name": "Headphones", "price": 149.99, "rating": 4.7},
+ ]
+
+ result = extract_from_data(
+     data=products,
+     question="What is the average price and highest rated product?",
+     json_schema={
+         "type": "object",
+         "properties": {
+             "average_price": {"type": "number"},
+             "highest_rated": {"type": "string"}
+         }
+     },
+     model_name="openai/gpt-4"
+ )
+
+ print(result["json_object"])
+ # {"average_price": 389.99, "highest_rated": "Headphones"}
+
+ print(f"Token savings: {result['token_savings']['percentage_saved']}%")
+ ```
+
+ Works with Pandas DataFrames via extract_from_pandas().
+
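A hedged sketch of the DataFrame variant. The exact signature of `extract_from_pandas` is not shown here, so this assumes it mirrors `extract_from_data`, taking the DataFrame first plus the same `question`, `json_schema`, and `model_name` keywords:

```python
import pandas as pd
from prompture import extract_from_pandas

df = pd.DataFrame([
    {"id": 1, "name": "Laptop", "price": 999.99, "rating": 4.5},
    {"id": 2, "name": "Book", "price": 19.99, "rating": 4.2},
])

# Assumed keyword layout, mirroring extract_from_data above.
result = extract_from_pandas(
    df,
    question="Which product has the best rating?",
    json_schema={"type": "object", "properties": {"best_rated": {"type": "string"}}},
    model_name="openai/gpt-4",
)
print(result["json_object"])
```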
+ ### Field Definitions
+
+ Use the built-in field registry for consistent extraction across models:
+
+ ```python
+ from pydantic import BaseModel
+ from prompture import field_from_registry, stepwise_extract_with_model
+
+ class Person(BaseModel):
+     name: str = field_from_registry("name")
+     age: int = field_from_registry("age")
+     email: str = field_from_registry("email")
+     occupation: str = field_from_registry("occupation")
+
+ result = stepwise_extract_with_model(
+     Person,
+     "John Smith, 25, software engineer at TechCorp, john@example.com",
+     model_name="openai/gpt-4"
+ )
+ ```
+
+ Register custom fields with template variables:
+
+ ```python
+ from prompture import register_field
+
+ register_field("document_date", {
+     "type": "str",
+     "description": "Document creation date",
+     "instructions": "Use {{current_date}} if not specified",
+     "default": "{{current_date}}",
+     "nullable": False
+ })
+ ```
+
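A registered field can then be pulled into a model exactly like the built-in ones. A short sketch combining `register_field` and `field_from_registry` from the examples above, using a hypothetical `Invoice` model:

```python
from pydantic import BaseModel
from prompture import field_from_registry, register_field, stepwise_extract_with_model

register_field("document_date", {
    "type": "str",
    "description": "Document creation date",
    "instructions": "Use {{current_date}} if not specified",
    "default": "{{current_date}}",
    "nullable": False,
})

class Invoice(BaseModel):
    # Resolves to the definition registered above, same as the built-in "name"/"age" fields.
    document_date: str = field_from_registry("document_date")

result = stepwise_extract_with_model(
    Invoice,
    "Invoice issued on 2024-03-05 for consulting services.",
    model_name="openai/gpt-4",
)
print(result["model"].document_date)
```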
+ ### Conversations
+
+ Stateful multi-turn sessions:
+
+ ```python
+ from prompture import Conversation
+
+ conv = Conversation(model_name="openai/gpt-4")
+ conv.add_message("system", "You are a helpful assistant.")
+ response = conv.send("What is the capital of France?")
+ follow_up = conv.send("What about Germany?") # retains context
+ ```
+
+ ### Model Discovery
+
+ Auto-detect available models from configured providers:
+
+ ```python
+ from prompture import get_available_models
+
+ models = get_available_models()
+ for model in models:
+     print(model) # "openai/gpt-4", "ollama/llama3:latest", ...
+ ```
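Discovery returns plain `provider/model` strings, so grouping or filtering by provider needs nothing beyond ordinary string handling; a small sketch:

```python
from prompture import get_available_models

models = get_available_models()

# Model strings are "provider/model", so the prefix is enough to group them.
local_models = [m for m in models if m.startswith(("ollama/", "lmstudio/"))]
print(f"{len(local_models)} local models available")
for name in sorted(local_models):
    print(" ", name)
```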
+
+ ### Logging and Debugging
+
+ ```python
+ import logging
+ from prompture import configure_logging
+
+ configure_logging(logging.DEBUG)
+ ```
+
+ ### Response Shape
+
+ All extraction functions return a consistent structure:
+
+ ```python
+ {
+     "json_string": str, # raw JSON text
+     "json_object": dict, # parsed result
+     "usage": {
+         "prompt_tokens": int,
+         "completion_tokens": int,
+         "total_tokens": int,
+         "cost": float,
+         "model_name": str
+     }
+ }
+ ```
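Since every call reports this same `usage` block, batch-level cost accounting is a small fold over results. A sketch built only on the `ask_for_json` example and the keys listed above:

```python
from prompture import ask_for_json

schema = {"type": "object", "properties": {"name": {"type": "string"}, "age": {"type": "integer"}}}
texts = ["John is 28 and lives in Miami.", "Maria is 32, a developer in NYC."]

results = [
    ask_for_json(
        content_prompt=f"Extract the person's info from: {text}",
        json_schema=schema,
        model_name="openai/gpt-4",
    )
    for text in texts
]

# Every result carries the usage block described above.
total_cost = sum(r["usage"]["cost"] for r in results)
total_tokens = sum(r["usage"]["total_tokens"] for r in results)
print(f"{len(results)} calls, {total_tokens} tokens, ${total_cost:.4f}")
```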
+
+ ## CLI
+
+ ```bash
+ prompture run <spec-file>
+ ```
+
+ Run spec-driven extraction suites for cross-model comparison.
+
+ ## Development
+
+ ```bash
+ # Install with dev dependencies
+ pip install -e ".[test,dev]"
+
+ # Run tests
+ pytest
+
+ # Run integration tests (requires live LLM access)
+ pytest --run-integration
+
+ # Lint and format
+ ruff check .
+ ruff format .
+ ```
+
+ ## Contributing
+
+ PRs welcome. Please add tests for new functionality and examples under `examples/` for new drivers or patterns.
+
+ ## License
+
+ [MIT](https://opensource.org/licenses/MIT)
prompture-0.0.40.dev1.dist-info/RECORD ADDED
@@ -0,0 +1,78 @@
+ prompture/__init__.py,sha256=cJnkefDpiyFbU77juw4tXPdKJQWoJ-c6XBFt2v-e5Q4,7455
+ prompture/_version.py,sha256=0--ZUdBaCZlj76yYZsjkHfNkwola4VJybJ6Yx9HtHNA,719
+ prompture/agent.py,sha256=xe_yFHGDzTxaU4tmaLt5AQnzrN0I72hBGwGVrCxg2D0,34704
+ prompture/agent_types.py,sha256=Icl16PQI-ThGLMFCU43adtQA6cqETbsPn4KssKBI4xc,4664
+ prompture/async_agent.py,sha256=nOLOQCNkg0sKKTpryIiidmIcAAlA3FR2NfnZwrNBuCg,33066
+ prompture/async_conversation.py,sha256=m9sdKBu1wxo5veGwO6g6Zvf1sBzpuxP-mSIEeNKlBjQ,31155
+ prompture/async_core.py,sha256=hbRXLvsBJv3JAnUwGZbazsL6x022FrsJU6swmZolgxY,29745
+ prompture/async_driver.py,sha256=4VQ9Q_tI6Ufw6W1CYJ5j8hVtgVdqFGuk6e2tLaSceWE,8581
+ prompture/async_groups.py,sha256=8B383EF_qI9NzcG9zljLKjIZ_37bpNivvsmfJQoOGRk,19894
+ prompture/cache.py,sha256=4dfQDMsEZ9JMQDXLOkiugPmmMJQIfKVE8rTAKDH4oL8,14401
+ prompture/callbacks.py,sha256=JPDqWGzPIzv44l54ocmezlYVBnbKPDEEXRrLdluWGAo,1731
+ prompture/cli.py,sha256=tNiIddRmgC1BomjY5O1VVVAwvqHVzF8IHmQrM-cG2wQ,2902
+ prompture/conversation.py,sha256=kBflwh7Qmw1I_jcUGyV36oskdVz4SYDSw_dCjemRRRc,32756
+ prompture/core.py,sha256=5FHwX7fNPwFHMbFCMvV-RH7LpPpTToLAmcyDnKbrN0E,57202
+ prompture/cost_mixin.py,sha256=BR-zd42Tj4K865iRIntXlJEfryUcrd5Tuwcfx89QknE,3547
+ prompture/discovery.py,sha256=EWx2d-LJHmlDpm8dlpOicey6XZdDx70ZEetIlOOIlxw,9464
+ prompture/driver.py,sha256=wE7K3vnqeCVT5pEEBP-3uZ6e-YyU6TXtnEKRSB25eOc,10410
+ prompture/field_definitions.py,sha256=PLvxq2ot-ngJ8JbWkkZ-XLtM1wvjUQ3TL01vSEo-a6E,21368
+ prompture/group_types.py,sha256=BxeFV1tI4PTH3xPOie7q3-35ivkTdB9lJUPLH0kPH7A,4731
+ prompture/groups.py,sha256=q9lpD57VWw6iQgK9S0nLVidItJZmusJkmpblM4EX9Sc,18349
+ prompture/image.py,sha256=3uBxC6blXRNyY5KAJ5MkG6ow8KGAslX8WxM8Is8S8cw,5620
+ prompture/ledger.py,sha256=2iXkd9PWiM9WpRCxvnHG1-nwh_IM4mCbxjF4LE92Gzs,8576
+ prompture/logging.py,sha256=SkFO26_56Zai05vW8kTq3jvJudfLG2ipI5qNHaXKH3g,2574
+ prompture/model_rates.py,sha256=w2syZCbYM3DGP978Wopgy0AbmvSQcDm-6ALLBLLrGkg,10482
+ prompture/persistence.py,sha256=stcsH9Onth3BlK0QTWDKtXFp3FBmwUS5PI5R1glsIQc,9293
+ prompture/persona.py,sha256=SpLW-XPdG0avvJx8uGqJvMRZy65OjzfmJck7qbd28gc,17526
+ prompture/runner.py,sha256=lHe2L2jqY1pDXoKNPJALN9lAm-Q8QOY8C8gw-vM9VrM,4213
+ prompture/serialization.py,sha256=m4cdAQJspitMcfwRgecElkY2SBt3BjEwubbhS3W-0s0,7433
+ prompture/server.py,sha256=W6Kn6Et8nG5twXjD2wKn_N9yplGjz5Z-2naeI_UPd1Y,6198
+ prompture/session.py,sha256=FldK3cKq_jO0-beukVOhIiwsYWb6U_lLBlAERx95aaM,3821
+ prompture/settings.py,sha256=ADtYS8N5XYILI4TXrYTDOQo3pkDNSEVgn9ab4oMHkO4,2019
+ prompture/tools.py,sha256=PmFbGHTWYWahpJOG6BLlM0Y-EG6S37IFW57C-8GdsXo,36449
+ prompture/tools_schema.py,sha256=JVc0dxC4aIHIUlgE8yFCcn1gPzJ3unTMVmZ8Ec04aD0,7764
+ prompture/validator.py,sha256=FY_VjIVEbjG2nwzh-r6l23Kt3UzaLyCis8_pZMNGHBA,993
+ prompture/aio/__init__.py,sha256=bKqTu4Jxld16aP_7SP9wU5au45UBIb041ORo4E4HzVo,1810
+ prompture/drivers/__init__.py,sha256=VuEBZPqaQzXLl_Lvn_c5mRlJJrrlObZCLeHaR8n2eJ4,7050
+ prompture/drivers/airllm_driver.py,sha256=SaTh7e7Plvuct_TfRqQvsJsKHvvM_3iVqhBtlciM-Kw,3858
+ prompture/drivers/async_airllm_driver.py,sha256=1hIWLXfyyIg9tXaOE22tLJvFyNwHnOi1M5BIKnV8ysk,908
+ prompture/drivers/async_azure_driver.py,sha256=CFYh4TsI16m7KgAQ_jThJCRw60e_MlHEejDhm7klGH4,4456
+ prompture/drivers/async_claude_driver.py,sha256=oawbFVVMtRlikQOmu3jRjbdpoeu95JqTF1YHLKO3ybE,10576
+ prompture/drivers/async_google_driver.py,sha256=LTUgCXJjzuTDGzsCsmY2-xH2KdTLJD7htwO49ZNFOdE,13711
+ prompture/drivers/async_grok_driver.py,sha256=s3bXEGhVrMyw10CowkBhs5522mhipWJyWWu-xVixzyg,3538
+ prompture/drivers/async_groq_driver.py,sha256=pjAh_bgZWSWaNSm5XrU-u3gRV6YSGwNG5NfAbkYeJ84,3067
+ prompture/drivers/async_hugging_driver.py,sha256=IblxqU6TpNUiigZ0BCgNkAgzpUr2FtPHJOZnOZMnHF0,2152
+ prompture/drivers/async_lmstudio_driver.py,sha256=rPn2qVPm6UE2APzAn7ZHYTELUwr0dQMi8XHv6gAhyH8,5782
+ prompture/drivers/async_local_http_driver.py,sha256=qoigIf-w3_c2dbVdM6m1e2RMAWP4Gk4VzVs5hM3lPvQ,1609
+ prompture/drivers/async_ollama_driver.py,sha256=FaSXtFXrgeVHIe0b90Vg6rGeSTWLpPnjaThh9Ai7qQo,5042
+ prompture/drivers/async_openai_driver.py,sha256=mv0_H2ZQFm96xfDL1oFz3qRhB9v-whv48dwvE0b02dA,8956
+ prompture/drivers/async_openrouter_driver.py,sha256=pMenRxnRnJlx5lR25qejlsAzt6wGPBr10L85wLYKncI,3781
+ prompture/drivers/async_registry.py,sha256=syervbb7THneJ-NUVSuxy4cnxGW6VuNzKv-Aqqn2ysU,4329
+ prompture/drivers/azure_driver.py,sha256=bcfYxfkIbfxqopr_O6sbhdtk4PLl7t-4gbUL0OoMeM0,5710
+ prompture/drivers/claude_driver.py,sha256=C8Av3DXP2x3f35jEv8BRwEM_4vh0cfmLsy3t5dsR6aM,11837
+ prompture/drivers/google_driver.py,sha256=Zck5VUsW37kDgohXz3cUWRmZ88OfhmTpVD-qzAVMp-8,16318
+ prompture/drivers/grok_driver.py,sha256=CzAXKAbbWmbE8qLFZxxoEhf4Qzbtc9YqDX7kkCsE4dk,5320
+ prompture/drivers/groq_driver.py,sha256=61LKHhYyRiFkHKbLKFYX10fqjpL_INtPY_Zeb55AV0o,4221
+ prompture/drivers/hugging_driver.py,sha256=gZir3XnM77VfYIdnu3S1pRftlZJM6G3L8bgGn5esg-Q,2346
+ prompture/drivers/lmstudio_driver.py,sha256=9ZnJ1l5LuWAjkH2WKfFjZprNMVIXoSC7qXDNDTxm-tA,6748
+ prompture/drivers/local_http_driver.py,sha256=QJgEf9kAmy8YZ5fb8FHnWuhoDoZYNd8at4jegzNVJH0,1658
+ prompture/drivers/ollama_driver.py,sha256=k9xeUwFp91OrDbjkbYI-F8CDFy5ew-zQ0btXqwbXXWM,10220
+ prompture/drivers/openai_driver.py,sha256=WJ2LnSttq0FvrRzEeweAxzigv3qu_BYvpXv7PSVRZSI,10460
+ prompture/drivers/openrouter_driver.py,sha256=J7SMZXH-nK_J9H-GVuYMtJMYuK_2kZcDSmOpBipieNI,5440
+ prompture/drivers/registry.py,sha256=Dg_5w9alnIPKhOnsR9Xspuf5T7roBGu0r_L2Cf-UhXs,9926
+ prompture/drivers/vision_helpers.py,sha256=l5iYXHJLR_vLFvqDPPPK1QqK7YPKh5GwocpbSyt0R04,5403
+ prompture/scaffold/__init__.py,sha256=aitUxBV0MpjC7Od3iG8WUzcC7tGPXSt3oMzUBX8UDwQ,60
+ prompture/scaffold/generator.py,sha256=5QTHdWEXB7ADqOttfU7NgoxuaofNQnObzzI7NIPWFgo,2387
+ prompture/scaffold/templates/Dockerfile.j2,sha256=ukox6eVzQMVw-hAaFmNRL5HTrXw2Z0RB6g-vvbMVeu8,207
+ prompture/scaffold/templates/README.md.j2,sha256=xFgKnEP_JmLiiwD1QahNWdyKC85smyhOiIBCmN2U3y0,935
+ prompture/scaffold/templates/config.py.j2,sha256=q1LOnLlGzgJHPQz5geZ2AvrB-DskkLzay_CSj26hthE,529
+ prompture/scaffold/templates/env.example.j2,sha256=eESKr1KWgyrczO6d-nwAhQwSpf_G-T6P9gmHMhR9Sqc,246
+ prompture/scaffold/templates/main.py.j2,sha256=TEgc5OvsZOEX0JthkSW1NI_yLwgoeVN_x97Ibg-vyWY,2632
+ prompture/scaffold/templates/models.py.j2,sha256=JrZ99GCVK6TKWapskVRSwCssGrTu5cGZ_r46fOhY2GE,858
+ prompture/scaffold/templates/requirements.txt.j2,sha256=m3S5fi1hq9KG9l_9j317rjwWww0a43WMKd8VnUWv2A4,102
+ prompture-0.0.40.dev1.dist-info/licenses/LICENSE,sha256=0HgDepH7aaHNFhHF-iXuW6_GqDfYPnVkjtiCAZ4yS8I,1060
+ prompture-0.0.40.dev1.dist-info/METADATA,sha256=0GXp_XMAxefYsTblXEtrbwi_HaKMKHDBeqZzN9gcQW4,10842
+ prompture-0.0.40.dev1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ prompture-0.0.40.dev1.dist-info/entry_points.txt,sha256=AFPG3lJR86g4IJMoWQUW5Ph7G6MLNWG3A2u2Tp9zkp8,48
+ prompture-0.0.40.dev1.dist-info/top_level.txt,sha256=to86zq_kjfdoLeAxQNr420UWqT0WzkKoZ509J7Qr2t4,10
+ prompture-0.0.40.dev1.dist-info/RECORD,,