prompture-0.0.35-py3-none-any.whl → prompture-0.0.40.dev1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. prompture/__init__.py +132 -3
  2. prompture/_version.py +2 -2
  3. prompture/agent.py +924 -0
  4. prompture/agent_types.py +156 -0
  5. prompture/async_agent.py +880 -0
  6. prompture/async_conversation.py +208 -17
  7. prompture/async_core.py +16 -0
  8. prompture/async_driver.py +63 -0
  9. prompture/async_groups.py +551 -0
  10. prompture/conversation.py +222 -18
  11. prompture/core.py +46 -12
  12. prompture/cost_mixin.py +37 -0
  13. prompture/discovery.py +132 -44
  14. prompture/driver.py +77 -0
  15. prompture/drivers/__init__.py +5 -1
  16. prompture/drivers/async_azure_driver.py +11 -5
  17. prompture/drivers/async_claude_driver.py +184 -9
  18. prompture/drivers/async_google_driver.py +222 -28
  19. prompture/drivers/async_grok_driver.py +11 -5
  20. prompture/drivers/async_groq_driver.py +11 -5
  21. prompture/drivers/async_lmstudio_driver.py +74 -5
  22. prompture/drivers/async_ollama_driver.py +13 -3
  23. prompture/drivers/async_openai_driver.py +162 -5
  24. prompture/drivers/async_openrouter_driver.py +11 -5
  25. prompture/drivers/async_registry.py +5 -1
  26. prompture/drivers/azure_driver.py +10 -4
  27. prompture/drivers/claude_driver.py +17 -1
  28. prompture/drivers/google_driver.py +227 -33
  29. prompture/drivers/grok_driver.py +11 -5
  30. prompture/drivers/groq_driver.py +11 -5
  31. prompture/drivers/lmstudio_driver.py +73 -8
  32. prompture/drivers/ollama_driver.py +16 -5
  33. prompture/drivers/openai_driver.py +26 -11
  34. prompture/drivers/openrouter_driver.py +11 -5
  35. prompture/drivers/vision_helpers.py +153 -0
  36. prompture/group_types.py +147 -0
  37. prompture/groups.py +530 -0
  38. prompture/image.py +180 -0
  39. prompture/ledger.py +252 -0
  40. prompture/model_rates.py +112 -2
  41. prompture/persistence.py +254 -0
  42. prompture/persona.py +482 -0
  43. prompture/serialization.py +218 -0
  44. prompture/settings.py +1 -0
  45. prompture-0.0.40.dev1.dist-info/METADATA +369 -0
  46. prompture-0.0.40.dev1.dist-info/RECORD +78 -0
  47. prompture-0.0.35.dist-info/METADATA +0 -464
  48. prompture-0.0.35.dist-info/RECORD +0 -66
  49. {prompture-0.0.35.dist-info → prompture-0.0.40.dev1.dist-info}/WHEEL +0 -0
  50. {prompture-0.0.35.dist-info → prompture-0.0.40.dev1.dist-info}/entry_points.txt +0 -0
  51. {prompture-0.0.35.dist-info → prompture-0.0.40.dev1.dist-info}/licenses/LICENSE +0 -0
  52. {prompture-0.0.35.dist-info → prompture-0.0.40.dev1.dist-info}/top_level.txt +0 -0
@@ -1,464 +0,0 @@
- Metadata-Version: 2.4
- Name: prompture
- Version: 0.0.35
- Summary: Ask LLMs to return structured JSON and run cross-model tests. API-first.
- Author-email: Juan Denis <juan@vene.co>
- License-Expression: MIT
- Project-URL: Homepage, https://github.com/jhd3197/prompture
- Classifier: Programming Language :: Python :: 3
- Classifier: Operating System :: OS Independent
- Requires-Python: >=3.9
- Description-Content-Type: text/markdown
- License-File: LICENSE
- Requires-Dist: anthropic>=0.8.0
- Requires-Dist: click>=8.0
- Requires-Dist: google-generativeai>=0.3.0
- Requires-Dist: groq>=0.4.0
- Requires-Dist: httpx>=0.25.0
- Requires-Dist: jsonschema>=4.0
- Requires-Dist: openai>=1.0.0
- Requires-Dist: pandas>=1.3.0
- Requires-Dist: pydantic>=1.10
- Requires-Dist: pydantic-settings>=2.0
- Requires-Dist: python-dotenv>=0.19.0
- Requires-Dist: python-toon>=0.1.0
- Requires-Dist: requests>=2.28
- Requires-Dist: python-dateutil>=2.9.0
- Requires-Dist: tukuy>=0.0.6
- Requires-Dist: pyyaml>=6.0
- Provides-Extra: test
- Requires-Dist: pytest>=7.0; extra == "test"
- Requires-Dist: pytest-asyncio>=0.23.0; extra == "test"
- Provides-Extra: dev
- Requires-Dist: pytest>=7.0; extra == "dev"
- Requires-Dist: pytest-asyncio>=0.23.0; extra == "dev"
- Requires-Dist: ruff>=0.8.0; extra == "dev"
- Provides-Extra: airllm
- Requires-Dist: airllm>=2.8.0; extra == "airllm"
- Provides-Extra: redis
- Requires-Dist: redis>=4.0; extra == "redis"
- Provides-Extra: serve
- Requires-Dist: fastapi>=0.100; extra == "serve"
- Requires-Dist: uvicorn[standard]>=0.20; extra == "serve"
- Requires-Dist: sse-starlette>=1.6; extra == "serve"
- Provides-Extra: scaffold
- Requires-Dist: jinja2>=3.0; extra == "scaffold"
- Dynamic: license-file
-
- # Prompture
-
- [![PyPI version](https://badge.fury.io/py/prompture.svg)](https://badge.fury.io/py/prompture)
- [![Python Versions](https://img.shields.io/pypi/pyversions/prompture.svg)](https://pypi.org/project/prompture/)
- [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
- [![Downloads](https://static.pepy.tech/badge/prompture)](https://pepy.tech/project/prompture)
- ![GitHub Repo stars](https://img.shields.io/github/stars/jhd3197/prompture?style=social)
-
-
- **Prompture** is an API-first library for getting **structured JSON** (or any structure) from LLMs, validating it, and benchmarking multiple models with one spec.
-
- ## ✨ Features
-
- - ✅ **Structured output** → JSON schema enforcement, or direct **Pydantic** instances
- - ✅ **TOON input conversion** → 45-60% token savings for structured data analysis with `extract_from_data()` and `extract_from_pandas()`
- - ✅ **Stepwise extraction** → Per-field prompts, with smart type conversion (incl. shorthand numbers)
- - ✅ **Multi-driver** → OpenAI, Azure, Claude, Ollama, LM Studio, Google, Groq, OpenRouter, Grok, HTTP, Mock, HuggingFace (via `get_driver()`)
- - ✅ **Usage & cost** → Token + $ tracking on every call (`usage` from driver meta)
- - ✅ **AI cleanup** → Optional LLM pass to fix malformed JSON
- - ✅ **Batch testing** → Define suites and compare models (spec-driven)
- - 🧪 **Experimental TOON output** → Request Token-Oriented Object Notation when you need ultra-compact text
- <br>
-
- > [!TIP]
- > Starring this repo helps more developers discover Prompture ✨
- >
- >![prompture_no_forks](https://github.com/user-attachments/assets/720f888e-a885-4eb3-970c-ba5809fe2ce7)
- >
- > 🔥 Also check out my other project [RepoGif](https://github.com/jhd3197/RepoGif) – the tool I used to generate the GIF above!
- <br>
-
-
- ---
-
- ## Installation
-
- ```bash
- pip install prompture
- ```
-
- ---
-
- ## Configure a Provider
-
- Model names now support provider prefixes (e.g., "ollama/llama3.1:8b"). The `get_driver_for_model()` function automatically selects the appropriate driver based on the provider prefix.
-
- You can configure providers either through environment variables or by using provider-prefixed model names:
-
- ```bash
- # Environment variable approach:
- export AI_PROVIDER=ollama # One of: ollama | openai | azure | claude | google | groq | openrouter | grok | lmstudio | http | huggingface
-
- # Only if the provider needs them:
- export OPENAI_API_KEY=...
- export AZURE_OPENAI_ENDPOINT=...
- export AZURE_OPENAI_API_KEY=...
- export ANTHROPIC_API_KEY=...
- export GOOGLE_API_KEY=...
- export GROQ_API_KEY=...
- export OPENROUTER_API_KEY=...
- export GROK_API_KEY=...
- export LMSTUDIO_ENDPOINT=...
- ```
-
- | Provider | Example models | Cost calc |
- | -------- | ------------------------------------- | --------------- |
- | `ollama` | `ollama/llama3.1:8b`, `ollama/qwen2.5:3b` | `$0.00` (local) |
- | `openai` | `openai/gpt-4`, `openai/gpt-3.5-turbo` | Automatic |
- | `azure` | `azure/deployed-name` | Automatic |
- | `claude` | `claude/claude-3` | Automatic |
- | `google` | `google/gemini-1.5-pro`, `google/gemini-1.5-flash` | Automatic |
- | `groq` | `groq/llama2-70b-4096`, `groq/mixtral-8x7b-32768` | Automatic |
- | `openrouter` | `openrouter/openai/gpt-3.5-turbo`, `openrouter/anthropic/claude-2` | Automatic |
- | `grok` | `grok/grok-4-fast-reasoning`, `grok/grok-3-mini` | Automatic |
- | `lmstudio` | `lmstudio/local-model` | `$0.00` (local) |
- | `huggingface` | `hf/local-or-endpoint` | `$0.00` (local) |
- | `http` | `http/self-hosted` | `$0.00` |
-
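- The snippet below is an editorial sketch (it is not part of the published README) of how the provider prefix described above routes to a driver. It assumes `get_driver_for_model` is importable from the top-level `prompture` package and returns a configured driver instance; check your installed version for the exact location and return type.
-
- ```python
- # Hedged sketch: resolve a prefixed model name to its driver.
- # Assumption: get_driver_for_model is exported from the prompture package.
- from prompture import get_driver_for_model
-
- # The "ollama/" prefix selects the Ollama driver; local providers are
- # costed at $0.00 per the table above.
- driver = get_driver_for_model("ollama/llama3.1:8b")
- print(type(driver).__name__)
- ```
-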
- ---
-
- ## 🔍 Model Discovery
-
- Prompture can auto-detect available models from your configured environment. This is especially useful for local setups (like Ollama) or when you want to see which models are available to your application.
-
- ```python
- from prompture import get_available_models
-
- # Returns a list of strings like ["openai/gpt-4o", "ollama/llama3:latest", ...]
- models = get_available_models()
-
- for model in models:
-     print(f"Found: {model}")
- ```
-
- - **Static Drivers** (OpenAI, Claude, Azure, etc.): Returns models listed in the driver's `MODEL_PRICING` configuration if the driver is configured (API key present).
- - **Dynamic Drivers** (Ollama): Queries the local endpoint (e.g., `http://localhost:11434/api/tags`) to fetch currently installed models.
-
- ---
-
- ## Quickstart: Pydantic in one line (auto driver)
-
- Use `extract_with_model` for a single LLM call that fills your Pydantic model.
-
- ```python
- from typing import List, Optional
- from pydantic import BaseModel
- from prompture import extract_with_model
-
- class Person(BaseModel):
-     name: str
-     age: int
-     profession: str
-     city: str
-     hobbies: List[str]
-     education: Optional[str] = None
-
- text = "Maria is 32, a software developer in New York. She loves hiking and photography."
-
- # Uses get_driver_for_model() internally based on model name prefix
- person = extract_with_model(Person, text, model_name="ollama/gpt-oss:20b")
- print(person.dict())
- ```
-
171
- **Why start here?** It's fast (one call), cost-efficient, and returns a validated Pydantic instance.
172
-
173
-
174
- ## 🚀 TOON Input Conversion: 45-60% Token Savings
175
-
176
- Analyze structured data with automatic TOON (Token-Oriented Object Notation) conversion for massive token savings.
177
-
178
- ```python
179
- from prompture import extract_from_data, extract_from_pandas
180
-
181
- # Your product data
182
- products = [
183
- {"id": 1, "name": "Laptop", "price": 999.99, "rating": 4.5},
184
- {"id": 2, "name": "Book", "price": 19.99, "rating": 4.2},
185
- {"id": 3, "name": "Headphones", "price": 149.99, "rating": 4.7}
186
- ]
187
-
188
- # Ask questions about your data - automatically uses TOON format for 60%+ token savings
189
- result = extract_from_data(
190
- data=products,
191
- question="What is the average price and highest rated product?",
192
- json_schema={
193
- "type": "object",
194
- "properties": {
195
- "average_price": {"type": "number"},
196
- "highest_rated": {"type": "string"}
197
- }
198
- },
199
- model_name="openai/gpt-4"
200
- )
201
-
202
- print(result["json_object"])
203
- # {"average_price": 389.96, "highest_rated": "Headphones"}
204
-
205
- print(f"Token savings: {result['token_savings']['percentage_saved']}%")
206
- # Token savings: 62.3%
207
-
208
- # Works with Pandas DataFrames too!
209
- import pandas as pd
210
- df = pd.DataFrame(products)
211
- result = extract_from_pandas(df=df, question="...", json_schema=schema, model_name="openai/gpt-4")
212
- ```
213
-
214
- **Preview token savings without LLM calls:**
215
- ```bash
216
- python examples/token_comparison_utility.py
217
- ```
218
-
219
- > **Note:** Both `python-toon` and `pandas` are now included by default when you install Prompture!
220
-
221
- ---
222
- ---
223
-
224
- ## 📋 Field Definitions
225
-
226
- Prompture includes a powerful **field definitions system** that provides a centralized registry of structured data extraction fields. This system enables consistent, reusable field configurations across your data extraction workflows with built-in fields for common use cases like personal info, contact details, professional data, and more.
227
-
228
- **Key benefits:**
229
- - 🎯 Pre-configured fields with descriptions and extraction instructions
230
- - 🔄 Template variables like `{{current_year}}`, `{{current_date}}`, `{{current_datetime}}`
231
- - 🔌 Seamless Pydantic integration via `field_from_registry()`
232
- - ⚙️ Easy custom field registration
233
-
234
- ### Using Built-in Fields
235
-
236
- ```python
237
- from pydantic import BaseModel
238
- from prompture import field_from_registry, stepwise_extract_with_model
239
-
240
- class Person(BaseModel):
241
- name: str = field_from_registry("name")
242
- age: int = field_from_registry("age")
243
- email: str = field_from_registry("email")
244
- occupation: str = field_from_registry("occupation")
245
- company: str = field_from_registry("company")
246
-
247
- # Built-in fields include: name, age, email, phone, address, city, country,
248
- # occupation, company, education_level, salary, and many more!
249
-
250
- result = stepwise_extract_with_model(
251
- Person,
252
- "John Smith is 25 years old, software engineer at TechCorp, john@example.com",
253
- model_name="openai/gpt-4"
254
- )
255
- ```
256
-
257
- ### Registering Custom Fields
258
-
259
- ```python
260
- from prompture import register_field, field_from_registry
261
-
262
- # Register a custom field with template variables
263
- register_field("document_date", {
264
- "type": "str",
265
- "description": "Document creation or processing date",
266
- "instructions": "Use {{current_date}} if not specified in document",
267
- "default": "{{current_date}}",
268
- "nullable": False
269
- })
270
-
271
- # Use custom field in your model
272
- class Document(BaseModel):
273
- title: str = field_from_registry("name")
274
- created_date: str = field_from_registry("document_date")
275
- ```
276
-
277
- 📚 **[View Full Field Definitions Reference →](https://prompture.readthedocs.io/en/latest/field_definitions_reference.html)**
278
-
279
- ---
280
-
281
- ## JSON-first (low-level primitives)
282
-
283
- When you want raw JSON with a schema and full control, use `ask_for_json` or `extract_and_jsonify`.
284
-
285
- ```python
286
- from prompture.drivers import get_driver
287
- from prompture import ask_for_json, extract_and_jsonify
288
-
289
- schema = {
290
- "type": "object",
291
- "required": ["name", "age"],
292
- "properties": {
293
- "name": {"type": "string"},
294
- "age": {"type": "integer"}
295
- }
296
- }
297
-
298
- # 1) ask_for_json: you provide the full content prompt
299
- resp1 = ask_for_json(
300
- content_prompt="Extract the person's info from: John is 28 and lives in Miami.",
301
- json_schema=schema,
302
- model_name="google/gemini-1.5-pro"
303
- )
304
- print(resp1["json_object"], resp1["usage"])
305
-
306
- # 2) extract_and_jsonify: you provide text & an instruction template; it builds the prompt
307
- resp2 = extract_and_jsonify(
308
- text="John is 28 and lives in Miami.",
309
- json_schema=schema,
310
- model_name="groq/mixtral-8x7b-32768",
311
- instruction_template="Extract the person's information:"
312
- )
313
- print(resp2["json_object"], resp2["usage"])
314
- ```
315
-
316
- ### Experimental TOON output
317
-
318
- Prompture can ask for TOON (Token-Oriented Object Notation) instead of JSON by setting `output_format="toon"` on `ask_for_json`, `extract_and_jsonify`, `manual_extract_and_jsonify`, or `extract_with_model`. The LLM is still instructed to return JSON (for reliability); Prompture parses it and emits a TOON string via `python-toon`.
319
-
320
- ```python
321
- result = extract_and_jsonify(
322
- text="Alice Johnson is a 30-year-old data scientist...",
323
- json_schema=schema,
324
- model_name="lmstudio/deepseek/deepseek-r1-0528-qwen3-8b",
325
- output_format="toon",
326
- )
327
- print(result["toon_string"]) # TOON text generated locally
328
- print(result["json_object"]) # regular dict parsed from the JSON response
329
- # result["json_string"] still contains the original JSON text
330
- ```
331
-
332
- > [!IMPORTANT]
333
- > TOON output is **experimental**. General-purpose models often emit more verbose completions when asked for TOON, so total token usage can increase (see `toon_token_analysis.md`). Treat it as an opt-in mode until TOON-aware fine-tunes or adapters are available.
334
-
335
- ### Return shape (JSON helpers)
336
-
337
- ```python
338
- {
339
- "json_string": str,
340
- "json_object": dict,
341
- "usage": {
342
- "prompt_tokens": int,
343
- "completion_tokens": int,
344
- "total_tokens": int,
345
- "cost": float,
346
- "model_name": str
347
- }
348
- }
349
- ```
350
-
351
- > If the model returns malformed JSON and `ai_cleanup=True`, a second LLM pass tries to fix it.
352
-
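- A quick sketch (added for illustration; not from the published README) of reading the usage block shown above. It reuses the `schema` dict defined earlier in this section, and passing `ai_cleanup=True` as a keyword argument is an assumption based on the note above, so verify it against your version's signature.
-
- ```python
- # Hedged sketch: inspect token usage and cost from a JSON helper's return value.
- resp = extract_and_jsonify(
-     text="John is 28 and lives in Miami.",
-     json_schema=schema,
-     model_name="openai/gpt-4",
-     ai_cleanup=True,  # assumption: opts in to the second-pass JSON repair described above
- )
- usage = resp["usage"]
- print(usage["total_tokens"], usage["cost"], usage["model_name"])
- ```
-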
- ---
-
- ## Pydantic: one-shot vs stepwise
-
- Prompture supports two Pydantic extraction modes:
-
- * **`extract_with_model`** → Single call; global context; best cost/latency; coherent fields
- * **`stepwise_extract_with_model`** → One call per field; higher per-field accuracy; resilient
-
- | Aspect | `extract_with_model` (one-shot) | `stepwise_extract_with_model` (per-field) |
- | -------------- | -------------------------------------- | ----------------------------------------- |
- | LLM calls | 1 | N (one per field) |
- | Speed & cost | **Faster / cheaper** | Slower / higher |
- | Accuracy | Good global coherence | **Higher per-field accuracy** |
- | Error handling | All-or-nothing | **Per-field recovery** |
- | Best when | Fields are related; throughput matters | Correctness per field is critical |
-
- ### Examples
-
- ```python
- from prompture import extract_with_model, stepwise_extract_with_model
-
- person1 = extract_with_model(Person, text, model_name="openrouter/anthropic/claude-2")
- print(person1.dict())
-
- res = stepwise_extract_with_model(Person, text, model_name="grok/grok-4-fast-reasoning")
- print(res["model"].dict())
- print(res["usage"]) # includes per-field usage and totals
- ```
-
- **Stepwise extras:** internally uses `tools.create_field_schema` + `tools.convert_value` (with `allow_shorthand=True`) so values like `"3.4m"`, `"2k"`, `"1.2b"` can be converted to typed fields where appropriate.
-
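- The call below is a hypothetical illustration (not from the published README) of the shorthand conversion described above; the exact signature of `tools.convert_value` may differ in your installed version, so treat the argument names and order as assumptions.
-
- ```python
- # Hedged sketch: shorthand numeric strings converted to typed values.
- # Assumption: convert_value(value, target_type, allow_shorthand=True) is the rough shape of the helper.
- from prompture import tools
-
- print(tools.convert_value("2k", int, allow_shorthand=True))      # expected: 2000
- print(tools.convert_value("3.4m", float, allow_shorthand=True))  # expected: 3400000.0
- ```
-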
385
- ---
386
-
387
- ## Manual control with logging
388
-
389
- `manual_extract_and_jsonify` is like `extract_and_jsonify` but lets you provide your own driver.
390
- Enable library logging via Python's standard `logging` module:
391
-
392
- ```python
393
- import logging
394
- from prompture import manual_extract_and_jsonify, configure_logging
395
- from prompture.drivers import get_driver
396
-
397
- configure_logging(logging.DEBUG) # see internal debug output
398
-
399
- driver = get_driver("ollama")
400
- res = manual_extract_and_jsonify(
401
- driver=driver,
402
- text="Maria works as a software developer in New York.",
403
- json_schema={
404
- "type": "object",
405
- "required": ["city", "profession"],
406
- "properties": {"city": {"type": "string"}, "profession": {"type": "string"}}
407
- },
408
- model_name="llama3.1:8b",
409
- options={"temperature": 0.2},
410
- )
411
- print(res["json_object"])
412
- ```
413
-
414
- ---
415
-
416
-
417
- **Example output (Ollama comparison)** — see `examples/ollama_models_comparison.py` for a richer comparison table.
418
-
419
- ---
420
-
421
-
422
- ## Ollama Model Comparison Example
423
-
424
- This example demonstrates how to compare different Ollama models using a specific script located at `examples/ollama_models_comparison.py`.
425
-
426
- | Model | Success | Prompt | Completion | Total | Fields | Validation | Name | Price | Variants | Screen Size | Warranty | Is New |
427
- |------------------|---------|--------|------------|-------|--------|------------|---------------------|----------|----------|-------------|----------|--------|
428
- | gpt-oss:20b | True | 801 | 945 | 1746 | 8 | ✓ | GalaxyFold Ultra | 1299.99 | 9 | 6.9 | 3 | True |
429
- | deepseek-r1:latest | True | 757 | 679 | 1436 | 8 | ✗ | GalaxyFold Ultra | 1299.99 | 3 | 6.9 | None | True |
430
- | llama3.1:8b | True | 746 | 256 | 1002 | 8 | ✓ | GalaxyFold Ultra | 1299.99 | 3 | 6.9 | 3 | True |
431
- | gemma3:latest | True | 857 | 315 | 1172 | 8 | ✗ | GalaxyFold Ultra | 1299.99 | 3 | 6.9 | None | True |
432
- | qwen2.5:1.5b | True | 784 | 236 | 1020 | 8 | ✓ | GalaxyFold Ultra | 1299.99 | 3 | 6.9 | 3 | True |
433
- | qwen2.5:3b | True | 784 | 273 | 1057 | 9 | ✓ | GalaxyFold Ultra | 1299.99 | 3 | 6.9 | 3 | True |
434
- | mistral:latest | True | 928 | 337 | 1265 | 8 | ✓ | GalaxyFold Ultra | 1299.99 | 3 | 6.9 | 3 | True |
435
-
436
- > **Successful models (7):** gpt-oss:20b, deepseek-r1:latest, llama3.1:8b, gemma3:latest, qwen2.5:1.5b, qwen2.5:3b, mistral:latest
437
-
438
- You can run this comparison yourself with:
439
- `python examples/ollama_models_comparison.py`
440
-
441
- This example script compares multiple Ollama models on a complex task of extracting structured information from a smartphone description using a detailed JSON schema. The purpose of this example is to illustrate how `Prompture` can be used to test and compare different models on the same structured output task, showing their success rates, token usage, and validation results.
442
-
443
- ---
444
-
445
- ## Error handling notes
446
-
447
- * With `ai_cleanup=True`, a second LLM pass attempts to fix malformed JSON; on success, `usage` may be a minimal stub.
448
- * `extract_and_jsonify` will **skip tests** under `pytest` if there’s a local server connection error (e.g., Ollama), instead of failing the suite.
449
- * All functions raise `ValueError` for empty text.
450
-
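- A minimal sketch (editorial illustration, not from the published README) of guarding against the empty-text `ValueError` noted above; it reuses the `Person` model and `extract_with_model` from the Quickstart section.
-
- ```python
- # Hedged sketch: skip blank inputs instead of letting the extraction call raise.
- texts = ["Maria is 32, a software developer in New York.", ""]
- for t in texts:
-     try:
-         person = extract_with_model(Person, t, model_name="ollama/llama3.1:8b")
-         print(person.dict())
-     except ValueError:
-         print("Skipped empty text")
- ```
-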
- ---
-
- ## Tips & Best Practices
-
- * Add `description` to schema fields (or Pydantic field metadata) for better extractions.
- * Start with **one-shot Pydantic**; switch specific fields to **stepwise** if they're noisy.
- * Track usage/cost before scaling; tweak `temperature` in `options` if consistency wobbles.
- * Use `configure_logging(logging.DEBUG)` in dev to see internal debug output and tighten your specs.
-
- ---
-
- ## Contributing
-
- PRs welcome! Add tests and—if adding drivers or patterns—drop an example under `examples/`.
@@ -1,66 +0,0 @@
- prompture/__init__.py,sha256=IzJ-QVN9MYX0VMjPEqsulDDDBvHKQpR9GtBYptSmRBg,4690
- prompture/_version.py,sha256=w77E3DIE0tp22UaCyYn9461JfWMpEPhsv91t-4n4Bjw,706
- prompture/async_conversation.py,sha256=i-cbV4WfPxU6m-SgeC7X-xl_Hg20B6zK-U4CJjeLFIA,23529
- prompture/async_core.py,sha256=s8G0nGUGR1Bf_BQG9_FcQRpveSnJKkEwcWNfbAJaSkg,29208
- prompture/async_driver.py,sha256=tyk8qc7VjMCyfP9MBAG15-EXa2kBM3tVhpJABGWAgFU,6075
- prompture/cache.py,sha256=4dfQDMsEZ9JMQDXLOkiugPmmMJQIfKVE8rTAKDH4oL8,14401
- prompture/callbacks.py,sha256=JPDqWGzPIzv44l54ocmezlYVBnbKPDEEXRrLdluWGAo,1731
- prompture/cli.py,sha256=tNiIddRmgC1BomjY5O1VVVAwvqHVzF8IHmQrM-cG2wQ,2902
- prompture/conversation.py,sha256=Kwz0iCQX8-bpxXVEX6SwNqbjFKj-NMGxxx6w7AfuqV8,24475
- prompture/core.py,sha256=V__4YAt42WKf7CNiFRiET1oT2GFFIElOLFFF7mSFx-o,55735
- prompture/cost_mixin.py,sha256=_spz84i8Qsplh6V3GkWyXXSUE4EwGy2IsbcsU2LEBxs,1918
- prompture/discovery.py,sha256=_mSeeIQfRl3pHDPm84Mc2LDcOSwfkUanITXWeRPfv5E,7116
- prompture/driver.py,sha256=3rLu-QZJBaOE2JwfRDt5xWDHuD5NdMBLfiYq4lMYyWs,7157
- prompture/field_definitions.py,sha256=PLvxq2ot-ngJ8JbWkkZ-XLtM1wvjUQ3TL01vSEo-a6E,21368
- prompture/logging.py,sha256=SkFO26_56Zai05vW8kTq3jvJudfLG2ipI5qNHaXKH3g,2574
- prompture/model_rates.py,sha256=qtZUjsCVskA9LyG73JklG_kjKJHABA6ldBmBX0UzlSQ,6415
- prompture/runner.py,sha256=lHe2L2jqY1pDXoKNPJALN9lAm-Q8QOY8C8gw-vM9VrM,4213
- prompture/server.py,sha256=W6Kn6Et8nG5twXjD2wKn_N9yplGjz5Z-2naeI_UPd1Y,6198
- prompture/session.py,sha256=FldK3cKq_jO0-beukVOhIiwsYWb6U_lLBlAERx95aaM,3821
- prompture/settings.py,sha256=o-zsYpxRvSg-ICGWqqVNEoJG23GCMBLlkC7RPXpouSw,1976
- prompture/tools.py,sha256=PmFbGHTWYWahpJOG6BLlM0Y-EG6S37IFW57C-8GdsXo,36449
- prompture/tools_schema.py,sha256=JVc0dxC4aIHIUlgE8yFCcn1gPzJ3unTMVmZ8Ec04aD0,7764
- prompture/validator.py,sha256=FY_VjIVEbjG2nwzh-r6l23Kt3UzaLyCis8_pZMNGHBA,993
- prompture/aio/__init__.py,sha256=bKqTu4Jxld16aP_7SP9wU5au45UBIb041ORo4E4HzVo,1810
- prompture/drivers/__init__.py,sha256=yj-z7_RMbfxP8zaKjQFZFcxGdf8Q2_KNQ6RTo7SPEpQ,6984
- prompture/drivers/airllm_driver.py,sha256=SaTh7e7Plvuct_TfRqQvsJsKHvvM_3iVqhBtlciM-Kw,3858
- prompture/drivers/async_airllm_driver.py,sha256=1hIWLXfyyIg9tXaOE22tLJvFyNwHnOi1M5BIKnV8ysk,908
- prompture/drivers/async_azure_driver.py,sha256=vRp1PlOB87OLUbEZJEp7En3tvadG956Q6AV2o9UmyLA,4196
- prompture/drivers/async_claude_driver.py,sha256=w9HRE7njh1N9BzX4QtsrUnMakKBb-nykBkvTwLmoFnw,3825
- prompture/drivers/async_google_driver.py,sha256=qKMl6R60xBPQpsjCd9acXiXphVE5QxKbPSjxal7Sq2U,5656
- prompture/drivers/async_grok_driver.py,sha256=yQgdBHiyyweueWDKChm9HnWzvbtWz60SNATsRKQY-RQ,3279
- prompture/drivers/async_groq_driver.py,sha256=iShPE_YbR3oObHD3joFJDSmFeyCmaisLl7nwILbVux4,2808
- prompture/drivers/async_hugging_driver.py,sha256=IblxqU6TpNUiigZ0BCgNkAgzpUr2FtPHJOZnOZMnHF0,2152
- prompture/drivers/async_lmstudio_driver.py,sha256=7iQ-Fqezlpj4O13QC9oPjylGCINU5KvSRkwL52_2Cf0,2810
- prompture/drivers/async_local_http_driver.py,sha256=qoigIf-w3_c2dbVdM6m1e2RMAWP4Gk4VzVs5hM3lPvQ,1609
- prompture/drivers/async_ollama_driver.py,sha256=vRd2VIl412d6WVSo8vmZg0GBYUo7gBj-S2_55PpUWbk,4511
- prompture/drivers/async_openai_driver.py,sha256=jHtSA_MeeIwGeE9o9F1ZsKTNgGGA7xF3WbGZgD8ACEU,3305
- prompture/drivers/async_openrouter_driver.py,sha256=OKL4MfRAopXaMevf6A6WcAytyvWr0tWO_BmshdI0fSY,3516
- prompture/drivers/async_registry.py,sha256=gJDr60688MZx9rbP-I8Um1gLeDwfRynv5SHsYjHjEGk,4263
- prompture/drivers/azure_driver.py,sha256=4IAzdKqcORgVEDUj6itkVmJUg1ayo4HXSfqLKzIGnlM,5460
- prompture/drivers/claude_driver.py,sha256=mZE3flcZImNCpMl803hLDd5KrkbYZt4VFQvF1ezs9qU,11269
- prompture/drivers/google_driver.py,sha256=Ysa1ZZEAPEKHCJFCBiJtB4K-sGvsYti4hGBv_85nowY,8454
- prompture/drivers/grok_driver.py,sha256=_ZQfmE4NfzHTj-nTEtBkWFbypjCF_4aWFODAaxxEjog,5027
- prompture/drivers/groq_driver.py,sha256=QfMhzKDhTQyU7yG0oayCojunaKPdxrxzYo-u87_uN18,3928
- prompture/drivers/hugging_driver.py,sha256=gZir3XnM77VfYIdnu3S1pRftlZJM6G3L8bgGn5esg-Q,2346
- prompture/drivers/lmstudio_driver.py,sha256=AuhmKS4DBvxEPSfjL-uN7wnAyFWoB7Wjk1o0Oscan4I,4010
- prompture/drivers/local_http_driver.py,sha256=QJgEf9kAmy8YZ5fb8FHnWuhoDoZYNd8at4jegzNVJH0,1658
- prompture/drivers/ollama_driver.py,sha256=o44HCKbljLRN6TCMylj0X4RjvtI_VaZdoDZXsLQBkH4,9577
- prompture/drivers/openai_driver.py,sha256=Dg0YiduDSyTiLMG4F9YuWKB0vbzVuOYOog5CpWgbxxo,9897
- prompture/drivers/openrouter_driver.py,sha256=WH48KEkafuxFX6b55FzwT57tUlmbwYlHSeNsIxWvM4o,5141
- prompture/drivers/registry.py,sha256=Dg_5w9alnIPKhOnsR9Xspuf5T7roBGu0r_L2Cf-UhXs,9926
- prompture/scaffold/__init__.py,sha256=aitUxBV0MpjC7Od3iG8WUzcC7tGPXSt3oMzUBX8UDwQ,60
- prompture/scaffold/generator.py,sha256=5QTHdWEXB7ADqOttfU7NgoxuaofNQnObzzI7NIPWFgo,2387
- prompture/scaffold/templates/Dockerfile.j2,sha256=ukox6eVzQMVw-hAaFmNRL5HTrXw2Z0RB6g-vvbMVeu8,207
- prompture/scaffold/templates/README.md.j2,sha256=xFgKnEP_JmLiiwD1QahNWdyKC85smyhOiIBCmN2U3y0,935
- prompture/scaffold/templates/config.py.j2,sha256=q1LOnLlGzgJHPQz5geZ2AvrB-DskkLzay_CSj26hthE,529
- prompture/scaffold/templates/env.example.j2,sha256=eESKr1KWgyrczO6d-nwAhQwSpf_G-T6P9gmHMhR9Sqc,246
- prompture/scaffold/templates/main.py.j2,sha256=TEgc5OvsZOEX0JthkSW1NI_yLwgoeVN_x97Ibg-vyWY,2632
- prompture/scaffold/templates/models.py.j2,sha256=JrZ99GCVK6TKWapskVRSwCssGrTu5cGZ_r46fOhY2GE,858
- prompture/scaffold/templates/requirements.txt.j2,sha256=m3S5fi1hq9KG9l_9j317rjwWww0a43WMKd8VnUWv2A4,102
- prompture-0.0.35.dist-info/licenses/LICENSE,sha256=0HgDepH7aaHNFhHF-iXuW6_GqDfYPnVkjtiCAZ4yS8I,1060
- prompture-0.0.35.dist-info/METADATA,sha256=UZZi4UWdcyxEwwdZDeM_8A6bTRobx3zd8R0T0-mjQ1Y,18478
- prompture-0.0.35.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
- prompture-0.0.35.dist-info/entry_points.txt,sha256=AFPG3lJR86g4IJMoWQUW5Ph7G6MLNWG3A2u2Tp9zkp8,48
- prompture-0.0.35.dist-info/top_level.txt,sha256=to86zq_kjfdoLeAxQNr420UWqT0WzkKoZ509J7Qr2t4,10
- prompture-0.0.35.dist-info/RECORD,,