prompture 0.0.32__tar.gz → 0.0.42.dev1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prompture-0.0.42.dev1/.claude/skills/add-driver/SKILL.md +85 -0
- prompture-0.0.42.dev1/.claude/skills/add-driver/references/driver-template.md +83 -0
- prompture-0.0.42.dev1/.claude/skills/add-example/SKILL.md +83 -0
- prompture-0.0.42.dev1/.claude/skills/add-field/SKILL.md +59 -0
- prompture-0.0.42.dev1/.claude/skills/add-test/SKILL.md +87 -0
- prompture-0.0.42.dev1/.claude/skills/run-tests/SKILL.md +38 -0
- prompture-0.0.42.dev1/.claude/skills/scaffold-extraction/SKILL.md +76 -0
- prompture-0.0.42.dev1/.claude/skills/update-pricing/SKILL.md +51 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/.env.copy +25 -2
- {prompture-0.0.32 → prompture-0.0.42.dev1}/.github/scripts/update_docs_version.py +74 -38
- {prompture-0.0.32 → prompture-0.0.42.dev1}/.github/workflows/documentation.yml +7 -4
- {prompture-0.0.32 → prompture-0.0.42.dev1}/.github/workflows/publish.yml +1 -1
- prompture-0.0.42.dev1/CLAUDE.md +93 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/MANIFEST.in +0 -1
- prompture-0.0.42.dev1/PKG-INFO +369 -0
- prompture-0.0.42.dev1/README.md +322 -0
- prompture-0.0.42.dev1/ROADMAP.md +341 -0
- prompture-0.0.42.dev1/docs/source/_templates/footer.html +16 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/docs/source/api/core.rst +0 -2
- {prompture-0.0.32 → prompture-0.0.42.dev1}/docs/source/api/index.rst +1 -1
- prompture-0.0.42.dev1/docs/source/api/tools.rst +216 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/docs/source/conf.py +51 -39
- {prompture-0.0.32 → prompture-0.0.42.dev1}/packages/llm_to_json/llm_to_json/__init__.py +3 -2
- {prompture-0.0.32 → prompture-0.0.42.dev1}/packages/llm_to_json/test.py +3 -1
- prompture-0.0.42.dev1/prompture/__init__.py +310 -0
- prompture-0.0.42.dev1/prompture/_version.py +34 -0
- prompture-0.0.42.dev1/prompture/agent.py +924 -0
- prompture-0.0.42.dev1/prompture/agent_types.py +156 -0
- prompture-0.0.42.dev1/prompture/aio/__init__.py +74 -0
- prompture-0.0.42.dev1/prompture/async_agent.py +880 -0
- prompture-0.0.42.dev1/prompture/async_conversation.py +798 -0
- prompture-0.0.42.dev1/prompture/async_core.py +819 -0
- prompture-0.0.42.dev1/prompture/async_driver.py +232 -0
- prompture-0.0.42.dev1/prompture/async_groups.py +551 -0
- prompture-0.0.42.dev1/prompture/cache.py +469 -0
- prompture-0.0.42.dev1/prompture/callbacks.py +55 -0
- prompture-0.0.42.dev1/prompture/cli.py +82 -0
- prompture-0.0.42.dev1/prompture/conversation.py +835 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/prompture/core.py +509 -352
- prompture-0.0.42.dev1/prompture/cost_mixin.py +113 -0
- prompture-0.0.42.dev1/prompture/discovery.py +252 -0
- prompture-0.0.42.dev1/prompture/driver.py +264 -0
- prompture-0.0.42.dev1/prompture/drivers/__init__.py +267 -0
- prompture-0.0.42.dev1/prompture/drivers/airllm_driver.py +109 -0
- prompture-0.0.42.dev1/prompture/drivers/async_airllm_driver.py +26 -0
- prompture-0.0.42.dev1/prompture/drivers/async_azure_driver.py +124 -0
- prompture-0.0.42.dev1/prompture/drivers/async_claude_driver.py +282 -0
- prompture-0.0.42.dev1/prompture/drivers/async_google_driver.py +326 -0
- prompture-0.0.42.dev1/prompture/drivers/async_grok_driver.py +97 -0
- prompture-0.0.42.dev1/prompture/drivers/async_groq_driver.py +90 -0
- prompture-0.0.42.dev1/prompture/drivers/async_hugging_driver.py +61 -0
- prompture-0.0.42.dev1/prompture/drivers/async_lmstudio_driver.py +148 -0
- prompture-0.0.42.dev1/prompture/drivers/async_local_http_driver.py +44 -0
- prompture-0.0.42.dev1/prompture/drivers/async_modelscope_driver.py +286 -0
- prompture-0.0.42.dev1/prompture/drivers/async_moonshot_driver.py +312 -0
- prompture-0.0.42.dev1/prompture/drivers/async_ollama_driver.py +135 -0
- prompture-0.0.42.dev1/prompture/drivers/async_openai_driver.py +254 -0
- prompture-0.0.42.dev1/prompture/drivers/async_openrouter_driver.py +291 -0
- prompture-0.0.42.dev1/prompture/drivers/async_registry.py +163 -0
- prompture-0.0.42.dev1/prompture/drivers/async_zai_driver.py +303 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/prompture/drivers/azure_driver.py +46 -12
- prompture-0.0.42.dev1/prompture/drivers/claude_driver.py +317 -0
- prompture-0.0.42.dev1/prompture/drivers/google_driver.py +390 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/prompture/drivers/grok_driver.py +39 -36
- {prompture-0.0.32 → prompture-0.0.42.dev1}/prompture/drivers/groq_driver.py +37 -30
- {prompture-0.0.32 → prompture-0.0.42.dev1}/prompture/drivers/hugging_driver.py +6 -6
- prompture-0.0.42.dev1/prompture/drivers/lmstudio_driver.py +171 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/prompture/drivers/local_http_driver.py +6 -6
- prompture-0.0.42.dev1/prompture/drivers/modelscope_driver.py +303 -0
- prompture-0.0.42.dev1/prompture/drivers/moonshot_driver.py +342 -0
- prompture-0.0.42.dev1/prompture/drivers/ollama_driver.py +261 -0
- prompture-0.0.42.dev1/prompture/drivers/openai_driver.py +302 -0
- prompture-0.0.42.dev1/prompture/drivers/openrouter_driver.py +356 -0
- prompture-0.0.42.dev1/prompture/drivers/registry.py +306 -0
- prompture-0.0.42.dev1/prompture/drivers/vision_helpers.py +153 -0
- prompture-0.0.42.dev1/prompture/drivers/zai_driver.py +318 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/prompture/field_definitions.py +106 -96
- prompture-0.0.42.dev1/prompture/group_types.py +147 -0
- prompture-0.0.42.dev1/prompture/groups.py +530 -0
- prompture-0.0.42.dev1/prompture/image.py +180 -0
- prompture-0.0.42.dev1/prompture/ledger.py +252 -0
- prompture-0.0.42.dev1/prompture/logging.py +80 -0
- prompture-0.0.42.dev1/prompture/model_rates.py +329 -0
- prompture-0.0.42.dev1/prompture/persistence.py +254 -0
- prompture-0.0.42.dev1/prompture/persona.py +482 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/prompture/runner.py +49 -47
- prompture-0.0.42.dev1/prompture/scaffold/__init__.py +1 -0
- prompture-0.0.42.dev1/prompture/scaffold/generator.py +84 -0
- prompture-0.0.42.dev1/prompture/scaffold/templates/Dockerfile.j2 +12 -0
- prompture-0.0.42.dev1/prompture/scaffold/templates/README.md.j2 +41 -0
- prompture-0.0.42.dev1/prompture/scaffold/templates/config.py.j2 +21 -0
- prompture-0.0.42.dev1/prompture/scaffold/templates/env.example.j2 +8 -0
- prompture-0.0.42.dev1/prompture/scaffold/templates/main.py.j2 +86 -0
- prompture-0.0.42.dev1/prompture/scaffold/templates/models.py.j2 +40 -0
- prompture-0.0.42.dev1/prompture/scaffold/templates/requirements.txt.j2 +5 -0
- prompture-0.0.42.dev1/prompture/serialization.py +218 -0
- prompture-0.0.42.dev1/prompture/server.py +183 -0
- prompture-0.0.42.dev1/prompture/session.py +117 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/prompture/settings.py +35 -2
- {prompture-0.0.32 → prompture-0.0.42.dev1}/prompture/tools.py +172 -265
- prompture-0.0.42.dev1/prompture/tools_schema.py +254 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/prompture/validator.py +3 -3
- prompture-0.0.42.dev1/prompture.egg-info/PKG-INFO +369 -0
- prompture-0.0.42.dev1/prompture.egg-info/SOURCES.txt +134 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/prompture.egg-info/requires.txt +20 -0
- prompture-0.0.42.dev1/pyproject.toml +91 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/test.py +47 -58
- {prompture-0.0.32 → prompture-0.0.42.dev1}/test_version_diagnosis.py +14 -18
- prompture-0.0.32/PKG-INFO +0 -458
- prompture-0.0.32/README.md +0 -415
- prompture-0.0.32/VERSION +0 -1
- prompture-0.0.32/docs/source/api/tools.rst +0 -487
- prompture-0.0.32/prompture/__init__.py +0 -92
- prompture-0.0.32/prompture/cli.py +0 -23
- prompture-0.0.32/prompture/discovery.py +0 -149
- prompture-0.0.32/prompture/driver.py +0 -24
- prompture-0.0.32/prompture/drivers/__init__.py +0 -120
- prompture-0.0.32/prompture/drivers/claude_driver.py +0 -84
- prompture-0.0.32/prompture/drivers/google_driver.py +0 -160
- prompture-0.0.32/prompture/drivers/lmstudio_driver.py +0 -93
- prompture-0.0.32/prompture/drivers/ollama_driver.py +0 -116
- prompture-0.0.32/prompture/drivers/openai_driver.py +0 -117
- prompture-0.0.32/prompture/drivers/openrouter_driver.py +0 -140
- prompture-0.0.32/prompture.egg-info/PKG-INFO +0 -458
- prompture-0.0.32/prompture.egg-info/SOURCES.txt +0 -68
- prompture-0.0.32/setup.py +0 -51
- {prompture-0.0.32 → prompture-0.0.42.dev1}/.github/FUNDING.yml +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/.github/scripts/update_wrapper_version.py +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/.github/workflows/dev.yml +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/LICENSE +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/docs/source/_static/custom.css +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/docs/source/api/drivers.rst +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/docs/source/api/field_definitions.rst +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/docs/source/api/runner.rst +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/docs/source/api/validator.rst +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/docs/source/contributing.rst +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/docs/source/examples.rst +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/docs/source/field_definitions_reference.rst +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/docs/source/index.rst +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/docs/source/installation.rst +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/docs/source/quickstart.rst +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/docs/source/toon_input_guide.rst +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/packages/README.md +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/packages/llm_to_json/README.md +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/packages/llm_to_json/pyproject.toml +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/packages/llm_to_toon/README.md +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/packages/llm_to_toon/llm_to_toon/__init__.py +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/packages/llm_to_toon/pyproject.toml +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/packages/llm_to_toon/test.py +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/prompture.egg-info/dependency_links.txt +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/prompture.egg-info/entry_points.txt +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/prompture.egg-info/top_level.txt +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/requirements.txt +0 -0
- {prompture-0.0.32 → prompture-0.0.42.dev1}/setup.cfg +0 -0

prompture-0.0.42.dev1/.claude/skills/add-driver/SKILL.md
@@ -0,0 +1,85 @@
+---
+name: add-driver
+description: Scaffold a new LLM provider driver for Prompture. Creates the driver class, registers it in the driver registry, adds settings, env template, setup.py extras, and package exports. Use when adding support for a new LLM provider.
+metadata:
+  author: prompture
+  version: "1.0"
+---
+
+# Add a New LLM Driver
+
+Scaffolds all files needed to integrate a new LLM provider into Prompture.
+
+## Before Starting
+
+Ask the user for:
+- **Provider name** (lowercase, used as registry key and `provider/model` prefix)
+- **SDK package name** on PyPI and minimum version
+- **Default model ID**
+- **Authentication** — API key env var name, endpoint URL, or both
+- **Model pricing** — cost per 1K tokens for prompt/completion, or `0.0` for free/local
+- **Lazy or eager import** — lazy if SDK is optional, eager if it's in `install_requires`
+
+## Files to Create or Modify (7 total)
+
+### 1. NEW: `prompture/drivers/{provider}_driver.py`
+
+See [references/driver-template.md](references/driver-template.md) for the full skeleton.
+
+Key rules:
+- Subclass `Driver` from `..driver`
+- `generate()` returns `{"text": str, "meta": dict}`
+- `meta` MUST contain: `prompt_tokens`, `completion_tokens`, `total_tokens`, `cost`, `raw_response`, `model_name`
+- Optional SDK: wrap import in try/except, raise `ImportError` pointing to `pip install prompture[{provider}]`
+
+### 2. `prompture/drivers/__init__.py`
+
+- Add import: `from .{provider}_driver import {Provider}Driver`
+- Add to `DRIVER_REGISTRY`:
+  ```python
+  "{provider}": lambda model=None: {Provider}Driver(
+      api_key=settings.{provider}_api_key,
+      model=model or settings.{provider}_model
+  ),
+  ```
+- Add `"{Provider}Driver"` to `__all__`
+
+### 3. `prompture/__init__.py`
+
+- Add `{Provider}Driver` to the `.drivers` import line
+- Add `"{Provider}Driver"` to `__all__` under `# Drivers`
+
+### 4. `prompture/settings.py`
+
+Add inside `Settings` class:
+```python
+# {Provider}
+{provider}_api_key: Optional[str] = None
+{provider}_model: str = "default-model"
+```
+
+### 5. `setup.py`
+
+If optional: add `"{provider}": ["{sdk}>={version}"]` to `extras_require`.
+If required: add to `install_requires`.
+
+### 6. `.env.copy`
+
+Add section:
+```
+# {Provider} Configuration
+{PROVIDER}_API_KEY=your-api-key-here
+{PROVIDER}_MODEL=default-model
+```
+
+### 7. `CLAUDE.md`
+
+Add `{provider}` to the driver list in the Module Layout bullet.
+
+## Verification
+
+```bash
+python -c "from prompture import {Provider}Driver; print('OK')"
+python -c "from prompture.drivers import get_driver_for_model; d = get_driver_for_model('{provider}/test'); print(d.model)"
+pytest tests/ -x -q
+```
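
The verification step above depends on the `provider/model` prefix resolving through the driver registry. A minimal, self-contained sketch of that lookup convention (the registry, `EchoDriver`, and helper below are illustrative placeholders, not Prompture's actual implementation):

```python
# Illustrative only: how a "provider/model" string can resolve to a driver instance.
# EchoDriver, DRIVER_REGISTRY, and get_driver_for_model here are placeholders.
from typing import Callable, Dict


class EchoDriver:
    def __init__(self, model: str = "echo-1"):
        self.model = model


DRIVER_REGISTRY: Dict[str, Callable[..., EchoDriver]] = {
    "echo": lambda model=None: EchoDriver(model=model or "echo-1"),
}


def get_driver_for_model(name: str) -> EchoDriver:
    # "echo/test" -> provider "echo", model "test"
    provider, _, model = name.partition("/")
    return DRIVER_REGISTRY[provider](model=model or None)


print(get_driver_for_model("echo/test").model)  # -> test
```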

prompture-0.0.42.dev1/.claude/skills/add-driver/references/driver-template.md
@@ -0,0 +1,83 @@
+# Driver Template
+
+Every Prompture driver follows this skeleton:
+
+```python
+import os
+import logging
+from ..driver import Driver
+from typing import Any, Dict
+
+logger = logging.getLogger(__name__)
+
+
+class {Provider}Driver(Driver):
+    MODEL_PRICING = {
+        # "model-name": {"prompt": cost_per_1k, "completion": cost_per_1k},
+        "default": {"prompt": 0.0, "completion": 0.0},
+    }
+
+    def __init__(self, api_key: str | None = None, model: str = "default-model"):
+        self.api_key = api_key or os.getenv("{PROVIDER}_API_KEY")
+        self.model = model
+        self.options: Dict[str, Any] = {}
+
+    def generate(self, prompt: str, options: Dict[str, Any] = None) -> Dict[str, Any]:
+        merged_options = self.options.copy()
+        if options:
+            merged_options.update(options)
+
+        # --- provider-specific API call here ---
+        # text = ...
+        # prompt_tokens = ...
+        # completion_tokens = ...
+        # raw = ...
+
+        # Compute cost
+        pricing = self.MODEL_PRICING.get(self.model, self.MODEL_PRICING["default"])
+        total_cost = (
+            (prompt_tokens / 1000) * pricing["prompt"]
+            + (completion_tokens / 1000) * pricing["completion"]
+        )
+
+        meta = {
+            "prompt_tokens": prompt_tokens,
+            "completion_tokens": completion_tokens,
+            "total_tokens": prompt_tokens + completion_tokens,
+            "cost": round(total_cost, 6),
+            "raw_response": raw,
+            "model_name": self.model,
+        }
+        return {"text": text, "meta": meta}
+```
+
+## Lazy Import Pattern (for optional SDKs)
+
+```python
+def __init__(self, ...):
+    self._client = None
+    # defer import
+
+def _ensure_client(self):
+    if self._client is not None:
+        return
+    try:
+        from some_sdk import Client
+    except ImportError:
+        raise ImportError(
+            "The 'some-sdk' package is required. "
+            "Install with: pip install prompture[provider]"
+        )
+    self._client = Client(api_key=self.api_key)
+```
+
+## Existing Drivers for Reference
+
+| Driver | File | SDK | Auth |
+|--------|------|-----|------|
+| OpenAI | `openai_driver.py` | `openai` | API key |
+| Claude | `claude_driver.py` | `anthropic` | API key |
+| Google | `google_driver.py` | `google-generativeai` | API key |
+| Groq | `groq_driver.py` | `groq` | API key |
+| Ollama | `ollama_driver.py` | `requests` | Endpoint URL |
+| AirLLM | `airllm_driver.py` | `airllm` (lazy) | None (local) |
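
For orientation, below is one way the provider-specific section of the skeleton might be filled in for a generic OpenAI-compatible HTTP endpoint. Everything here is a hedged sketch: the class name, endpoint, env var, and response shape are assumptions, and a real driver would subclass `Driver` as the template shows.

```python
# Hypothetical driver sketch: fills in the "provider-specific API call here" section
# of the template for an OpenAI-compatible chat endpoint. Endpoint, env var, and
# response layout are assumptions, not part of the package diff.
import os
from typing import Any, Dict

import requests


class ExampleHTTPDriver:
    MODEL_PRICING = {"default": {"prompt": 0.0, "completion": 0.0}}

    def __init__(self, endpoint: str = "http://127.0.0.1:1234/v1/chat/completions",
                 model: str = "example-model"):
        self.endpoint = endpoint
        self.model = model
        self.api_key = os.getenv("EXAMPLE_API_KEY", "")
        self.options: Dict[str, Any] = {}

    def generate(self, prompt: str, options: Dict[str, Any] | None = None) -> Dict[str, Any]:
        merged = {**self.options, **(options or {})}
        resp = requests.post(
            self.endpoint,
            headers={"Authorization": f"Bearer {self.api_key}"},
            json={"model": self.model,
                  "messages": [{"role": "user", "content": prompt}],
                  **merged},
            timeout=60,
        )
        resp.raise_for_status()
        raw = resp.json()
        text = raw["choices"][0]["message"]["content"]
        usage = raw.get("usage", {})
        prompt_tokens = usage.get("prompt_tokens", 0)
        completion_tokens = usage.get("completion_tokens", 0)

        # Same cost formula as the template: USD per 1,000 tokens.
        pricing = self.MODEL_PRICING.get(self.model, self.MODEL_PRICING["default"])
        cost = ((prompt_tokens / 1000) * pricing["prompt"]
                + (completion_tokens / 1000) * pricing["completion"])

        return {
            "text": text,
            "meta": {
                "prompt_tokens": prompt_tokens,
                "completion_tokens": completion_tokens,
                "total_tokens": prompt_tokens + completion_tokens,
                "cost": round(cost, 6),
                "raw_response": raw,
                "model_name": self.model,
            },
        }
```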

prompture-0.0.42.dev1/.claude/skills/add-example/SKILL.md
@@ -0,0 +1,83 @@
+---
+name: add-example
+description: Create a new Prompture usage example script. Follows project conventions for file naming, section structure, docstrings, and output formatting. Use when demonstrating extraction use cases or provider integrations.
+metadata:
+  author: prompture
+  version: "1.0"
+---
+
+# Add an Example File
+
+Creates a standalone runnable example in `examples/`.
+
+## Before Starting
+
+Ask the user:
+- **Topic / use case** (e.g. "medical record extraction", "product review analysis")
+- **Extraction method** — `extract_with_model`, `stepwise_extract_with_model`, `extract_and_jsonify`, `extract_from_data`, or `render_output`
+- **Provider/model** — default: `ollama/gpt-oss:20b`
+
+## Conventions
+
+- File: `examples/{descriptive_name}_example.py`
+- Standalone — no test framework imports
+- Section dividers: `# ── N. Title ──`
+- Always print extracted result and usage metadata
+- Realistic sample text, not lorem ipsum
+- Under 80 lines when possible
+
+## Template
+
+```python
+"""
+Example: {Title}
+
+This example demonstrates:
+1. {Feature 1}
+2. {Feature 2}
+
+Requirements:
+    pip install prompture
+    # Set up provider credentials in .env
+"""
+
+import json
+from pydantic import BaseModel, Field
+from prompture import extract_with_model
+
+# ── 1. Define the output model ──────────────────────────
+
+class MyModel(BaseModel):
+    field1: str = Field(description="...")
+    field2: int = Field(description="...")
+
+# ── 2. Input text ───────────────────────────────────────
+
+text = """
+Realistic sample text here.
+"""
+
+# ── 3. Extract ──────────────────────────────────────────
+
+MODEL = "ollama/gpt-oss:20b"
+
+result = extract_with_model(
+    model_cls=MyModel,
+    text=text,
+    model_name=MODEL,
+)
+
+# ── 4. Results ──────────────────────────────────────────
+
+print("Extracted model:")
+print(result["model"])
+print()
+print("Usage metadata:")
+print(json.dumps(result["usage"], indent=2))
+```
+
+## Rules
+
+- Import only from `prompture` public API
+- Include docstring header listing features and setup requirements
+- If provider-specific, mention the env var in the docstring

prompture-0.0.42.dev1/.claude/skills/add-field/SKILL.md
@@ -0,0 +1,59 @@
+---
+name: add-field
+description: Add predefined field definitions to the Prompture field registry. Handles field structure, categories, template variables, enum support, and thread-safe registration. Use when adding reusable extraction fields.
+metadata:
+  author: prompture
+  version: "1.0"
+---
+
+# Add Field Definitions
+
+Adds new fields to the global `BASE_FIELD_DEFINITIONS` registry in `prompture/field_definitions.py`.
+
+## Before Starting
+
+Ask the user for each field:
+- **Field name** — lowercase, underscore-separated (e.g. `linkedin_url`, `blood_type`)
+- **Category** — Person, Contact, Professional, Financial, Location, Education, Demographic, Social Media, Task Management, or a new one
+- **Type** — `str`, `int`, `float`, `bool`, `list`, `dict`
+- **Description** — what this field represents
+- **Instructions** — how the LLM should extract or compute the value
+- **Default** — type-appropriate: `""` for str, `0` for int, `0.0` for float, `[]` for list, `False` for bool
+- **Nullable** — `True` if the field can legitimately be absent
+- **Enum values** (optional) — list of allowed string values
+
+## Field Structure
+
+```python
+"field_name": {
+    "type": str,
+    "description": "What this field represents.",
+    "instructions": "How the LLM should extract or compute this value.",
+    "default": "",
+    "nullable": False,
+},
+```
+
+### Optional keys
+
+- `"enum"`: `["low", "medium", "high"]`
+- Template variables in `instructions`: `{{current_year}}`, `{{current_date}}`, `{{current_datetime}}`, `{{current_month}}`, `{{current_day}}`, `{{current_weekday}}`, `{{current_iso_week}}`
+
+## Steps
+
+### 1. Edit `prompture/field_definitions.py`
+
+Add fields to `BASE_FIELD_DEFINITIONS` under the right category comment. If the category is new, add a comment header:
+
+```python
+# ── Medical Fields ──────────────────────────────────
+```
+
+Alphabetical order within each category.
+
+### 2. Verify
+
+```bash
+python -c "from prompture.field_definitions import get_field_definition; print(get_field_definition('field_name'))"
+pytest tests/test_field_definitions.py -x -q
+```
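
To make the optional keys concrete, here is a hypothetical entry that combines an `enum` with a template variable; `priority` is an illustration only, not a field shipped in `BASE_FIELD_DEFINITIONS`.

```python
# Hypothetical field entry for illustration; follows the structure documented above.
EXAMPLE_FIELD = {
    "priority": {
        "type": str,
        "description": "Task priority level.",
        "instructions": "Classify urgency as of {{current_date}} based on deadlines in the text.",
        "default": "medium",
        "nullable": False,
        "enum": ["low", "medium", "high"],
    },
}
```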

prompture-0.0.42.dev1/.claude/skills/add-test/SKILL.md
@@ -0,0 +1,87 @@
+---
+name: add-test
+description: Add unit and integration tests for Prompture functionality. Uses pytest conventions, shared fixtures from conftest.py, and the integration marker pattern. Use when writing tests for new or existing features.
+metadata:
+  author: prompture
+  version: "1.0"
+---
+
+# Add Tests
+
+Creates tests in `tests/` following project conventions.
+
+## Infrastructure
+
+- Framework: **pytest**
+- Shared fixtures: `tests/conftest.py`
+- Default model: `DEFAULT_MODEL` (currently `"ollama/gpt-oss:20b"`)
+- Integration marker: `@pytest.mark.integration` (skipped by default)
+
+## Available Fixtures and Helpers
+
+```python
+# Fixtures
+sample_json_schema    # {"name": str, "age": int, "interests": list}
+integration_driver    # Driver from DEFAULT_MODEL (skips if unavailable)
+
+# Assertion helpers
+assert_valid_usage_metadata(meta)            # Checks prompt_tokens, completion_tokens, total_tokens, cost, raw_response
+assert_jsonify_response_structure(response)  # Checks json_string, json_object, usage
+```
+
+## File Naming
+
+- `tests/test_{module}.py` — maps to source module
+- `tests/test_{feature}.py` — cross-cutting features
+
+## Test Structure
+
+```python
+import pytest
+from prompture import extract_with_model
+
+
+class TestFeatureName:
+    """Tests for {feature}."""
+
+    def test_basic_behavior(self):
+        """What this test verifies."""
+        result = some_function(...)
+        assert result["key"] == expected
+
+    def test_error_handling(self):
+        """Should raise ValueError on invalid input."""
+        with pytest.raises(ValueError, match="expected"):
+            some_function(bad_input)
+
+
+class TestFeatureIntegration:
+    """Integration tests requiring live LLM access."""
+
+    @pytest.mark.integration
+    def test_live_extraction(self, integration_driver, sample_json_schema):
+        result = extract_and_jsonify(
+            text="John is 30 years old",
+            json_schema=sample_json_schema,
+            model_name=DEFAULT_MODEL,
+        )
+        assert_jsonify_response_structure(result)
+        assert_valid_usage_metadata(result["usage"])
+```
+
+## Rules
+
+- **Unit tests** (no LLM): no marker, must always pass
+- **Integration tests** (live LLM): must have `@pytest.mark.integration`
+- Use conftest fixtures — don't redefine them
+- One test class per logical group with docstrings
+- Test both happy path and error cases
+- Mock HTTP layer for driver unit tests
+
+## Running
+
+```bash
+pytest tests/ -x -q                            # Unit only
+pytest tests/ --run-integration -x -q          # Include integration
+pytest tests/test_core.py::TestClass::test_method   # Single test
+```
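
The "Mock HTTP layer for driver unit tests" rule is not demonstrated in the skill itself. A minimal sketch of that pattern with pytest's `monkeypatch`, using a made-up driver class rather than one of Prompture's:

```python
# Sketch only: the driver class and response shape are hypothetical, not taken from
# prompture's test suite. It shows the unit-test pattern of replacing the HTTP call.
from typing import Any, Dict
from unittest.mock import MagicMock

import requests


class FakeDriver:
    """Stand-in for a real driver whose generate() calls requests.post."""
    endpoint = "http://example.invalid/v1/chat/completions"

    def generate(self, prompt: str) -> Dict[str, Any]:
        resp = requests.post(self.endpoint, json={"prompt": prompt}, timeout=30)
        data = resp.json()
        return {"text": data["text"], "meta": data["usage"]}


class TestFakeDriverUnit:
    """Unit test: no live LLM, HTTP layer replaced with a mock."""

    def test_generate_parses_response(self, monkeypatch):
        fake_response = MagicMock()
        fake_response.json.return_value = {
            "text": "hello",
            "usage": {"prompt_tokens": 3, "completion_tokens": 1,
                      "total_tokens": 4, "cost": 0.0, "raw_response": {}},
        }
        # Patch the HTTP layer so no network call is made.
        monkeypatch.setattr(requests, "post", lambda *a, **kw: fake_response)

        result = FakeDriver().generate("hi")
        assert result["text"] == "hello"
        assert result["meta"]["total_tokens"] == 4
```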

prompture-0.0.42.dev1/.claude/skills/run-tests/SKILL.md
@@ -0,0 +1,38 @@
+---
+name: run-tests
+description: Run Prompture's test suite with the correct pytest flags. Supports unit-only, integration, single file, single test, verbose, and credential-skip modes. Use when running or debugging tests.
+metadata:
+  author: prompture
+  version: "1.0"
+---
+
+# Run Tests
+
+## Commands
+
+| Intent | Command |
+|--------|---------|
+| All unit tests | `pytest tests/ -x -q` |
+| Include integration tests | `pytest tests/ --run-integration -x -q` |
+| Specific file | `pytest tests/{file}.py -x -q` |
+| Specific class | `pytest tests/{file}.py::{Class} -x -q` |
+| Specific test | `pytest tests/{file}.py::{Class}::{test} -x -q` |
+| Verbose output | Replace `-q` with `-v` |
+| Show print output | Add `-s` |
+| Pattern match | Add `-k "pattern"` |
+| Skip missing credentials | `TEST_SKIP_NO_CREDENTIALS=true pytest tests/ --run-integration -x -q` |
+| Legacy runner | `python test.py` |
+
+## Flags
+
+- `-x` stop on first failure
+- `-q` quiet (dots + summary)
+- `-v` verbose (each test name)
+- `-s` show stdout/stderr
+- `--run-integration` include `@pytest.mark.integration` tests
+
+## After Running
+
+- **Pass**: report count (e.g. "137 passed, 1 skipped")
+- **Fail**: read the failure output, identify root cause, fix it
+- Always run after modifying any file under `prompture/`

prompture-0.0.42.dev1/.claude/skills/scaffold-extraction/SKILL.md
@@ -0,0 +1,76 @@
+---
+name: scaffold-extraction
+description: Scaffold a complete extraction pipeline for a new domain — Pydantic model, field definitions, example script, and tests. Use when building a new extraction use case like medical records, invoices, or reviews.
+metadata:
+  author: prompture
+  version: "1.0"
+---
+
+# Scaffold an Extraction Pipeline
+
+Builds the full pipeline for a new extraction domain: model, fields, example, and tests.
+
+## Before Starting
+
+Ask the user:
+- **Domain / use case** — what kind of data to extract
+- **Fields** — list with types, or infer from a sample text
+- **Provider/model** — default `ollama/gpt-oss:20b`
+- **Method** — one-shot or stepwise (see selection guide below)
+
+## Method Selection Guide
+
+| Scenario | Method |
+|----------|--------|
+| Simple model, < 8 fields | `extract_with_model` (1 LLM call) |
+| Complex model, 8+ fields | `stepwise_extract_with_model` (N calls, more accurate) |
+| No Pydantic model, raw schema | `extract_and_jsonify` |
+| Structured input (CSV, JSON) | `extract_from_data` (TOON, 45-60% token savings) |
+| DataFrame input | `extract_from_pandas` |
+| Non-JSON output | `render_output` |
+
+## Deliverables
+
+### 1. Pydantic Model
+
+```python
+from pydantic import BaseModel, Field
+
+class InvoiceData(BaseModel):
+    vendor_name: str = Field(description="Company that issued the invoice")
+    total_amount: float = Field(description="Total amount due")
+    currency: str = Field(default="USD", description="ISO 4217 currency code")
+```
+
+Rules:
+- `Field(description=...)` on every field — these become LLM instructions
+- Type-appropriate defaults: `""`, `0`, `0.0`, `[]`
+- `Optional[T] = None` only when a field genuinely might not exist
+
+### 2. Field Definitions (optional)
+
+Register in `prompture/field_definitions.py` only if fields are general-purpose. Use the `add-field` skill.
+
+### 3. Example Script
+
+Create `examples/{domain}_extraction_example.py`. Use the `add-example` skill.
+
+### 4. Tests
+
+Unit test for the Pydantic model + integration test for end-to-end extraction. Use the `add-test` skill.
+
+## Sample Call
+
+```python
+from prompture import extract_with_model
+
+result = extract_with_model(
+    model_cls=InvoiceData,
+    text=invoice_text,
+    model_name="ollama/gpt-oss:20b",
+    instruction_template="Extract all invoice details from the following document:",
+)
+
+invoice = result["model"]   # Pydantic instance
+usage = result["usage"]     # Token counts and cost
+```

prompture-0.0.42.dev1/.claude/skills/update-pricing/SKILL.md
@@ -0,0 +1,51 @@
+---
+name: update-pricing
+description: Update LLM model pricing tables in Prompture driver files. Checks provider pricing pages, updates MODEL_PRICING dicts, adds new models, and removes discontinued ones. Use when model prices change or new models launch.
+metadata:
+  author: prompture
+  version: "1.0"
+---
+
+# Update Model Pricing
+
+## Where Pricing Lives
+
+Each driver has a `MODEL_PRICING` class variable:
+
+```python
+class OpenAIDriver(Driver):
+    MODEL_PRICING = {
+        "gpt-4o": {"prompt": 0.005, "completion": 0.015},
+        "default": {"prompt": 0.002, "completion": 0.002},
+    }
+```
+
+Files with pricing:
+- `prompture/drivers/openai_driver.py`
+- `prompture/drivers/claude_driver.py`
+- `prompture/drivers/google_driver.py`
+- `prompture/drivers/groq_driver.py`
+- `prompture/drivers/grok_driver.py`
+- `prompture/drivers/openrouter_driver.py`
+- `prompture/drivers/azure_driver.py`
+
+Local/free drivers (ollama, lmstudio, local_http, airllm) use `0.0`.
+
+## Steps
+
+1. **Search the web** for the provider's current pricing page
+2. **Read** the current `MODEL_PRICING` dict in the driver file
+3. **Update** prices, add new models, remove discontinued ones
+4. **Keep** the `"default"` fallback entry
+5. **Preserve** extra keys like `"tokens_param"` or `"supports_temperature"`
+6. **Run tests**: `pytest tests/ -x -q`
+
+## Format
+
+- Values: **cost per 1,000 tokens** in USD
+- Both `"prompt"` and `"completion"` keys required
+- Always include `"default"` as fallback
+
+## Side Effects
+
+`prompture/discovery.py` reads `MODEL_PRICING` keys to list available models. Adding or removing models from pricing automatically updates discovery results.
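
A quick worked example of the per-1,000-token format, using the `gpt-4o` entry above and the cost formula from the driver template:

```python
# Worked example: cost for 1,200 prompt tokens and 400 completion tokens at
# gpt-4o's listed rates (USD per 1K tokens). Token counts are arbitrary.
pricing = {"prompt": 0.005, "completion": 0.015}
prompt_tokens, completion_tokens = 1200, 400

cost = ((prompt_tokens / 1000) * pricing["prompt"]
        + (completion_tokens / 1000) * pricing["completion"])
print(round(cost, 6))  # 0.012  (= 1.2 * 0.005 + 0.4 * 0.015)
```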

{prompture-0.0.32 → prompture-0.0.42.dev1}/.env.copy
@@ -23,6 +23,7 @@ OLLAMA_MODEL=gemma:latest
 # Required if AI_PROVIDER=lmstudio
 LMSTUDIO_ENDPOINT=http://127.0.0.1:1234/v1/chat/completions
 LMSTUDIO_MODEL=deepseek/deepseek-r1-0528-qwen3-8b
+LMSTUDIO_API_KEY=
 
 # Azure OpenAI Configuration
 AZURE_API_KEY=
@@ -52,9 +53,31 @@ GROQ_MODEL=llama2-70b-4096
 # OpenRouter Configuration
 # Required if AI_PROVIDER=openrouter
 OPENROUTER_API_KEY=your-api-key-here
-OPENROUTER_MODEL=openai/gpt-
+OPENROUTER_MODEL=openai/gpt-4o-mini
 
 # Grok Configuration
 # Required if AI_PROVIDER=grok
 GROK_API_KEY=your-api-key-here
-GROK_MODEL=grok-4-fast-reasoning
+GROK_MODEL=grok-4-fast-reasoning
+
+# Moonshot AI (Kimi) Configuration
+# Required if AI_PROVIDER=moonshot
+MOONSHOT_API_KEY=
+MOONSHOT_MODEL=kimi-k2-0905-preview
+MOONSHOT_ENDPOINT=https://api.moonshot.ai/v1
+
+# Z.ai (Zhipu AI) Configuration
+# Required if AI_PROVIDER=zai
+ZHIPU_API_KEY=
+ZHIPU_MODEL=glm-4.7
+ZHIPU_ENDPOINT=https://api.z.ai/api/paas/v4
+
+# ModelScope (Alibaba Cloud) Configuration
+# Required if AI_PROVIDER=modelscope
+MODELSCOPE_API_KEY=
+MODELSCOPE_MODEL=Qwen/Qwen3-235B-A22B-Instruct-2507
+MODELSCOPE_ENDPOINT=https://api-inference.modelscope.cn/v1
+
+# AirLLM Configuration
+AIRLLM_MODEL=meta-llama/Llama-2-7b-hf
+AIRLLM_COMPRESSION=
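
The new blocks above follow the same environment-variable pattern the driver template reads with `os.getenv`. A small sketch for the Moonshot block (the defaults simply mirror the values in `.env.copy`; the actual Settings wiring may differ):

```python
# Sketch only: reading the Moonshot env vars added above with os.getenv.
import os

moonshot_api_key = os.getenv("MOONSHOT_API_KEY", "")
moonshot_model = os.getenv("MOONSHOT_MODEL", "kimi-k2-0905-preview")
moonshot_endpoint = os.getenv("MOONSHOT_ENDPOINT", "https://api.moonshot.ai/v1")

print(moonshot_model, moonshot_endpoint)
```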