prompture 0.0.33.dev1__tar.gz → 0.0.33.dev2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. prompture-0.0.33.dev2/.claude/skills/add-driver.md +121 -0
  2. prompture-0.0.33.dev2/.claude/skills/add-example.md +76 -0
  3. prompture-0.0.33.dev2/.claude/skills/add-field.md +72 -0
  4. prompture-0.0.33.dev2/.claude/skills/add-test.md +87 -0
  5. prompture-0.0.33.dev2/.claude/skills/run-tests.md +32 -0
  6. prompture-0.0.33.dev2/.claude/skills/scaffold-extraction.md +71 -0
  7. prompture-0.0.33.dev2/.claude/skills/update-pricing.md +46 -0
  8. prompture-0.0.33.dev2/.mcp.json +19 -0
  9. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/PKG-INFO +1 -1
  10. prompture-0.0.33.dev2/ROADMAP.md +295 -0
  11. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/__init__.py +5 -0
  12. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/discovery.py +11 -1
  13. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/drivers/azure_driver.py +10 -4
  14. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/drivers/claude_driver.py +10 -4
  15. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/drivers/google_driver.py +14 -6
  16. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/drivers/grok_driver.py +10 -4
  17. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/drivers/groq_driver.py +10 -4
  18. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/drivers/openai_driver.py +10 -4
  19. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/drivers/openrouter_driver.py +10 -4
  20. prompture-0.0.33.dev2/prompture/model_rates.py +216 -0
  21. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/settings.py +3 -0
  22. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture.egg-info/PKG-INFO +1 -1
  23. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture.egg-info/SOURCES.txt +10 -0
  24. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/.env.copy +0 -0
  25. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/.github/FUNDING.yml +0 -0
  26. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/.github/scripts/update_docs_version.py +0 -0
  27. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/.github/scripts/update_wrapper_version.py +0 -0
  28. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/.github/workflows/dev.yml +0 -0
  29. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/.github/workflows/documentation.yml +0 -0
  30. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/.github/workflows/publish.yml +0 -0
  31. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/CLAUDE.md +0 -0
  32. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/LICENSE +0 -0
  33. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/MANIFEST.in +0 -0
  34. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/README.md +0 -0
  35. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/VERSION +0 -0
  36. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/docs/source/_static/custom.css +0 -0
  37. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/docs/source/api/core.rst +0 -0
  38. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/docs/source/api/drivers.rst +0 -0
  39. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/docs/source/api/field_definitions.rst +0 -0
  40. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/docs/source/api/index.rst +0 -0
  41. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/docs/source/api/runner.rst +0 -0
  42. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/docs/source/api/tools.rst +0 -0
  43. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/docs/source/api/validator.rst +0 -0
  44. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/docs/source/conf.py +0 -0
  45. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/docs/source/contributing.rst +0 -0
  46. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/docs/source/examples.rst +0 -0
  47. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/docs/source/field_definitions_reference.rst +0 -0
  48. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/docs/source/index.rst +0 -0
  49. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/docs/source/installation.rst +0 -0
  50. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/docs/source/quickstart.rst +0 -0
  51. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/docs/source/toon_input_guide.rst +0 -0
  52. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/packages/README.md +0 -0
  53. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/packages/llm_to_json/README.md +0 -0
  54. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/packages/llm_to_json/llm_to_json/__init__.py +0 -0
  55. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/packages/llm_to_json/pyproject.toml +0 -0
  56. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/packages/llm_to_json/test.py +0 -0
  57. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/packages/llm_to_toon/README.md +0 -0
  58. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/packages/llm_to_toon/llm_to_toon/__init__.py +0 -0
  59. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/packages/llm_to_toon/pyproject.toml +0 -0
  60. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/packages/llm_to_toon/test.py +0 -0
  61. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/cli.py +0 -0
  62. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/core.py +0 -0
  63. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/driver.py +0 -0
  64. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/drivers/__init__.py +0 -0
  65. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/drivers/airllm_driver.py +0 -0
  66. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/drivers/hugging_driver.py +0 -0
  67. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/drivers/lmstudio_driver.py +0 -0
  68. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/drivers/local_http_driver.py +0 -0
  69. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/drivers/ollama_driver.py +0 -0
  70. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/field_definitions.py +0 -0
  71. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/runner.py +0 -0
  72. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/tools.py +0 -0
  73. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture/validator.py +0 -0
  74. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture.egg-info/dependency_links.txt +0 -0
  75. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture.egg-info/entry_points.txt +0 -0
  76. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture.egg-info/requires.txt +0 -0
  77. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/prompture.egg-info/top_level.txt +0 -0
  78. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/requirements.txt +0 -0
  79. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/setup.cfg +0 -0
  80. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/setup.py +0 -0
  81. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/test.py +0 -0
  82. {prompture-0.0.33.dev1 → prompture-0.0.33.dev2}/test_version_diagnosis.py +0 -0
@@ -0,0 +1,121 @@ prompture-0.0.33.dev2/.claude/skills/add-driver.md
# Skill: Add a New LLM Driver

When the user asks to add a new driver (provider), follow this checklist exactly. Ask the user for any values marked **[ASK]** before writing code.

## Information to Gather

- **Provider name** (lowercase, used as registry key and `provider/model` prefix): [ASK]
- **SDK package name** on PyPI (e.g. `openai`, `anthropic`): [ASK]
- **Minimum SDK version**: [ASK]
- **Default model ID**: [ASK]
- **Authentication**: API key env var name, or endpoint URL, or both: [ASK]
- **Model pricing**: dict of model names to `{"prompt": cost_per_1k, "completion": cost_per_1k}`, or `0.0` for free/local: [ASK]
- **Lazy or eager import**: Use lazy import (try/except inside methods) if the SDK is an optional dependency. Use eager import if the SDK is in `install_requires`.

## Files to Touch (in order)

### 1. `prompture/drivers/{provider}_driver.py` (NEW)

Follow this exact skeleton — match the style of existing drivers:

```python
import os
import logging
from ..driver import Driver
from typing import Any, Dict

logger = logging.getLogger(__name__)


class {Provider}Driver(Driver):
    MODEL_PRICING = {
        # "model-name": {"prompt": 0.00, "completion": 0.00},
        "default": {"prompt": 0.0, "completion": 0.0},
    }

    def __init__(self, api_key: str | None = None, model: str = "default-model"):
        self.api_key = api_key or os.getenv("{PROVIDER}_API_KEY")
        self.model = model
        self.options: Dict[str, Any] = {}

    def generate(self, prompt: str, options: Dict[str, Any] | None = None) -> Dict[str, Any]:
        merged_options = self.options.copy()
        if options:
            merged_options.update(options)

        # --- provider-specific call here ---
        # (it should yield `text`, `raw`, `prompt_tokens`, `completion_tokens`, `total_cost`)

        meta = {
            "prompt_tokens": prompt_tokens,
            "completion_tokens": completion_tokens,
            "total_tokens": prompt_tokens + completion_tokens,
            "cost": round(total_cost, 6),
            "raw_response": raw,
            "model_name": self.model,
        }
        return {"text": text, "meta": meta}
```

Key rules:
- Subclass `Driver` from `..driver`.
- `generate()` returns `{"text": str, "meta": dict}`.
- `meta` MUST contain: `prompt_tokens`, `completion_tokens`, `total_tokens`, `cost`, `raw_response`, `model_name`.
- If the SDK is optional, wrap its import inside `generate()` or `__init__` in try/except and raise a clear `ImportError` pointing to `pip install prompture[{provider}]` (see the sketch below).

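A minimal sketch of that lazy-import pattern, assuming the optional SDK exposes a client class (`some_sdk` and `Client` are placeholders, not any specific provider's API):

```python
def __init__(self, api_key: str | None = None, model: str = "default-model"):
    try:
        import some_sdk  # placeholder name for the provider's optional SDK
    except ImportError as exc:
        raise ImportError(
            "The {provider} driver requires the '{sdk-package}' package. "
            "Install it with: pip install prompture[{provider}]"
        ) from exc
    self.api_key = api_key or os.getenv("{PROVIDER}_API_KEY")
    self._client = some_sdk.Client(api_key=self.api_key)  # assumed SDK entry point
    self.model = model
    self.options: Dict[str, Any] = {}
```
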
### 2. `prompture/drivers/__init__.py`

- Add import at top: `from .{provider}_driver import {Provider}Driver`
- Add entry to `DRIVER_REGISTRY`:

```python
"{provider}": lambda model=None: {Provider}Driver(
    api_key=settings.{provider}_api_key,
    model=model or settings.{provider}_model
),
```

- Add `"{Provider}Driver"` to `__all__`.

### 3. `prompture/__init__.py`

- Add `{Provider}Driver` to the import line from `.drivers`.
- Add `"{Provider}Driver"` to `__all__` in the `# Drivers` section (see the sketch below).

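For illustration only, the two `prompture/__init__.py` additions could look like this (the neighboring names are placeholders for whatever the file already imports and exports):

```python
from .drivers import (
    OpenAIDriver,        # ...existing driver exports
    {Provider}Driver,    # new driver
)

__all__ = [
    # Drivers
    "OpenAIDriver",      # ...existing entries
    "{Provider}Driver",  # new driver
]
```
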
### 4. `prompture/settings.py`

Add settings fields inside the `Settings` class, following the existing pattern:

```python
# {Provider}
{provider}_api_key: Optional[str] = None
{provider}_model: str = "default-model"
```

Add endpoint fields too if the provider uses a configurable URL.

### 5. `setup.py`

If the SDK is an optional dependency, add to `extras_require`:

```python
"{provider}": ["{sdk-package}>={min-version}"],
```

If the SDK should always be installed, add to `install_requires` instead.

### 6. `.env.copy`

Add a section at the end:

```
# {Provider} Configuration
{PROVIDER}_API_KEY=your-api-key-here
{PROVIDER}_MODEL=default-model
```

### 7. `CLAUDE.md`

Add `{provider}` to the driver list in the **Module Layout** bullet for `prompture/drivers/`.

## Verification

After all files are written:
1. Run `python -c "from prompture import {Provider}Driver; print('OK')"` to confirm clean import.
2. Run `python -c "from prompture.drivers import get_driver_for_model; d = get_driver_for_model('{provider}/test'); print(d.model)"` to confirm registry resolution.
3. Run `pytest tests/ -x -q` to confirm no regressions.
@@ -0,0 +1,76 @@ prompture-0.0.33.dev2/.claude/skills/add-example.md
# Skill: Add an Example File

When the user asks to create a new usage example, follow this template and conventions.

## Information to Gather

- **Topic / use case** (e.g. "medical record extraction", "product review analysis"): [ASK]
- **Which extraction method** to demonstrate: `extract_with_model`, `stepwise_extract_with_model`, `extract_and_jsonify`, `extract_from_data`, or `render_output`: [ASK if unclear]
- **Which provider/model** to target (default: `ollama/gpt-oss:20b`): [ASK or use default]

## File Conventions

- Location: `examples/{descriptive_name}_example.py`
- Filename: lowercase, underscores, ends with `_example.py`
- Must be a standalone runnable script (no test framework dependency)

## Template

```python
"""
Example: {Title}

This example demonstrates:
1. {Feature 1}
2. {Feature 2}
3. {Feature 3}

Requirements:
    pip install prompture
    # Set up your provider credentials in .env
"""

import json
from pydantic import BaseModel, Field
from prompture import extract_with_model  # or whichever function

# ── 1. Define the output model ──────────────────────────

class MyModel(BaseModel):
    field1: str = Field(description="...")
    field2: int = Field(description="...")

# ── 2. Input text ───────────────────────────────────────

text = """
Paste realistic sample text here.
"""

# ── 3. Extract ──────────────────────────────────────────

MODEL = "ollama/gpt-oss:20b"

result = extract_with_model(
    model_cls=MyModel,
    text=text,
    model_name=MODEL,
)

# ── 4. Results ──────────────────────────────────────────

print("Extracted model:")
print(result["model"])
print()
print("Usage metadata:")
print(json.dumps(result["usage"], indent=2))
```

## Rules

- Use section comments with the `# ── N. Title ──` format to divide the script
- Always print both the extracted result and the usage metadata
- Use realistic sample text, not placeholder lorem ipsum
- Import only from `prompture` public API (never internal modules)
- Include a docstring header listing what the example demonstrates and any setup requirements
- If the example needs a specific provider, mention the env var in the docstring
- Keep it under 80 lines if possible — examples should be concise
@@ -0,0 +1,72 @@ prompture-0.0.33.dev2/.claude/skills/add-field.md
# Skill: Add Field Definitions

When the user asks to add new field definitions to the registry, follow this process.

## Information to Gather

Ask the user:
- **Field name(s)** — lowercase, underscore-separated (e.g. `linkedin_url`, `blood_type`)
- **Category** — which logical group (Person, Contact, Professional, Financial, etc.) or a new category
- **For each field**: type, description, instructions, default value, nullable, and optionally enum values

## Where Fields Live

All predefined fields are in `prompture/field_definitions.py` inside the `BASE_FIELD_DEFINITIONS` dict, organized by category comments.

## Field Definition Structure

Every field follows this exact shape:

```python
"field_name": {
    "type": str,  # Python type: str, int, float, bool, list, dict
    "description": "What this field represents.",
    "instructions": "How the LLM should extract or compute this value.",
    "default": "",  # Type-appropriate default (0 for int, "" for str, [] for list, False for bool)
    "nullable": False,  # True if the field can be None/null
},
```

### Optional keys

- `"enum"`: list of allowed string values (e.g. `["low", "medium", "high"]`)
- Template variables in `instructions`: `{{current_year}}`, `{{current_date}}`, `{{current_datetime}}`, `{{current_month}}`, `{{current_day}}`, `{{current_weekday}}`, `{{current_iso_week}}` (see the example below)

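As an illustration, a hypothetical field whose `instructions` use one of those template variables:

```python
"career_start_year": {
    "type": int,
    "description": "Year the person started their professional career.",
    "instructions": "Extract the starting year. If only years of experience are given, subtract them from {{current_year}}.",
    "default": 0,
    "nullable": True,
},
```
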
## Steps

### 1. Edit `prompture/field_definitions.py`

Add the new field(s) to `BASE_FIELD_DEFINITIONS` under the appropriate category comment block. If the category is new, add a clear comment header like:

```python
# ── Medical Fields ──────────────────────────────────
```

Place the entries in alphabetical order within their category.

### 2. Verify

Run:
```bash
python -c "from prompture.field_definitions import get_field_definition; print(get_field_definition('field_name'))"
```

Then run the field definitions tests:
```bash
pytest tests/test_field_definitions.py -x -q
```

## Example

Adding a `blood_type` field:

```python
"blood_type": {
    "type": str,
    "description": "Blood type classification (ABO system with Rh factor).",
    "instructions": "Extract the blood type. Use standard notation: A+, A-, B+, B-, AB+, AB-, O+, O-.",
    "default": "",
    "nullable": True,
    "enum": ["A+", "A-", "B+", "B-", "AB+", "AB-", "O+", "O-"],
},
```
@@ -0,0 +1,87 @@ prompture-0.0.33.dev2/.claude/skills/add-test.md
# Skill: Add Tests

When the user asks to add tests for new or existing functionality, follow these conventions.

## Test Infrastructure

- Framework: `pytest`
- Test directory: `tests/`
- Shared fixtures and helpers: `tests/conftest.py`
- Default test model: `DEFAULT_MODEL` from `conftest.py` (currently `"ollama/gpt-oss:20b"`)
- Integration marker: `@pytest.mark.integration` (skipped by default, run with `--run-integration`)

## Key Fixtures and Helpers (from conftest.py)

```python
# Fixtures
sample_json_schema   # Standard {"name": str, "age": int, "interests": list} schema
integration_driver   # Driver instance from DEFAULT_MODEL (skips if unavailable)

# Assertion helpers
assert_valid_usage_metadata(meta)            # Validates prompt_tokens, completion_tokens, total_tokens, cost, raw_response
assert_jsonify_response_structure(response)  # Validates json_string, json_object, usage keys
```

## Test File Naming

- `tests/test_{module}.py` — maps to source module (e.g. `test_core.py`, `test_field_definitions.py`)
- `tests/test_{feature}.py` — for cross-cutting features (e.g. `test_toon_input.py`, `test_enhanced_extraction.py`)

## Test Class and Function Conventions

```python
import pytest
from prompture import extract_and_jsonify, extract_with_model, get_driver_for_model
from tests.conftest import DEFAULT_MODEL, assert_jsonify_response_structure, assert_valid_usage_metadata  # if needed


class TestFeatureName:
    """Tests for {feature description}."""

    def test_basic_behavior(self):
        """What the test verifies in plain English."""
        result = some_function(...)
        assert result["key"] == expected

    def test_edge_case(self):
        """Edge case: empty input."""
        ...

    def test_error_handling(self):
        """Should raise ValueError on invalid input."""
        with pytest.raises(ValueError, match="expected message"):
            some_function(bad_input)


class TestFeatureIntegration:
    """Integration tests requiring live LLM access."""

    @pytest.mark.integration
    def test_live_extraction(self, integration_driver, sample_json_schema):
        """End-to-end extraction with live model."""
        result = extract_and_jsonify(
            text="John is 30 years old",
            json_schema=sample_json_schema,
            model_name=DEFAULT_MODEL,
        )
        assert_jsonify_response_structure(result)
        assert_valid_usage_metadata(result["usage"])
```

## Rules

- **Unit tests** (no LLM call): No marker needed, should always pass
- **Integration tests** (live LLM call): Must have `@pytest.mark.integration`
- Use `conftest.py` fixtures and helpers — don't redefine them
- One test class per logical group, clear docstrings
- Test both happy path and error cases
- For driver tests, mock the HTTP layer (use `unittest.mock.patch` or the `responses` library) for unit tests — see the sketch below

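A minimal sketch of that mocking rule. It assumes the driver under test sends its HTTP request through `requests.post` and that the fake payload below matches what that driver parses — adjust the patch target and payload to the actual driver:

```python
from unittest.mock import patch

from prompture import get_driver_for_model


class TestDriverUnit:
    """Unit-level driver test: no live HTTP request leaves the process."""

    def test_generate_returns_text_and_meta(self):
        """generate() should return the text plus the required meta keys."""
        driver = get_driver_for_model("ollama/gpt-oss:20b")

        fake_payload = {
            "response": '{"name": "John", "age": 30, "interests": []}',
            "prompt_eval_count": 12,
            "eval_count": 7,
        }
        with patch("requests.post") as mock_post:
            mock_post.return_value.status_code = 200
            mock_post.return_value.json.return_value = fake_payload
            result = driver.generate("Extract the person's details", options={})

        assert "text" in result and "meta" in result
```
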
## Running

```bash
pytest tests/                                       # Unit tests only
pytest tests/ --run-integration                     # Include integration tests
pytest tests/test_core.py -x -q                     # Single file, stop on first failure
pytest tests/test_core.py::TestClass::test_method   # Single test
```
@@ -0,0 +1,32 @@ prompture-0.0.33.dev2/.claude/skills/run-tests.md
# Skill: Run Tests

When the user asks to run tests, use the appropriate command based on what they want.

## Commands

| Intent | Command |
|--------|---------|
| All unit tests (default) | `pytest tests/ -x -q` |
| All tests including integration | `pytest tests/ --run-integration -x -q` |
| Specific test file | `pytest tests/{file}.py -x -q` |
| Specific test class | `pytest tests/{file}.py::{ClassName} -x -q` |
| Specific test function | `pytest tests/{file}.py::{ClassName}::{test_name} -x -q` |
| With verbose output | Add `-v` instead of `-q` |
| Show print output | Add `-s` |
| Skip integration if no creds | `TEST_SKIP_NO_CREDENTIALS=true pytest tests/ --run-integration -x -q` |
| Using test.py runner | `python test.py` |

## Flags Reference

- `-x` — Stop on first failure
- `-q` — Quiet output (just dots and summary)
- `-v` — Verbose (show each test name)
- `-s` — Show stdout/stderr (print statements)
- `--run-integration` — Include `@pytest.mark.integration` tests
- `-k "pattern"` — Run tests matching name pattern

## After Running

- If tests pass: Report the count (e.g. "137 passed, 1 skipped")
- If tests fail: Read the failure output, identify the root cause, and fix it
- Always run tests after making changes to any source file under `prompture/`
@@ -0,0 +1,71 @@ prompture-0.0.33.dev2/.claude/skills/scaffold-extraction.md
# Skill: Scaffold an Extraction Pipeline

When the user wants to build a new extraction use case (e.g. "extract medical records", "parse invoices", "analyze reviews"), scaffold the full pipeline: Pydantic model, field definitions, extraction call, and test.

## Information to Gather

- **Domain / use case**: What kind of data are we extracting? [ASK]
- **Fields**: List of fields with types, or let me infer from a sample text [ASK]
- **Provider/model**: Which model to target (default: `ollama/gpt-oss:20b`) [ASK or use default]
- **Extraction method**: One-shot (`extract_with_model`) or stepwise (`stepwise_extract_with_model`) [ASK or recommend based on complexity]

## Output Files

### 1. Pydantic Model (add to existing or new module)

```python
from pydantic import BaseModel, Field

class InvoiceData(BaseModel):
    vendor_name: str = Field(description="Company or person that issued the invoice")
    invoice_number: str = Field(description="Unique invoice identifier")
    total_amount: float = Field(description="Total amount due in the invoice currency")
    currency: str = Field(default="USD", description="ISO 4217 currency code")
    line_items: list = Field(default_factory=list, description="List of individual items")
```

Rules for the model:
- Use `Field(description=...)` on every field — these become LLM instructions
- Use type-appropriate defaults: `""` for str, `0` for int, `0.0` for float, `[]` for list
- Use `Optional[T] = None` only when a field genuinely might not exist in the source

### 2. Field Definitions (optional, if reuse is desired)

Register fields in `prompture/field_definitions.py` using the add-field skill pattern. Only do this if the fields are general-purpose enough to reuse across projects.

### 3. Example Script

Create `examples/{domain}_extraction_example.py` following the add-example skill template.

### 4. Tests

Create tests following the add-test skill pattern:
- Unit test: Validate the Pydantic model accepts expected data shapes (see the sketch below)
- Integration test: End-to-end extraction from sample text

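A minimal sketch of the unit-test half, reusing the `InvoiceData` model above (import it from wherever the scaffolded model module ends up; the sample values are made up):

```python
import pytest
from pydantic import ValidationError

# from myproject.models import InvoiceData  # wherever the scaffolded model lives


class TestInvoiceDataModel:
    """Unit tests for the model shape only — no LLM call."""

    def test_accepts_expected_shape(self):
        """A fully populated payload round-trips through the model."""
        invoice = InvoiceData(
            vendor_name="Acme Corp",
            invoice_number="INV-0042",
            total_amount=199.99,
            currency="EUR",
            line_items=[{"description": "Widget", "qty": 3}],
        )
        assert invoice.currency == "EUR"
        assert len(invoice.line_items) == 1

    def test_rejects_non_numeric_total(self):
        """total_amount must be coercible to float."""
        with pytest.raises(ValidationError):
            InvoiceData(
                vendor_name="Acme Corp",
                invoice_number="INV-0042",
                total_amount="not a number",
            )
```
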
## Extraction Method Selection Guide

| Scenario | Recommended Method |
|----------|-------------------|
| Simple model, < 8 fields | `extract_with_model` (one-shot, 1 LLM call) |
| Complex model, 8+ fields | `stepwise_extract_with_model` (per-field, N calls but more accurate) |
| No Pydantic model, raw schema | `extract_and_jsonify` |
| Structured input data (CSV, JSON) | `extract_from_data` (TOON input, saves 45-60% tokens) |
| DataFrame input | `extract_from_pandas` |
| Non-JSON output (text, HTML, markdown) | `render_output` |

## Sample Extraction Call

```python
from prompture import extract_with_model

result = extract_with_model(
    model_cls=InvoiceData,
    text=invoice_text,
    model_name="ollama/gpt-oss:20b",
    instruction_template="Extract all invoice details from the following document:",
)

invoice = result["model"]  # Pydantic model instance
usage = result["usage"]    # Token counts and cost
```
@@ -0,0 +1,46 @@ prompture-0.0.33.dev2/.claude/skills/update-pricing.md
# Skill: Update Model Pricing

When the user asks to update pricing for a provider's models, follow this process.

## Where Pricing Lives

Each driver has a `MODEL_PRICING` class variable — a dict mapping model names to cost-per-1K-token values:

```python
class OpenAIDriver(Driver):
    MODEL_PRICING = {
        "gpt-4o": {"prompt": 0.005, "completion": 0.015},
        "gpt-4o-mini": {"prompt": 0.00015, "completion": 0.0006},
        "default": {"prompt": 0.002, "completion": 0.002},
    }
```

Files to check:
- `prompture/drivers/openai_driver.py`
- `prompture/drivers/claude_driver.py`
- `prompture/drivers/google_driver.py`
- `prompture/drivers/groq_driver.py`
- `prompture/drivers/grok_driver.py`
- `prompture/drivers/openrouter_driver.py`
- `prompture/drivers/azure_driver.py`

Local/free drivers (ollama, lmstudio, local_http, airllm) all use `0.0`.

## Steps

1. **Search the web** for the latest pricing page for the provider (e.g. "OpenAI API pricing 2026")
2. **Read** the current `MODEL_PRICING` dict in the driver file
3. **Update** prices, add new models, remove discontinued models
4. **Verify** the `"default"` entry still makes sense as a fallback
5. **Run tests**: `pytest tests/ -x -q`

## Pricing Format

- Values are **cost per 1,000 tokens** in USD (see the cost sketch below)
- Always include both `"prompt"` and `"completion"` keys
- Always keep a `"default"` entry as fallback
- Some drivers have extra keys like `"tokens_param"` or `"supports_temperature"` — preserve those

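To make the per-1K convention concrete, this is roughly how token counts turn into the `cost` value a driver reports in `meta` (a sketch of the arithmetic, not the exact helper any driver uses):

```python
# Rates copied from the OpenAIDriver example above
PRICING = {
    "gpt-4o": {"prompt": 0.005, "completion": 0.015},
    "default": {"prompt": 0.002, "completion": 0.002},
}

def estimate_cost(model: str, prompt_tokens: int, completion_tokens: int) -> float:
    """Cost in USD from per-1K-token rates, falling back to the 'default' entry."""
    rates = PRICING.get(model, PRICING["default"])
    cost = (prompt_tokens / 1000) * rates["prompt"] + (completion_tokens / 1000) * rates["completion"]
    return round(cost, 6)

# 1,200 prompt tokens + 300 completion tokens on gpt-4o:
# 1.2 * 0.005 + 0.3 * 0.015 = 0.0105 USD
print(estimate_cost("gpt-4o", 1200, 300))  # 0.0105
```
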
## Also check `prompture/discovery.py`

The discovery module reads `MODEL_PRICING` keys to list available models. Adding/removing models from pricing automatically updates discovery.
@@ -0,0 +1,19 @@ prompture-0.0.33.dev2/.mcp.json
{
  "mcpServers": {
    "context7": {
      "type": "stdio",
      "command": "npx",
      "args": ["-y", "@upstash/context7-mcp@latest"]
    },
    "fetch": {
      "type": "stdio",
      "command": "npx",
      "args": ["-y", "@modelcontextprotocol/server-fetch"]
    },
    "sequential-thinking": {
      "type": "stdio",
      "command": "npx",
      "args": ["-y", "@modelcontextprotocol/server-sequentialthinking"]
    }
  }
}
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: prompture
-Version: 0.0.33.dev1
+Version: 0.0.33.dev2
 Summary: Ask LLMs to return structured JSON and run cross-model tests. API-first.
 Home-page: https://github.com/jhd3197/prompture
 Author: Juan Denis