prompture 0.0.46.dev1__tar.gz → 0.0.47__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prompture-0.0.47/.claude/skills/add-driver/SKILL.md +221 -0
- prompture-0.0.47/.claude/skills/add-driver/references/driver-template.md +364 -0
- prompture-0.0.47/.claude/skills/add-example/SKILL.md +185 -0
- prompture-0.0.47/.claude/skills/add-persona/SKILL.md +277 -0
- prompture-0.0.47/.claude/skills/add-tool/SKILL.md +222 -0
- prompture-0.0.47/.claude/skills/update-pricing/SKILL.md +136 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/PKG-INFO +35 -2
- {prompture-0.0.46.dev1 → prompture-0.0.47}/README.md +34 -1
- prompture-0.0.47/VERSION +1 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/_version.py +2 -2
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/async_conversation.py +87 -2
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/conversation.py +87 -2
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/async_azure_driver.py +77 -0
- prompture-0.0.47/prompture/drivers/async_grok_driver.py +201 -0
- prompture-0.0.47/prompture/drivers/async_groq_driver.py +180 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/async_lmstudio_driver.py +10 -2
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/async_moonshot_driver.py +32 -12
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/async_ollama_driver.py +85 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/async_openrouter_driver.py +43 -17
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/azure_driver.py +77 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/grok_driver.py +101 -2
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/groq_driver.py +92 -2
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/lmstudio_driver.py +11 -2
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/moonshot_driver.py +32 -12
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/ollama_driver.py +91 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/openrouter_driver.py +34 -10
- prompture-0.0.47/prompture/simulated_tools.py +115 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/tools_schema.py +22 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture.egg-info/PKG-INFO +35 -2
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture.egg-info/SOURCES.txt +4 -0
- prompture-0.0.46.dev1/.claude/skills/add-driver/SKILL.md +0 -85
- prompture-0.0.46.dev1/.claude/skills/add-driver/references/driver-template.md +0 -83
- prompture-0.0.46.dev1/.claude/skills/add-example/SKILL.md +0 -83
- prompture-0.0.46.dev1/.claude/skills/update-pricing/SKILL.md +0 -51
- prompture-0.0.46.dev1/prompture/drivers/async_grok_driver.py +0 -97
- prompture-0.0.46.dev1/prompture/drivers/async_groq_driver.py +0 -90
- {prompture-0.0.46.dev1 → prompture-0.0.47}/.claude/skills/add-field/SKILL.md +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/.claude/skills/add-test/SKILL.md +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/.claude/skills/run-tests/SKILL.md +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/.claude/skills/scaffold-extraction/SKILL.md +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/.env.copy +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/.github/FUNDING.yml +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/.github/scripts/update_docs_version.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/.github/scripts/update_wrapper_version.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/.github/workflows/dev.yml +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/.github/workflows/documentation.yml +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/.github/workflows/publish.yml +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/CLAUDE.md +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/LICENSE +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/MANIFEST.in +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/ROADMAP.md +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/docs/source/_static/custom.css +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/docs/source/_templates/footer.html +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/docs/source/api/core.rst +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/docs/source/api/drivers.rst +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/docs/source/api/field_definitions.rst +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/docs/source/api/index.rst +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/docs/source/api/runner.rst +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/docs/source/api/tools.rst +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/docs/source/api/validator.rst +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/docs/source/conf.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/docs/source/contributing.rst +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/docs/source/examples.rst +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/docs/source/field_definitions_reference.rst +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/docs/source/index.rst +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/docs/source/installation.rst +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/docs/source/quickstart.rst +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/docs/source/toon_input_guide.rst +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/packages/README.md +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/packages/llm_to_json/README.md +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/packages/llm_to_json/llm_to_json/__init__.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/packages/llm_to_json/pyproject.toml +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/packages/llm_to_json/test.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/packages/llm_to_toon/README.md +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/packages/llm_to_toon/llm_to_toon/__init__.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/packages/llm_to_toon/pyproject.toml +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/packages/llm_to_toon/test.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/__init__.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/agent.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/agent_types.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/aio/__init__.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/async_agent.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/async_core.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/async_driver.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/async_groups.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/cache.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/callbacks.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/cli.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/core.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/cost_mixin.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/discovery.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/driver.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/__init__.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/airllm_driver.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/async_airllm_driver.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/async_claude_driver.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/async_google_driver.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/async_hugging_driver.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/async_local_http_driver.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/async_modelscope_driver.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/async_openai_driver.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/async_registry.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/async_zai_driver.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/claude_driver.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/google_driver.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/hugging_driver.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/local_http_driver.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/modelscope_driver.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/openai_driver.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/registry.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/vision_helpers.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/drivers/zai_driver.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/field_definitions.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/group_types.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/groups.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/image.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/ledger.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/logging.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/model_rates.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/persistence.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/persona.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/runner.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/scaffold/__init__.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/scaffold/generator.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/scaffold/templates/Dockerfile.j2 +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/scaffold/templates/README.md.j2 +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/scaffold/templates/config.py.j2 +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/scaffold/templates/env.example.j2 +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/scaffold/templates/main.py.j2 +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/scaffold/templates/models.py.j2 +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/scaffold/templates/requirements.txt.j2 +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/serialization.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/server.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/session.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/settings.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/tools.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture/validator.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture.egg-info/dependency_links.txt +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture.egg-info/entry_points.txt +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture.egg-info/requires.txt +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/prompture.egg-info/top_level.txt +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/pyproject.toml +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/requirements.txt +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/setup.cfg +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/test.py +0 -0
- {prompture-0.0.46.dev1 → prompture-0.0.47}/test_version_diagnosis.py +0 -0

@@ -0,0 +1,221 @@ .claude/skills/add-driver/SKILL.md (new file)

---
name: add-driver
description: Scaffold a new LLM provider driver for Prompture. Creates sync + async driver classes, registers them in the driver registry, adds settings, env template, setup.py extras, package exports, discovery integration, and models.dev pricing. Use when adding support for a new LLM provider.
metadata:
  author: prompture
  version: "2.0"
---

# Add a New LLM Driver

Scaffolds all files needed to integrate a new LLM provider into Prompture.

## Before Starting

Ask the user for:
- **Provider name** (lowercase, used as registry key and `provider/model` prefix)
- **SDK package name** on PyPI and minimum version (or `requests`/`httpx` for raw HTTP)
- **Default model ID**
- **Authentication** — API key env var name, endpoint URL, or both
- **API compatibility** — OpenAI-compatible (`/v1/chat/completions`), custom SDK, or proprietary HTTP
- **Lazy or eager import** — lazy if SDK is optional, eager if it's in `install_requires`

Also look up the provider on [models.dev](https://models.dev) to determine:
- **models.dev provider name** (e.g., `"anthropic"` for Claude, `"xai"` for Grok, `"moonshotai"` for Moonshot)
- **Whether models.dev has entries** — if yes, pricing comes from models.dev live data (set `MODEL_PRICING = {}`). If no, add hardcoded pricing.

## Files to Create or Modify (11 total)

### 1. NEW: `prompture/drivers/{provider}_driver.py` (sync driver)

See [references/driver-template.md](references/driver-template.md) for the full skeleton.

Key rules:
- Subclass `CostMixin, Driver` (NOT just `Driver`)
- Set class-level capability flags: `supports_json_mode`, `supports_json_schema`, `supports_tool_use`, `supports_streaming`, `supports_vision`, `supports_messages`
- Use `self._get_model_config(provider, model)` to get per-model `tokens_param` and `supports_temperature` from models.dev
- Use `self._calculate_cost(provider, model, prompt_tokens, completion_tokens)` — do NOT manually compute costs
- Use `self._validate_model_capabilities(provider, model, ...)` before API calls to warn about unsupported features
- If models.dev has this provider's data, set `MODEL_PRICING = {}` (empty — pricing comes live from models.dev)
- `generate()` returns `{"text": str, "meta": dict}`
- `meta` MUST contain: `prompt_tokens`, `completion_tokens`, `total_tokens`, `cost`, `raw_response`, `model_name`
- Implement `generate_messages()`, `generate_messages_with_tools()`, and `generate_messages_stream()` for full feature support
- Optional SDK: wrap import in try/except, raise `ImportError` pointing to `pip install prompture[{provider}]`

### 2. NEW: `prompture/drivers/async_{provider}_driver.py` (async driver)

Mirror of the sync driver using the `AsyncDriver` base class (a minimal sketch follows after this list):
- Subclass `CostMixin, AsyncDriver`
- Same capability flags as the sync driver
- Share `MODEL_PRICING` from the sync driver: `MODEL_PRICING = {Provider}Driver.MODEL_PRICING`
- Use `httpx.AsyncClient` for HTTP calls (or async SDK methods)
- All generate methods are `async def`
- Streaming returns `AsyncIterator[dict[str, Any]]`

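The reference template only spells out the sync skeleton, so here is a rough illustration of the bullets above. Names and calls mirror the sync template, not a shipped async driver, and `__init__` is assumed to match the sync constructor:

```python
# Illustrative async skeleton only; the real async drivers in this
# release may differ in detail.
from typing import Any

import httpx

from ..async_driver import AsyncDriver
from ..cost_mixin import CostMixin


class Async{Provider}Driver(CostMixin, AsyncDriver):
    # Same capability flags as the sync driver
    supports_json_mode = True
    supports_streaming = True

    # Share pricing with the sync driver
    MODEL_PRICING = {Provider}Driver.MODEL_PRICING

    # __init__ mirrors the sync driver (api_key, model, endpoint)

    async def generate(self, prompt: str, options: dict[str, Any]) -> dict[str, Any]:
        messages = [{"role": "user", "content": prompt}]
        model = options.get("model", self.model)

        # httpx.AsyncClient replaces requests in the sync skeleton
        async with httpx.AsyncClient(timeout=120) as client:
            response = await client.post(
                f"{self.base_url}/chat/completions",
                headers=self.headers,
                json={"model": model, "messages": messages},
            )
        response.raise_for_status()
        resp = response.json()

        usage = resp.get("usage", {})
        cost = self._calculate_cost(
            "{provider}", model,
            usage.get("prompt_tokens", 0), usage.get("completion_tokens", 0),
        )
        return {
            "text": resp["choices"][0]["message"].get("content") or "",
            "meta": {
                "prompt_tokens": usage.get("prompt_tokens", 0),
                "completion_tokens": usage.get("completion_tokens", 0),
                "total_tokens": usage.get("total_tokens", 0),
                "cost": round(cost, 6),
                "raw_response": resp,
                "model_name": model,
            },
        }
```
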
### 3. `prompture/drivers/__init__.py`

- Add sync import: `from .{provider}_driver import {Provider}Driver`
- Add async import: `from .async_{provider}_driver import Async{Provider}Driver`
- Register sync driver with `register_driver()`:
```python
register_driver(
    "{provider}",
    lambda model=None: {Provider}Driver(
        api_key=settings.{provider}_api_key,
        model=model or settings.{provider}_model,
    ),
    overwrite=True,
)
```
- Add `"{Provider}Driver"` and `"Async{Provider}Driver"` to `__all__`

### 4. `prompture/__init__.py`

- Add `{Provider}Driver` to the `.drivers` import line
- Add `"{Provider}Driver"` to `__all__` under `# Drivers` (see the sketch below)

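Concretely, the two edits amount to something like this sketch (surrounding names elided, placement illustrative):

```python
# prompture/__init__.py
from .drivers import (
    ...,
    {Provider}Driver,
)

__all__ = [
    ...,
    # Drivers
    "{Provider}Driver",
]
```
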
### 5. `prompture/settings.py`

Add inside the `Settings` class:
```python
# {Provider}
{provider}_api_key: Optional[str] = None
{provider}_model: str = "default-model"
# Add endpoint if the provider supports custom endpoints:
# {provider}_endpoint: str = "https://api.example.com/v1"
```

### 6. `prompture/discovery.py`

Two changes are required:

**a) Add to the `provider_classes` dict and the configuration check:**
- Import the driver class at the top of the file
- Add to `provider_classes`: `"{provider}": {Provider}Driver`
- Add a configuration check in the `is_configured` block:
```python
elif provider == "{provider}":
    if settings.{provider}_api_key or os.getenv("{PROVIDER}_API_KEY"):
        is_configured = True
```
For local/endpoint-only providers (like ollama), use endpoint presence instead.

**b) This ensures `get_available_models()` returns the provider's models** from both:
- Static detection: `MODEL_PRICING` keys (or empty if pricing is from models.dev)
- models.dev enrichment: via `PROVIDER_MAP` in `model_rates.py` (see step 7)

### 7. `prompture/model_rates.py` — `PROVIDER_MAP`

If models.dev has this provider's data, add the mapping:
```python
PROVIDER_MAP: dict[str, str] = {
    ...
    "{provider}": "{models_dev_name}",  # e.g., "moonshot": "moonshotai"
}
```

This enables:
- **Live pricing** via `get_model_rates()` — used by `CostMixin._calculate_cost()`
- **Capability metadata** via `get_model_capabilities()` — used by `_get_model_config()` and `_validate_model_capabilities()`
- **Model discovery** via `get_all_provider_models()` — called by `discovery.py` to list all available models

To find the correct models.dev name, check: `https://models.dev/{models_dev_name}`

If models.dev does NOT have this provider, skip this step. The driver will use hardcoded `MODEL_PRICING` for costs and return `None` for capabilities.

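A quick way to sanity-check the mapping once added; a hedged sketch, since the helper signatures are inferred from how the drivers and the flow diagram below call them:

```python
from prompture.model_rates import (
    get_all_provider_models,
    get_model_capabilities,
    get_model_rates,
)

caps = get_model_capabilities("{provider}", "some-model")  # None if unmapped
if caps is not None:
    print(caps.is_reasoning, caps.supports_structured_output)

print(get_model_rates("{provider}", "some-model"))       # per-1M-token rates
print(get_all_provider_models("{models_dev_name}")[:5])  # model IDs from models.dev
```
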
### 8. `setup.py` / `pyproject.toml`

If optional: add `"{provider}": ["{sdk}>={version}"]` to `extras_require`.
If required: add to `install_requires`.

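For the optional case, the resulting entry would look roughly like this (placeholders as above):

```python
# setup.py sketch, inside setup(...)
extras_require={
    # ...existing extras...
    "{provider}": ["{sdk}>={version}"],
},
```
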
### 9. `.env.copy`

Add a section:
```
# {Provider} Configuration
{PROVIDER}_API_KEY=your-api-key-here
{PROVIDER}_MODEL=default-model
```

### 10. `CLAUDE.md`

Add `{provider}` to the driver list in the Module Layout bullet.

### 11. OPTIONAL: `examples/{provider}_example.py`

Follow the existing example pattern (see `grok_example.py` or `groq_example.py`):
- Two extraction examples: default instruction + custom instruction
- Show different models if available
- Print JSON output and token usage statistics

## Important: Reasoning Model Handling

If the provider has reasoning models (models with `reasoning: true` on models.dev):
- Check `caps.is_reasoning` before sending `response_format` — reasoning models often don't support it
- Handle the `reasoning_content` field in responses (both regular and streaming)
- Some reasoning models don't support `temperature` — respect `supports_temperature` from `_get_model_config()`

Example pattern (see `moonshot_driver.py`):
```python
if options.get("json_mode"):
    from ..model_rates import get_model_capabilities

    caps = get_model_capabilities("{provider}", model)
    is_reasoning = caps is not None and caps.is_reasoning is True
    model_supports_structured = (
        caps is None or caps.supports_structured_output is not False
    ) and not is_reasoning

    if model_supports_structured:
        # Send response_format
        ...
```

## How models.dev Integration Works

```
User calls extract_and_jsonify("moonshot/kimi-k2.5", ...)
│
├─► core.py checks driver.supports_json_mode → decides json_mode
│
├─► driver._get_model_config("moonshot", "kimi-k2.5")
│   └─► model_rates.get_model_capabilities("moonshot", "kimi-k2.5")
│       └─► PROVIDER_MAP["moonshot"] → "moonshotai"
│           └─► models.dev data["moonshotai"]["models"]["kimi-k2.5"]
│               └─► Returns: supports_temperature, is_reasoning, context_window, etc.
│
├─► driver._calculate_cost("moonshot", "kimi-k2.5", tokens...)
│   └─► model_rates.get_model_rates("moonshot", "kimi-k2.5")
│       └─► Same lookup → returns {input: 0.6, output: 3.0} per 1M tokens
│
└─► discovery.get_available_models()
    └─► Iterates PROVIDER_MAP → get_all_provider_models("moonshotai")
        └─► Returns all model IDs under the provider
```

## Model Name Resolution

Model names are **always provider-scoped**. The format is `"provider/model_id"`.

- `get_driver_for_model("openrouter/qwen-2.5")` → looks up `"openrouter"` in the driver registry
- `get_model_capabilities("openrouter", "qwen-2.5")` → looks in models.dev under `data["openrouter"]["models"]["qwen-2.5"]`
- `get_model_capabilities("modelscope", "qwen-2.5")` → looks in models.dev under `data["modelscope"]["models"]["qwen-2.5"]`

The same model ID under different providers is **not ambiguous** — each provider has its own namespace in both the driver registry and the models.dev data.

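Resolution is effectively a prefix split; a minimal sketch of the idea (the actual parsing lives inside the registry and `model_rates` code):

```python
from prompture.drivers import get_driver_for_model
from prompture.model_rates import get_model_capabilities

spec = "openrouter/qwen-2.5"
provider, _, model_id = spec.partition("/")        # ("openrouter", "qwen-2.5")

driver = get_driver_for_model(spec)                # registry lookup by "openrouter"
caps = get_model_capabilities(provider, model_id)  # models.dev lookup, per provider
```
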
## Verification

```bash
# Import check
python -c "from prompture import {Provider}Driver; print('OK')"
python -c "from prompture.drivers import Async{Provider}Driver; print('OK')"

# Registry check
python -c "from prompture.drivers import get_driver_for_model; d = get_driver_for_model('{provider}/test'); print(type(d).__name__, d.model)"

# Discovery check
python -c "from prompture import get_available_models; ms = [m for m in get_available_models() if m.startswith('{provider}/')]; print(f'Found {{len(ms)}} models'); print(ms[:5])"

# Run tests
pytest tests/ -x -q
```

@@ -0,0 +1,364 @@ .claude/skills/add-driver/references/driver-template.md (new file)

# Driver Template

Every Prompture driver follows this skeleton. The sync driver uses `requests`,
the async driver uses `httpx`.

## Sync Driver — `prompture/drivers/{provider}_driver.py`

```python
"""{Provider} driver implementation.

Requires the `requests` package. Uses {PROVIDER}_API_KEY env var.

All pricing comes from models.dev (provider: "{models_dev_name}") — no hardcoded pricing.
"""

import json
import logging
import os
from collections.abc import Iterator
from typing import Any

import requests

from ..cost_mixin import CostMixin, prepare_strict_schema
from ..driver import Driver

logger = logging.getLogger(__name__)


class {Provider}Driver(CostMixin, Driver):
    supports_json_mode = True
    supports_json_schema = True
    supports_tool_use = True
    supports_streaming = True
    supports_vision = False  # set True if the provider supports image input
    supports_messages = True

    # All pricing resolved live from models.dev (provider: "{models_dev_name}")
    # If models.dev does NOT have this provider, add hardcoded pricing:
    # MODEL_PRICING = {
    #     "model-name": {"prompt": 0.001, "completion": 0.002},
    # }
    MODEL_PRICING: dict[str, dict[str, Any]] = {}

    def __init__(
        self,
        api_key: str | None = None,
        model: str = "default-model",
        endpoint: str = "https://api.example.com/v1",
    ):
        self.api_key = api_key or os.getenv("{PROVIDER}_API_KEY")
        if not self.api_key:
            raise ValueError("{Provider} API key not found. Set {PROVIDER}_API_KEY env var.")

        self.model = model
        self.base_url = endpoint.rstrip("/")
        self.headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }

    def generate(self, prompt: str, options: dict[str, Any]) -> dict[str, Any]:
        messages = [{"role": "user", "content": prompt}]
        return self._do_generate(messages, options)

    def generate_messages(self, messages: list[dict[str, str]], options: dict[str, Any]) -> dict[str, Any]:
        return self._do_generate(messages, options)

    def _do_generate(self, messages: list[dict[str, str]], options: dict[str, Any]) -> dict[str, Any]:
        model = options.get("model", self.model)

        # Per-model config from models.dev (tokens_param, supports_temperature, etc.)
        model_config = self._get_model_config("{provider}", model)
        tokens_param = model_config["tokens_param"]
        supports_temperature = model_config["supports_temperature"]

        # Validate capabilities (logs warnings if model doesn't support requested features)
        self._validate_model_capabilities(
            "{provider}",
            model,
            using_json_schema=bool(options.get("json_schema")),
        )

        opts = {"temperature": 1.0, "max_tokens": 512, **options}

        data: dict[str, Any] = {
            "model": model,
            "messages": messages,
        }
        data[tokens_param] = opts.get("max_tokens", 512)

        if supports_temperature and "temperature" in opts:
            data["temperature"] = opts["temperature"]

        # Native JSON mode — check per-model capabilities before sending response_format
        if options.get("json_mode"):
            from ..model_rates import get_model_capabilities

            caps = get_model_capabilities("{provider}", model)
            is_reasoning = caps is not None and caps.is_reasoning is True
            model_supports_structured = (
                caps is None or caps.supports_structured_output is not False
            ) and not is_reasoning

            if model_supports_structured:
                json_schema = options.get("json_schema")
                if json_schema:
                    schema_copy = prepare_strict_schema(json_schema)
                    data["response_format"] = {
                        "type": "json_schema",
                        "json_schema": {
                            "name": "extraction",
                            "strict": True,
                            "schema": schema_copy,
                        },
                    }
                else:
                    data["response_format"] = {"type": "json_object"}

        try:
            response = requests.post(
                f"{self.base_url}/chat/completions",
                headers=self.headers,
                json=data,
                timeout=120,
            )
            response.raise_for_status()
            resp = response.json()
        except requests.exceptions.HTTPError as e:
            raise RuntimeError(f"{Provider} API request failed: {e!s}") from e
        except requests.exceptions.RequestException as e:
            raise RuntimeError(f"{Provider} API request failed: {e!s}") from e

        usage = resp.get("usage", {})
        prompt_tokens = usage.get("prompt_tokens", 0)
        completion_tokens = usage.get("completion_tokens", 0)
        total_tokens = usage.get("total_tokens", 0)

        # Cost calculated from models.dev live rates, falling back to MODEL_PRICING
        total_cost = self._calculate_cost("{provider}", model, prompt_tokens, completion_tokens)

        meta = {
            "prompt_tokens": prompt_tokens,
            "completion_tokens": completion_tokens,
            "total_tokens": total_tokens,
            "cost": round(total_cost, 6),
            "raw_response": resp,
            "model_name": model,
        }

        message = resp["choices"][0]["message"]
        text = message.get("content") or ""

        # Reasoning models may return content in reasoning_content when content is empty
        if not text and message.get("reasoning_content"):
            text = message["reasoning_content"]

        return {"text": text, "meta": meta}

    # ------------------------------------------------------------------
    # Tool use
    # ------------------------------------------------------------------

    def generate_messages_with_tools(
        self,
        messages: list[dict[str, Any]],
        tools: list[dict[str, Any]],
        options: dict[str, Any],
    ) -> dict[str, Any]:
        """Generate a response that may include tool calls."""
        model = options.get("model", self.model)
        model_config = self._get_model_config("{provider}", model)
        tokens_param = model_config["tokens_param"]
        supports_temperature = model_config["supports_temperature"]

        self._validate_model_capabilities("{provider}", model, using_tool_use=True)

        opts = {"temperature": 1.0, "max_tokens": 512, **options}

        data: dict[str, Any] = {
            "model": model,
            "messages": messages,
            "tools": tools,
        }
        data[tokens_param] = opts.get("max_tokens", 512)

        if supports_temperature and "temperature" in opts:
            data["temperature"] = opts["temperature"]

        if "tool_choice" in options:
            data["tool_choice"] = options["tool_choice"]

        try:
            response = requests.post(
                f"{self.base_url}/chat/completions",
                headers=self.headers,
                json=data,
                timeout=120,
            )
            response.raise_for_status()
            resp = response.json()
        except requests.exceptions.HTTPError as e:
            raise RuntimeError(f"{Provider} API request failed: {e!s}") from e
        except requests.exceptions.RequestException as e:
            raise RuntimeError(f"{Provider} API request failed: {e!s}") from e

        usage = resp.get("usage", {})
        prompt_tokens = usage.get("prompt_tokens", 0)
        completion_tokens = usage.get("completion_tokens", 0)
        total_tokens = usage.get("total_tokens", 0)
        total_cost = self._calculate_cost("{provider}", model, prompt_tokens, completion_tokens)

        meta = {
            "prompt_tokens": prompt_tokens,
            "completion_tokens": completion_tokens,
            "total_tokens": total_tokens,
            "cost": round(total_cost, 6),
            "raw_response": resp,
            "model_name": model,
        }

        choice = resp["choices"][0]
        text = choice["message"].get("content") or ""
        stop_reason = choice.get("finish_reason")

        tool_calls_out: list[dict[str, Any]] = []
        for tc in choice["message"].get("tool_calls", []):
            try:
                args = json.loads(tc["function"]["arguments"])
            except (json.JSONDecodeError, TypeError):
                args = {}
            tool_calls_out.append({
                "id": tc["id"],
                "name": tc["function"]["name"],
                "arguments": args,
            })

        return {
            "text": text,
            "meta": meta,
            "tool_calls": tool_calls_out,
            "stop_reason": stop_reason,
        }

    # ------------------------------------------------------------------
    # Streaming
    # ------------------------------------------------------------------

    def generate_messages_stream(
        self,
        messages: list[dict[str, Any]],
        options: dict[str, Any],
    ) -> Iterator[dict[str, Any]]:
        """Yield response chunks via streaming API."""
        model = options.get("model", self.model)
        model_config = self._get_model_config("{provider}", model)
        tokens_param = model_config["tokens_param"]
        supports_temperature = model_config["supports_temperature"]

        opts = {"temperature": 1.0, "max_tokens": 512, **options}

        data: dict[str, Any] = {
            "model": model,
            "messages": messages,
            "stream": True,
            "stream_options": {"include_usage": True},
        }
        data[tokens_param] = opts.get("max_tokens", 512)

        if supports_temperature and "temperature" in opts:
            data["temperature"] = opts["temperature"]

        response = requests.post(
            f"{self.base_url}/chat/completions",
            headers=self.headers,
            json=data,
            stream=True,
            timeout=120,
        )
        response.raise_for_status()

        full_text = ""
        prompt_tokens = 0
        completion_tokens = 0

        for line in response.iter_lines(decode_unicode=True):
            if not line or not line.startswith("data: "):
                continue
            payload = line[len("data: "):]
            if payload.strip() == "[DONE]":
                break
            try:
                chunk = json.loads(payload)
            except json.JSONDecodeError:
                continue

            usage = chunk.get("usage")
            if usage:
                prompt_tokens = usage.get("prompt_tokens", 0)
                completion_tokens = usage.get("completion_tokens", 0)

            choices = chunk.get("choices", [])
            if choices:
                delta = choices[0].get("delta", {})
                content = delta.get("content") or ""
                # Reasoning models stream thinking via reasoning_content
                if not content:
                    content = delta.get("reasoning_content") or ""
                if content:
                    full_text += content
                    yield {"type": "delta", "text": content}

        total_tokens = prompt_tokens + completion_tokens
        total_cost = self._calculate_cost("{provider}", model, prompt_tokens, completion_tokens)

        yield {
            "type": "done",
            "text": full_text,
            "meta": {
                "prompt_tokens": prompt_tokens,
                "completion_tokens": completion_tokens,
                "total_tokens": total_tokens,
                "cost": round(total_cost, 6),
                "raw_response": {},
                "model_name": model,
            },
        }
```

## Lazy Import Pattern (for optional SDKs)

```python
def __init__(self, ...):
    self._client = None
    # defer import

def _ensure_client(self):
    if self._client is not None:
        return
    try:
        from some_sdk import Client
    except ImportError:
        raise ImportError(
            "The 'some-sdk' package is required. "
            "Install with: pip install prompture[provider]"
        )
    self._client = Client(api_key=self.api_key)
```

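Each public generate method would then call the guard first; a short usage sketch (the call placement is an assumption, not taken from a shipped driver):

```python
def generate(self, prompt: str, options: dict[str, Any]) -> dict[str, Any]:
    self._ensure_client()  # raises the helpful ImportError above if the SDK is absent
    # ...use self._client for the actual request...
```
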
## Existing Drivers for Reference

| Driver | File | SDK | Auth | models.dev |
|--------|------|-----|------|------------|
| OpenAI | `openai_driver.py` | `openai` | API key | `openai` |
| Claude | `claude_driver.py` | `anthropic` | API key | `anthropic` |
| Google | `google_driver.py` | `google-generativeai` | API key | `google` |
| Groq | `groq_driver.py` | `groq` | API key | `groq` |
| Grok | `grok_driver.py` | `requests` | API key | `xai` |
| Moonshot | `moonshot_driver.py` | `requests` | API key + endpoint | `moonshotai` |
| Z.ai | `zai_driver.py` | `requests` | API key + endpoint | `zai` |
| ModelScope | `modelscope_driver.py` | `requests` | API key + endpoint | — |
| OpenRouter | `openrouter_driver.py` | `requests` | API key | `openrouter` |
| Ollama | `ollama_driver.py` | `requests` | Endpoint URL | — |
| LM Studio | `lmstudio_driver.py` | `requests` | Endpoint URL | — |
| AirLLM | `airllm_driver.py` | `airllm` (lazy) | None (local) | — |