anymodel-py 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43) hide show
  1. anymodel_py-0.1.0/.github/workflows/ci.yml +32 -0
  2. anymodel_py-0.1.0/.github/workflows/publish.yml +30 -0
  3. anymodel_py-0.1.0/.gitignore +8 -0
  4. anymodel_py-0.1.0/LICENSE +21 -0
  5. anymodel_py-0.1.0/PKG-INFO +303 -0
  6. anymodel_py-0.1.0/README.md +268 -0
  7. anymodel_py-0.1.0/examples/basic.py +196 -0
  8. anymodel_py-0.1.0/pyproject.toml +67 -0
  9. anymodel_py-0.1.0/src/anymodel/__init__.py +56 -0
  10. anymodel_py-0.1.0/src/anymodel/_cli.py +51 -0
  11. anymodel_py-0.1.0/src/anymodel/_client.py +235 -0
  12. anymodel_py-0.1.0/src/anymodel/_config.py +97 -0
  13. anymodel_py-0.1.0/src/anymodel/_router.py +246 -0
  14. anymodel_py-0.1.0/src/anymodel/_server.py +116 -0
  15. anymodel_py-0.1.0/src/anymodel/_types.py +392 -0
  16. anymodel_py-0.1.0/src/anymodel/batch/__init__.py +4 -0
  17. anymodel_py-0.1.0/src/anymodel/batch/_manager.py +309 -0
  18. anymodel_py-0.1.0/src/anymodel/batch/_store.py +108 -0
  19. anymodel_py-0.1.0/src/anymodel/providers/__init__.py +21 -0
  20. anymodel_py-0.1.0/src/anymodel/providers/_adapter.py +61 -0
  21. anymodel_py-0.1.0/src/anymodel/providers/_anthropic.py +351 -0
  22. anymodel_py-0.1.0/src/anymodel/providers/_custom.py +71 -0
  23. anymodel_py-0.1.0/src/anymodel/providers/_google.py +366 -0
  24. anymodel_py-0.1.0/src/anymodel/providers/_openai.py +215 -0
  25. anymodel_py-0.1.0/src/anymodel/providers/_registry.py +38 -0
  26. anymodel_py-0.1.0/src/anymodel/py.typed +0 -0
  27. anymodel_py-0.1.0/src/anymodel/utils/__init__.py +40 -0
  28. anymodel_py-0.1.0/src/anymodel/utils/_fs_io.py +132 -0
  29. anymodel_py-0.1.0/src/anymodel/utils/_generation_stats.py +34 -0
  30. anymodel_py-0.1.0/src/anymodel/utils/_id.py +8 -0
  31. anymodel_py-0.1.0/src/anymodel/utils/_model_parser.py +30 -0
  32. anymodel_py-0.1.0/src/anymodel/utils/_rate_limiter.py +77 -0
  33. anymodel_py-0.1.0/src/anymodel/utils/_retry.py +41 -0
  34. anymodel_py-0.1.0/src/anymodel/utils/_transforms.py +63 -0
  35. anymodel_py-0.1.0/src/anymodel/utils/_validate.py +29 -0
  36. anymodel_py-0.1.0/tests/__init__.py +0 -0
  37. anymodel_py-0.1.0/tests/conftest.py +17 -0
  38. anymodel_py-0.1.0/tests/providers/__init__.py +0 -0
  39. anymodel_py-0.1.0/tests/test_batch_store.py +91 -0
  40. anymodel_py-0.1.0/tests/test_model_parser.py +30 -0
  41. anymodel_py-0.1.0/tests/test_transforms.py +33 -0
  42. anymodel_py-0.1.0/tests/test_types.py +23 -0
  43. anymodel_py-0.1.0/tests/test_validate.py +38 -0
@@ -0,0 +1,32 @@
1
+ name: CI
2
+
3
+ on:
4
+ push:
5
+ branches: [main]
6
+ pull_request:
7
+ branches: [main]
8
+
9
+ jobs:
10
+ test:
11
+ runs-on: ubuntu-latest
12
+
13
+ strategy:
14
+ matrix:
15
+ python-version: ["3.10", "3.11", "3.12", "3.13"]
16
+
17
+ steps:
18
+ - uses: actions/checkout@v4
19
+
20
+ - name: Set up Python ${{ matrix.python-version }}
21
+ uses: actions/setup-python@v5
22
+ with:
23
+ python-version: ${{ matrix.python-version }}
24
+
25
+ - name: Install dependencies
26
+ run: pip install -e ".[dev,server]"
27
+
28
+ - name: Lint
29
+ run: ruff check src/ tests/
30
+
31
+ - name: Test
32
+ run: pytest
@@ -0,0 +1,30 @@
1
+ name: Publish to PyPI
2
+
3
+ on:
4
+ release:
5
+ types: [published]
6
+
7
+ jobs:
8
+ publish:
9
+ runs-on: ubuntu-latest
10
+ environment: pypi
11
+ permissions:
12
+ id-token: write
13
+
14
+ steps:
15
+ - uses: actions/checkout@v4
16
+
17
+ - uses: actions/setup-python@v5
18
+ with:
19
+ python-version: "3.12"
20
+
21
+ - name: Install build tools
22
+ run: pip install build
23
+
24
+ - name: Build
25
+ run: python -m build
26
+
27
+ - name: Publish to PyPI
28
+ uses: pypa/gh-action-pypi-publish@release/v1
29
+ with:
30
+ attestations: false
@@ -0,0 +1,8 @@
1
+ .venv/
2
+ .pytest_cache/
3
+ .ruff_cache/
4
+ __pycache__/
5
+ *.pyc
6
+ dist/
7
+ *.egg-info/
8
+ .anymodel/
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 probeo
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,303 @@
1
+ Metadata-Version: 2.4
2
+ Name: anymodel-py
3
+ Version: 0.1.0
4
+ Summary: OpenRouter-compatible LLM router with unified batch support. Route requests across OpenAI, Anthropic, and Google with a single API.
5
+ Project-URL: Homepage, https://github.com/probeo-io/anymodel-py
6
+ Project-URL: Repository, https://github.com/probeo-io/anymodel-py
7
+ Project-URL: Issues, https://github.com/probeo-io/anymodel-py/issues
8
+ Author-email: Probeo <dev@probeo.io>
9
+ License-Expression: MIT
10
+ License-File: LICENSE
11
+ Keywords: ai,anthropic,batch,gemini,llm,openai,router
12
+ Classifier: Development Status :: 3 - Alpha
13
+ Classifier: Intended Audience :: Developers
14
+ Classifier: License :: OSI Approved :: MIT License
15
+ Classifier: Programming Language :: Python :: 3
16
+ Classifier: Programming Language :: Python :: 3.10
17
+ Classifier: Programming Language :: Python :: 3.11
18
+ Classifier: Programming Language :: Python :: 3.12
19
+ Classifier: Programming Language :: Python :: 3.13
20
+ Classifier: Topic :: Software Development :: Libraries
21
+ Classifier: Typing :: Typed
22
+ Requires-Python: >=3.10
23
+ Requires-Dist: aiofiles>=24.1
24
+ Requires-Dist: httpx>=0.27
25
+ Provides-Extra: dev
26
+ Requires-Dist: mypy>=1.13; extra == 'dev'
27
+ Requires-Dist: pytest-asyncio>=0.24; extra == 'dev'
28
+ Requires-Dist: pytest>=8.0; extra == 'dev'
29
+ Requires-Dist: respx>=0.22; extra == 'dev'
30
+ Requires-Dist: ruff>=0.8; extra == 'dev'
31
+ Provides-Extra: server
32
+ Requires-Dist: starlette>=0.40; extra == 'server'
33
+ Requires-Dist: uvicorn>=0.30; extra == 'server'
34
+ Description-Content-Type: text/markdown
35
+
36
+ # anymodel
37
+
38
+ OpenRouter-compatible LLM router with unified batch support for Python. Self-hosted, zero fees.
39
+
40
+ Route requests across OpenAI, Anthropic, and Google with a single API. Add any OpenAI-compatible provider. Run as an SDK or standalone HTTP server.
41
+
42
+ ## Install
43
+
44
+ ```bash
45
+ pip install anymodel-py
46
+ ```
47
+
48
+ ## Quick Start
49
+
50
+ Set your API keys as environment variables:
51
+
52
+ ```bash
53
+ export OPENAI_API_KEY=sk-...
54
+ export ANTHROPIC_API_KEY=sk-ant-...
55
+ export GOOGLE_API_KEY=AIza...
56
+ ```
57
+
58
+ ### SDK Usage
59
+
60
+ ```python
61
+ import asyncio
62
+ from anymodel import AnyModel
63
+
64
+ async def main():
65
+ client = AnyModel()
66
+
67
+ response = await client.chat.completions.create(
68
+ model="anthropic/claude-sonnet-4-6",
69
+ messages=[{"role": "user", "content": "Hello!"}],
70
+ )
71
+ print(response["choices"][0]["message"]["content"])
72
+
73
+ asyncio.run(main())
74
+ ```
75
+
76
+ ### Streaming
77
+
78
+ ```python
79
+ stream = await client.chat.completions.create(
80
+ model="openai/gpt-4o",
81
+ messages=[{"role": "user", "content": "Write a haiku"}],
82
+ stream=True,
83
+ )
84
+
85
+ async for chunk in stream:
86
+ content = chunk["choices"][0].get("delta", {}).get("content", "")
87
+ print(content, end="", flush=True)
88
+ ```
89
+
90
+ ## Supported Providers
91
+
92
+ Set the env var and go. Models are auto-discovered from each provider's API.
93
+
94
+ | Provider | Env Var | Example Model |
95
+ |----------|---------|---------------|
96
+ | OpenAI | `OPENAI_API_KEY` | `openai/gpt-4o` |
97
+ | Anthropic | `ANTHROPIC_API_KEY` | `anthropic/claude-sonnet-4-6` |
98
+ | Google | `GOOGLE_API_KEY` | `google/gemini-2.5-pro` |
99
+ | Mistral | `MISTRAL_API_KEY` | `mistral/mistral-large-latest` |
100
+ | Groq | `GROQ_API_KEY` | `groq/llama-3.3-70b-versatile` |
101
+ | DeepSeek | `DEEPSEEK_API_KEY` | `deepseek/deepseek-chat` |
102
+ | xAI | `XAI_API_KEY` | `xai/grok-3` |
103
+ | Together | `TOGETHER_API_KEY` | `together/meta-llama/Llama-3.3-70B-Instruct-Turbo` |
104
+ | Fireworks | `FIREWORKS_API_KEY` | `fireworks/accounts/fireworks/models/llama-v3p3-70b-instruct` |
105
+ | Perplexity | `PERPLEXITY_API_KEY` | `perplexity/sonar-pro` |
106
+ | Ollama | `OLLAMA_BASE_URL` | `ollama/llama3.3` |
107
+
108
+ ## Fallback Routing
109
+
110
+ Try multiple models in order. If one fails, the next is attempted:
111
+
112
+ ```python
113
+ response = await client.chat.completions.create(
114
+ model="",
115
+ models=[
116
+ "anthropic/claude-sonnet-4-6",
117
+ "openai/gpt-4o",
118
+ "google/gemini-2.5-pro",
119
+ ],
120
+ route="fallback",
121
+ messages=[{"role": "user", "content": "Hello"}],
122
+ )
123
+ ```
124
+
125
+ ## Tool Calling
126
+
127
+ ```python
128
+ response = await client.chat.completions.create(
129
+ model="anthropic/claude-sonnet-4-6",
130
+ messages=[{"role": "user", "content": "What's the weather in NYC?"}],
131
+ tools=[{
132
+ "type": "function",
133
+ "function": {
134
+ "name": "get_weather",
135
+ "description": "Get current weather for a location",
136
+ "parameters": {
137
+ "type": "object",
138
+ "properties": {"location": {"type": "string"}},
139
+ "required": ["location"],
140
+ },
141
+ },
142
+ }],
143
+ tool_choice="auto",
144
+ )
145
+
146
+ for call in response["choices"][0]["message"].get("tool_calls", []):
147
+ print(call["function"]["name"], call["function"]["arguments"])
148
+ ```
149
+
150
+ ## Batch Processing
151
+
152
+ Process many requests with native provider batch APIs or concurrent fallback.
153
+
154
+ ### Submit and wait
155
+
156
+ ```python
157
+ results = await client.batches.create_and_poll({
158
+ "model": "openai/gpt-4o-mini",
159
+ "requests": [
160
+ {"custom_id": "req-1", "messages": [{"role": "user", "content": "Summarize AI"}]},
161
+ {"custom_id": "req-2", "messages": [{"role": "user", "content": "Summarize ML"}]},
162
+ ],
163
+ })
164
+
165
+ for result in results["results"]:
166
+ print(result["custom_id"], result["response"]["choices"][0]["message"]["content"])
167
+ ```
168
+
169
+ ### Submit now, check later
170
+
171
+ ```python
172
+ # Submit and get the batch ID
173
+ batch = await client.batches.create({
174
+ "model": "anthropic/claude-haiku-4-5",
175
+ "requests": [
176
+ {"custom_id": "req-1", "messages": [{"role": "user", "content": "Summarize AI"}]},
177
+ ],
178
+ })
179
+ print(batch["id"]) # "batch-abc123"
180
+
181
+ # Check status any time
182
+ status = await client.batches.get("batch-abc123")
183
+ print(status["status"]) # "pending", "processing", "completed"
184
+
185
+ # Wait for results when ready
186
+ results = await client.batches.poll("batch-abc123")
187
+
188
+ # List all batches
189
+ all_batches = await client.batches.list()
190
+
191
+ # Cancel a batch
192
+ await client.batches.cancel("batch-abc123")
193
+ ```
194
+
195
+ ### Batch configuration
196
+
197
+ ```python
198
+ client = AnyModel({
199
+ "batch": {
200
+ "poll_interval": 10.0, # default poll interval in seconds
201
+ "concurrency_fallback": 10, # concurrent request limit for non-native providers
202
+ },
203
+ "io": {
204
+ "read_concurrency": 30, # concurrent file reads (default: 20)
205
+ "write_concurrency": 15, # concurrent file writes (default: 10)
206
+ },
207
+ })
208
+ ```
209
+
210
+ ## Configuration
211
+
212
+ ```python
213
+ client = AnyModel({
214
+ "anthropic": {"api_key": "sk-ant-..."},
215
+ "openai": {"api_key": "sk-..."},
216
+ "aliases": {
217
+ "default": "anthropic/claude-sonnet-4-6",
218
+ "fast": "anthropic/claude-haiku-4-5",
219
+ "smart": "anthropic/claude-opus-4-6",
220
+ },
221
+ "defaults": {
222
+ "temperature": 0.7,
223
+ "max_tokens": 4096,
224
+ "retries": 2,
225
+ },
226
+ })
227
+
228
+ # Use aliases as model names
229
+ response = await client.chat.completions.create(
230
+ model="fast",
231
+ messages=[{"role": "user", "content": "Quick answer"}],
232
+ )
233
+ ```
234
+
235
+ ### Config File
236
+
237
+ Create `anymodel.config.json` in your project root:
238
+
239
+ ```json
240
+ {
241
+ "anthropic": {
242
+ "api_key": "${ANTHROPIC_API_KEY}"
243
+ },
244
+ "aliases": {
245
+ "default": "anthropic/claude-sonnet-4-6"
246
+ },
247
+ "defaults": {
248
+ "temperature": 0.7,
249
+ "max_tokens": 4096
250
+ }
251
+ }
252
+ ```
253
+
254
+ `${ENV_VAR}` references are interpolated from environment variables.
255
+
256
+ ## Custom Providers
257
+
258
+ Add any OpenAI-compatible endpoint:
259
+
260
+ ```python
261
+ client = AnyModel({
262
+ "custom": {
263
+ "ollama": {
264
+ "base_url": "http://localhost:11434/v1",
265
+ "models": ["llama3.3", "mistral"],
266
+ },
267
+ },
268
+ })
269
+
270
+ response = await client.chat.completions.create(
271
+ model="ollama/llama3.3",
272
+ messages=[{"role": "user", "content": "Hello from Ollama"}],
273
+ )
274
+ ```
275
+
276
+ ## Server Mode
277
+
278
+ Run as a standalone HTTP server compatible with the OpenAI SDK:
279
+
280
+ ```bash
281
+ pip install "anymodel-py[server]"
282
+ anymodel serve --port 4141
283
+ ```
284
+
285
+ Then point any OpenAI-compatible client at it:
286
+
287
+ ```python
288
+ from openai import OpenAI
289
+
290
+ client = OpenAI(base_url="http://localhost:4141/api/v1", api_key="unused")
291
+ response = client.chat.completions.create(
292
+ model="anthropic/claude-sonnet-4-6",
293
+ messages=[{"role": "user", "content": "Hello via server"}],
294
+ )
295
+ ```
296
+
297
+ ## Also Available
298
+
299
+ - **Node.js**: [`@probeo/anymodel`](https://github.com/probeo-io/anymodel) on npm
300
+
301
+ ## License
302
+
303
+ MIT
@@ -0,0 +1,268 @@
1
+ # anymodel
2
+
3
+ OpenRouter-compatible LLM router with unified batch support for Python. Self-hosted, zero fees.
4
+
5
+ Route requests across OpenAI, Anthropic, and Google with a single API. Add any OpenAI-compatible provider. Run as an SDK or standalone HTTP server.
6
+
7
+ ## Install
8
+
9
+ ```bash
10
+ pip install anymodel-py
11
+ ```
12
+
13
+ ## Quick Start
14
+
15
+ Set your API keys as environment variables:
16
+
17
+ ```bash
18
+ export OPENAI_API_KEY=sk-...
19
+ export ANTHROPIC_API_KEY=sk-ant-...
20
+ export GOOGLE_API_KEY=AIza...
21
+ ```
22
+
23
+ ### SDK Usage
24
+
25
+ ```python
26
+ import asyncio
27
+ from anymodel import AnyModel
28
+
29
+ async def main():
30
+ client = AnyModel()
31
+
32
+ response = await client.chat.completions.create(
33
+ model="anthropic/claude-sonnet-4-6",
34
+ messages=[{"role": "user", "content": "Hello!"}],
35
+ )
36
+ print(response["choices"][0]["message"]["content"])
37
+
38
+ asyncio.run(main())
39
+ ```
40
+
41
+ ### Streaming
42
+
43
+ ```python
44
+ stream = await client.chat.completions.create(
45
+ model="openai/gpt-4o",
46
+ messages=[{"role": "user", "content": "Write a haiku"}],
47
+ stream=True,
48
+ )
49
+
50
+ async for chunk in stream:
51
+ content = chunk["choices"][0].get("delta", {}).get("content", "")
52
+ print(content, end="", flush=True)
53
+ ```
54
+
55
+ ## Supported Providers
56
+
57
+ Set the env var and go. Models are auto-discovered from each provider's API.
58
+
59
+ | Provider | Env Var | Example Model |
60
+ |----------|---------|---------------|
61
+ | OpenAI | `OPENAI_API_KEY` | `openai/gpt-4o` |
62
+ | Anthropic | `ANTHROPIC_API_KEY` | `anthropic/claude-sonnet-4-6` |
63
+ | Google | `GOOGLE_API_KEY` | `google/gemini-2.5-pro` |
64
+ | Mistral | `MISTRAL_API_KEY` | `mistral/mistral-large-latest` |
65
+ | Groq | `GROQ_API_KEY` | `groq/llama-3.3-70b-versatile` |
66
+ | DeepSeek | `DEEPSEEK_API_KEY` | `deepseek/deepseek-chat` |
67
+ | xAI | `XAI_API_KEY` | `xai/grok-3` |
68
+ | Together | `TOGETHER_API_KEY` | `together/meta-llama/Llama-3.3-70B-Instruct-Turbo` |
69
+ | Fireworks | `FIREWORKS_API_KEY` | `fireworks/accounts/fireworks/models/llama-v3p3-70b-instruct` |
70
+ | Perplexity | `PERPLEXITY_API_KEY` | `perplexity/sonar-pro` |
71
+ | Ollama | `OLLAMA_BASE_URL` | `ollama/llama3.3` |
72
+
73
+ ## Fallback Routing
74
+
75
+ Try multiple models in order. If one fails, the next is attempted:
76
+
77
+ ```python
78
+ response = await client.chat.completions.create(
79
+ model="",
80
+ models=[
81
+ "anthropic/claude-sonnet-4-6",
82
+ "openai/gpt-4o",
83
+ "google/gemini-2.5-pro",
84
+ ],
85
+ route="fallback",
86
+ messages=[{"role": "user", "content": "Hello"}],
87
+ )
88
+ ```
89
+
90
+ ## Tool Calling
91
+
92
+ ```python
93
+ response = await client.chat.completions.create(
94
+ model="anthropic/claude-sonnet-4-6",
95
+ messages=[{"role": "user", "content": "What's the weather in NYC?"}],
96
+ tools=[{
97
+ "type": "function",
98
+ "function": {
99
+ "name": "get_weather",
100
+ "description": "Get current weather for a location",
101
+ "parameters": {
102
+ "type": "object",
103
+ "properties": {"location": {"type": "string"}},
104
+ "required": ["location"],
105
+ },
106
+ },
107
+ }],
108
+ tool_choice="auto",
109
+ )
110
+
111
+ for call in response["choices"][0]["message"].get("tool_calls", []):
112
+ print(call["function"]["name"], call["function"]["arguments"])
113
+ ```
114
+
115
+ ## Batch Processing
116
+
117
+ Process many requests with native provider batch APIs or concurrent fallback.
118
+
119
+ ### Submit and wait
120
+
121
+ ```python
122
+ results = await client.batches.create_and_poll({
123
+ "model": "openai/gpt-4o-mini",
124
+ "requests": [
125
+ {"custom_id": "req-1", "messages": [{"role": "user", "content": "Summarize AI"}]},
126
+ {"custom_id": "req-2", "messages": [{"role": "user", "content": "Summarize ML"}]},
127
+ ],
128
+ })
129
+
130
+ for result in results["results"]:
131
+ print(result["custom_id"], result["response"]["choices"][0]["message"]["content"])
132
+ ```
133
+
134
+ ### Submit now, check later
135
+
136
+ ```python
137
+ # Submit and get the batch ID
138
+ batch = await client.batches.create({
139
+ "model": "anthropic/claude-haiku-4-5",
140
+ "requests": [
141
+ {"custom_id": "req-1", "messages": [{"role": "user", "content": "Summarize AI"}]},
142
+ ],
143
+ })
144
+ print(batch["id"]) # "batch-abc123"
145
+
146
+ # Check status any time
147
+ status = await client.batches.get("batch-abc123")
148
+ print(status["status"]) # "pending", "processing", "completed"
149
+
150
+ # Wait for results when ready
151
+ results = await client.batches.poll("batch-abc123")
152
+
153
+ # List all batches
154
+ all_batches = await client.batches.list()
155
+
156
+ # Cancel a batch
157
+ await client.batches.cancel("batch-abc123")
158
+ ```
159
+
160
+ ### Batch configuration
161
+
162
+ ```python
163
+ client = AnyModel({
164
+ "batch": {
165
+ "poll_interval": 10.0, # default poll interval in seconds
166
+ "concurrency_fallback": 10, # concurrent request limit for non-native providers
167
+ },
168
+ "io": {
169
+ "read_concurrency": 30, # concurrent file reads (default: 20)
170
+ "write_concurrency": 15, # concurrent file writes (default: 10)
171
+ },
172
+ })
173
+ ```
174
+
175
+ ## Configuration
176
+
177
+ ```python
178
+ client = AnyModel({
179
+ "anthropic": {"api_key": "sk-ant-..."},
180
+ "openai": {"api_key": "sk-..."},
181
+ "aliases": {
182
+ "default": "anthropic/claude-sonnet-4-6",
183
+ "fast": "anthropic/claude-haiku-4-5",
184
+ "smart": "anthropic/claude-opus-4-6",
185
+ },
186
+ "defaults": {
187
+ "temperature": 0.7,
188
+ "max_tokens": 4096,
189
+ "retries": 2,
190
+ },
191
+ })
192
+
193
+ # Use aliases as model names
194
+ response = await client.chat.completions.create(
195
+ model="fast",
196
+ messages=[{"role": "user", "content": "Quick answer"}],
197
+ )
198
+ ```
199
+
200
+ ### Config File
201
+
202
+ Create `anymodel.config.json` in your project root:
203
+
204
+ ```json
205
+ {
206
+ "anthropic": {
207
+ "api_key": "${ANTHROPIC_API_KEY}"
208
+ },
209
+ "aliases": {
210
+ "default": "anthropic/claude-sonnet-4-6"
211
+ },
212
+ "defaults": {
213
+ "temperature": 0.7,
214
+ "max_tokens": 4096
215
+ }
216
+ }
217
+ ```
218
+
219
+ `${ENV_VAR}` references are interpolated from environment variables.
220
+
221
+ ## Custom Providers
222
+
223
+ Add any OpenAI-compatible endpoint:
224
+
225
+ ```python
226
+ client = AnyModel({
227
+ "custom": {
228
+ "ollama": {
229
+ "base_url": "http://localhost:11434/v1",
230
+ "models": ["llama3.3", "mistral"],
231
+ },
232
+ },
233
+ })
234
+
235
+ response = await client.chat.completions.create(
236
+ model="ollama/llama3.3",
237
+ messages=[{"role": "user", "content": "Hello from Ollama"}],
238
+ )
239
+ ```
240
+
241
+ ## Server Mode
242
+
243
+ Run as a standalone HTTP server compatible with the OpenAI SDK:
244
+
245
+ ```bash
246
+ pip install "anymodel-py[server]"
247
+ anymodel serve --port 4141
248
+ ```
249
+
250
+ Then point any OpenAI-compatible client at it:
251
+
252
+ ```python
253
+ from openai import OpenAI
254
+
255
+ client = OpenAI(base_url="http://localhost:4141/api/v1", api_key="unused")
256
+ response = client.chat.completions.create(
257
+ model="anthropic/claude-sonnet-4-6",
258
+ messages=[{"role": "user", "content": "Hello via server"}],
259
+ )
260
+ ```
261
+
262
+ ## Also Available
263
+
264
+ - **Node.js**: [`@probeo/anymodel`](https://github.com/probeo-io/anymodel) on npm
265
+
266
+ ## License
267
+
268
+ MIT