alltoken-0.2.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,46 @@
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+ *.egg
7
+ *.egg-info/
8
+ .eggs/
9
+
10
+ # Build / dist
11
+ build/
12
+ dist/
13
+ wheels/
14
+ *.whl
15
+
16
+ # Virtualenvs
17
+ .venv/
18
+ venv/
19
+ env/
20
+ ENV/
21
+
22
+ # Test / coverage
23
+ .pytest_cache/
24
+ .coverage
25
+ .coverage.*
26
+ htmlcov/
27
+ .tox/
28
+ .nox/
29
+ coverage.xml
30
+
31
+ # Type checkers
32
+ .mypy_cache/
33
+ .pyright/
34
+ .pytype/
35
+
36
+ # Linters
37
+ .ruff_cache/
38
+
39
+ # Env
40
+ .env
41
+ .env.local
42
+
43
+ # OS / editor
44
+ .DS_Store
45
+ .vscode/
46
+ .idea/
alltoken-0.2.0/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 AllToken
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -0,0 +1,118 @@
1
+ Metadata-Version: 2.4
2
+ Name: alltoken
3
+ Version: 0.2.0
4
+ Summary: Official Python SDK for AllToken — one API for OpenAI, Anthropic, and 100+ models.
5
+ Project-URL: Homepage, https://alltoken.ai
6
+ Project-URL: Repository, https://github.com/alltoken-ai/alltoken-python
7
+ Project-URL: Documentation, https://alltoken.ai/docs
8
+ Project-URL: Issues, https://github.com/alltoken-ai/alltoken-python/issues
9
+ Author-email: AllToken <dev@alltoken.ai>
10
+ License: MIT
11
+ License-File: LICENSE
12
+ Keywords: ai,alltoken,anthropic,api,llm,openai
13
+ Classifier: Development Status :: 3 - Alpha
14
+ Classifier: Intended Audience :: Developers
15
+ Classifier: License :: OSI Approved :: MIT License
16
+ Classifier: Programming Language :: Python :: 3
17
+ Classifier: Programming Language :: Python :: 3.10
18
+ Classifier: Programming Language :: Python :: 3.11
19
+ Classifier: Programming Language :: Python :: 3.12
20
+ Classifier: Programming Language :: Python :: 3.13
21
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
22
+ Requires-Python: >=3.10
23
+ Requires-Dist: httpx>=0.27.0
24
+ Requires-Dist: pydantic>=2.0.0
25
+ Provides-Extra: dev
26
+ Requires-Dist: datamodel-code-generator[ruff]>=0.26.0; extra == 'dev'
27
+ Requires-Dist: mypy>=1.11.0; extra == 'dev'
28
+ Requires-Dist: pytest>=8.0.0; extra == 'dev'
29
+ Requires-Dist: ruff>=0.6.0; extra == 'dev'
30
+ Description-Content-Type: text/markdown
31
+
32
+ # alltoken-ai
33
+
34
+ Official Python SDK for [AllToken](https://alltoken.ai) — one API for OpenAI, Anthropic, and 100+ models.
35
+
36
+ ```bash
37
+ pip install alltoken-ai
38
+ ```
39
+
40
+ Requires **Python 3.10+**.
41
+
42
+ ## Quick start
43
+
44
+ ```python
45
+ from alltoken import AllToken
46
+
47
+ client = AllToken(api_key="...") # or os.environ["ALLTOKEN_API_KEY"]
48
+
49
+ # OpenAI-compatible surface (maps to /v1)
50
+ resp = client.openai.raw.post(
51
+ "/chat/completions",
52
+ json={
53
+ "model": "gpt-4o",
54
+ "messages": [{"role": "user", "content": "Hello!"}],
55
+ },
56
+ )
57
+ print(resp.json())
58
+
59
+ # Anthropic-compatible surface (maps to /anthropic)
60
+ resp = client.anthropic.raw.post(
61
+ "/messages",
62
+ json={
63
+ "model": "claude-sonnet-4",
64
+ "max_tokens": 1024,
65
+ "messages": [{"role": "user", "content": "Hello!"}],
66
+ },
67
+ )
68
+ print(resp.json())
69
+ ```
70
+
71
+ The same API key works for both surfaces. Model catalog: [alltoken.ai/models](https://alltoken.ai/models).
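+
+ The specs also describe a model-listing endpoint (see the generated `ModelList` schema). A minimal sketch using the raw client, assuming `GET /v1/models` is enabled for your key:
+
+ ```python
+ # Hypothetical listing call via the pre-configured httpx client
+ models = client.openai.raw.get("/models").json()
+ print([m["id"] for m in models["data"]])
+ ```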
72
+
73
+ ## Configuration
74
+
75
+ ```python
76
+ AllToken(
77
+ api_key="...", # required
78
+ base_url="https://api.alltoken.ai", # optional, defaults to production
79
+ default_headers={"X-My-Tag": "a"}, # optional, merged into every request
80
+ )
81
+ ```
82
+
83
+ ## API surface
84
+
85
+ | Field | Spec | Base URL |
86
+ |---|---|---|
87
+ | `client.openai.raw` | `chat.yml` (OpenAI-compatible) | `https://api.alltoken.ai/v1` |
88
+ | `client.anthropic.raw` | `anthropic.yml` | `https://api.alltoken.ai/anthropic` |
89
+
90
+ `.raw` is a pre-configured [httpx.Client](https://www.python-httpx.org/api/#client) — the base URL and auth headers are already set, so you can call `.get()` / `.post()` / `.stream()` directly. Pydantic models for request/response bodies are generated from the OpenAPI specs into `alltoken.generated.chat` and `alltoken.generated.anthropic`.
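+
+ For example, a chat-completions response can be validated against the generated models (a minimal sketch, reusing the client from the quick start):
+
+ ```python
+ from alltoken.generated.chat import ChatCompletionResponse
+
+ resp = client.openai.raw.post(
+     "/chat/completions",
+     json={"model": "gpt-4o", "messages": [{"role": "user", "content": "Hello!"}]},
+ )
+ completion = ChatCompletionResponse.model_validate(resp.json())
+ print(completion.choices[0].message.content)
+ ```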
91
+
92
+ ## Status
93
+
94
+ **v0.2.0 — Early.** Pydantic models are generated from the specs, and the first typed helper, `client.openai.chat.completions.create(...)` (with synchronous streaming via `stream=True`), is available; the rest of the wrapper surface is still minimal. Expect breaking changes in 0.x. Async clients, automatic retries, and further ergonomic helpers are planned.
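+
+ A minimal sketch of the typed helper and its streaming variant (reusing the client from the quick start):
+
+ ```python
+ completion = client.openai.chat.completions.create(
+     model="gpt-4o",
+     messages=[{"role": "user", "content": "Hello!"}],
+ )
+ print(completion.choices[0].message.content)
+
+ # Streaming returns an AllTokenStream of ChatCompletionChunk objects
+ with client.openai.chat.completions.create(
+     model="gpt-4o",
+     messages=[{"role": "user", "content": "Hello!"}],
+     stream=True,
+ ) as stream:
+     for chunk in stream:
+         if chunk.choices:
+             print(chunk.choices[0].delta.content or "", end="")
+ ```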
95
+
96
+ ## Contributing / Local development
97
+
98
+ ```bash
99
+ # Clone megaopenrouter as a sibling (for the OpenAPI specs)
100
+ git clone git@gitlab.53site.com:ai-innovation-lab/megaopenrouter.git ../megaopenrouter
101
+
102
+ # Install with dev deps
103
+ pip install -e ".[dev]"
104
+
105
+ # Regenerate pydantic models from specs
106
+ python scripts/generate.py
107
+
108
+ # Test + lint
109
+ pytest
110
+ ruff check
111
+ mypy .
112
+ ```
113
+
114
+ Generated models live in `src/alltoken/generated/{chat,anthropic}.py` — these are **committed** so users who install from PyPI don't need to run codegen.
115
+
116
+ ## License
117
+
118
+ [MIT](./LICENSE)
@@ -0,0 +1,87 @@
1
+ # alltoken-ai
2
+
3
+ Official Python SDK for [AllToken](https://alltoken.ai) — one API for OpenAI, Anthropic, and 100+ models.
4
+
5
+ ```bash
6
+ pip install alltoken-ai
7
+ ```
8
+
9
+ Requires **Python 3.10+**.
10
+
11
+ ## Quick start
12
+
13
+ ```python
14
+ from alltoken import AllToken
15
+
16
+ client = AllToken(api_key="...") # or os.environ["ALLTOKEN_API_KEY"]
17
+
18
+ # OpenAI-compatible surface (maps to /v1)
19
+ resp = client.openai.raw.post(
20
+ "/chat/completions",
21
+ json={
22
+ "model": "gpt-4o",
23
+ "messages": [{"role": "user", "content": "Hello!"}],
24
+ },
25
+ )
26
+ print(resp.json())
27
+
28
+ # Anthropic-compatible surface (maps to /anthropic)
29
+ resp = client.anthropic.raw.post(
30
+ "/messages",
31
+ json={
32
+ "model": "claude-sonnet-4",
33
+ "max_tokens": 1024,
34
+ "messages": [{"role": "user", "content": "Hello!"}],
35
+ },
36
+ )
37
+ print(resp.json())
38
+ ```
39
+
40
+ The same API key works for both surfaces. Model catalog: [alltoken.ai/models](https://alltoken.ai/models).
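+
+ The specs also describe a model-listing endpoint (see the generated `ModelList` schema). A minimal sketch using the raw client, assuming `GET /v1/models` is enabled for your key:
+
+ ```python
+ # Hypothetical listing call via the pre-configured httpx client
+ models = client.openai.raw.get("/models").json()
+ print([m["id"] for m in models["data"]])
+ ```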
41
+
42
+ ## Configuration
43
+
44
+ ```python
45
+ AllToken(
46
+ api_key="...", # required
47
+ base_url="https://api.alltoken.ai", # optional, defaults to production
48
+ default_headers={"X-My-Tag": "a"}, # optional, merged into every request
49
+ )
50
+ ```
51
+
52
+ ## API surface
53
+
54
+ | Field | Spec | Base URL |
55
+ |---|---|---|
56
+ | `client.openai.raw` | `chat.yml` (OpenAI-compatible) | `https://api.alltoken.ai/v1` |
57
+ | `client.anthropic.raw` | `anthropic.yml` | `https://api.alltoken.ai/anthropic` |
58
+
59
+ `.raw` is a pre-configured [httpx.Client](https://www.python-httpx.org/api/#client) — the base URL and auth headers are already set, so you can call `.get()` / `.post()` / `.stream()` directly. Pydantic models for request/response bodies are generated from the OpenAPI specs into `alltoken.generated.chat` and `alltoken.generated.anthropic`.
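+
+ For example, a chat-completions response can be validated against the generated models (a minimal sketch, reusing the client from the quick start):
+
+ ```python
+ from alltoken.generated.chat import ChatCompletionResponse
+
+ resp = client.openai.raw.post(
+     "/chat/completions",
+     json={"model": "gpt-4o", "messages": [{"role": "user", "content": "Hello!"}]},
+ )
+ completion = ChatCompletionResponse.model_validate(resp.json())
+ print(completion.choices[0].message.content)
+ ```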
60
+
61
+ ## Status
62
+
63
+ **v0.2.0 — Early.** Pydantic models are generated from the specs, and the first typed helper, `client.openai.chat.completions.create(...)` (with synchronous streaming via `stream=True`), is available; the rest of the wrapper surface is still minimal. Expect breaking changes in 0.x. Async clients, automatic retries, and further ergonomic helpers are planned.
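+
+ A minimal sketch of the typed helper and its streaming variant (reusing the client from the quick start):
+
+ ```python
+ completion = client.openai.chat.completions.create(
+     model="gpt-4o",
+     messages=[{"role": "user", "content": "Hello!"}],
+ )
+ print(completion.choices[0].message.content)
+
+ # Streaming returns an AllTokenStream of ChatCompletionChunk objects
+ with client.openai.chat.completions.create(
+     model="gpt-4o",
+     messages=[{"role": "user", "content": "Hello!"}],
+     stream=True,
+ ) as stream:
+     for chunk in stream:
+         if chunk.choices:
+             print(chunk.choices[0].delta.content or "", end="")
+ ```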
64
+
65
+ ## Contributing / Local development
66
+
67
+ ```bash
68
+ # Clone megaopenrouter as a sibling (for the OpenAPI specs)
69
+ git clone git@gitlab.53site.com:ai-innovation-lab/megaopenrouter.git ../megaopenrouter
70
+
71
+ # Install with dev deps
72
+ pip install -e ".[dev]"
73
+
74
+ # Regenerate pydantic models from specs
75
+ python scripts/generate.py
76
+
77
+ # Test + lint
78
+ pytest
79
+ ruff check
80
+ mypy .
81
+ ```
82
+
83
+ Generated models live in `src/alltoken/generated/{chat,anthropic}.py` — these are **committed** so users who install from PyPI don't need to run codegen.
84
+
85
+ ## License
86
+
87
+ [MIT](./LICENSE)
@@ -0,0 +1,11 @@
1
+ # alltoken-ai
2
+
3
+ This is an alias package that installs [alltoken](https://pypi.org/project/alltoken/), the official Python SDK for [AllToken](https://alltoken.ai).
4
+
5
+ ```bash
6
+ pip install alltoken-ai
7
+ # or equivalently:
8
+ pip install alltoken
9
+ ```
10
+
11
+ See the [alltoken package](https://pypi.org/project/alltoken/) for full documentation.
@@ -0,0 +1,60 @@
1
+ [build-system]
2
+ requires = ["hatchling"]
3
+ build-backend = "hatchling.build"
4
+
5
+ [project]
6
+ name = "alltoken"
7
+ version = "0.2.0"
8
+ description = "Official Python SDK for AllToken — one API for OpenAI, Anthropic, and 100+ models."
9
+ readme = "README.md"
10
+ license = { text = "MIT" }
11
+ requires-python = ">=3.10"
12
+ authors = [{ name = "AllToken", email = "dev@alltoken.ai" }]
13
+ keywords = ["alltoken", "openai", "anthropic", "llm", "ai", "api"]
14
+ classifiers = [
15
+ "Development Status :: 3 - Alpha",
16
+ "Intended Audience :: Developers",
17
+ "License :: OSI Approved :: MIT License",
18
+ "Programming Language :: Python :: 3",
19
+ "Programming Language :: Python :: 3.10",
20
+ "Programming Language :: Python :: 3.11",
21
+ "Programming Language :: Python :: 3.12",
22
+ "Programming Language :: Python :: 3.13",
23
+ "Topic :: Software Development :: Libraries :: Python Modules",
24
+ ]
25
+ dependencies = [
26
+ "httpx>=0.27.0",
27
+ "pydantic>=2.0.0",
28
+ ]
29
+
30
+ [project.urls]
31
+ Homepage = "https://alltoken.ai"
32
+ Repository = "https://github.com/alltoken-ai/alltoken-python"
33
+ Documentation = "https://alltoken.ai/docs"
34
+ Issues = "https://github.com/alltoken-ai/alltoken-python/issues"
35
+
36
+ [project.optional-dependencies]
37
+ dev = [
38
+ "datamodel-code-generator[ruff]>=0.26.0",
39
+ "pytest>=8.0.0",
40
+ "ruff>=0.6.0",
41
+ "mypy>=1.11.0",
42
+ ]
43
+
44
+ [tool.hatch.build.targets.wheel]
45
+ packages = ["src/alltoken"]
46
+
47
+ [tool.hatch.build.targets.sdist]
48
+ include = ["src/alltoken", "README.md", "LICENSE"]
49
+
50
+ [tool.ruff]
51
+ line-length = 100
52
+ target-version = "py310"
53
+
54
+ [tool.ruff.lint]
55
+ select = ["E", "F", "I", "N", "UP", "B", "A", "C4", "SIM"]
56
+
57
+ [tool.mypy]
58
+ python_version = "3.10"
59
+ strict = true
60
+ files = ["src/alltoken"]
@@ -0,0 +1,63 @@
1
+ """alltoken-ai — Official Python SDK for AllToken.
2
+
3
+ One API for OpenAI, Anthropic, and 100+ models.
4
+
5
+ Example:
6
+ >>> from alltoken import AllToken
7
+ >>> client = AllToken(api_key="...")
8
+ >>> resp = client.openai.raw.post(
9
+ ... "/chat/completions",
10
+ ... json={"model": "gpt-4o", "messages": [{"role": "user", "content": "Hi"}]},
11
+ ... )
12
+ >>> print(resp.json())
13
+ """
14
+ from __future__ import annotations
15
+
16
+ from ._anthropic import AnthropicClient
17
+ from ._client import AllTokenConfig
18
+ from ._error import AllTokenError
19
+ from ._openai import OpenAIClient
20
+ from ._streaming import AllTokenStream
21
+ from ._types import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage
22
+
23
+ __version__ = "0.2.0"
24
+
25
+
26
+ class AllToken:
27
+ """Unified client exposing both OpenAI-compatible and Anthropic-compatible surfaces.
28
+
29
+ Both sub-clients share the same API key and base URL; only the path suffix differs
30
+ (/v1 vs /anthropic).
31
+ """
32
+
33
+ openai: OpenAIClient
34
+ anthropic: AnthropicClient
35
+
36
+ def __init__(
37
+ self,
38
+ *,
39
+ api_key: str,
40
+ base_url: str = "https://api.alltoken.ai",
41
+ default_headers: dict[str, str] | None = None,
42
+ ) -> None:
43
+ config = AllTokenConfig(
44
+ api_key=api_key,
45
+ base_url=base_url,
46
+ default_headers=default_headers or {},
47
+ )
48
+ self.openai = OpenAIClient(config)
49
+ self.anthropic = AnthropicClient(config)
50
+
51
+
52
+ __all__ = [
53
+ "AllToken",
54
+ "AllTokenConfig",
55
+ "AllTokenError",
56
+ "AllTokenStream",
57
+ "AnthropicClient",
58
+ "ChatCompletion",
59
+ "ChatCompletionChunk",
60
+ "ChatCompletionMessage",
61
+ "OpenAIClient",
62
+ "__version__",
63
+ ]
@@ -0,0 +1,46 @@
1
+ """Anthropic-compatible client.
2
+
3
+ Backed by ``anthropic.yml``. Base URL: ``https://api.alltoken.ai/anthropic``.
4
+ """
5
+ from __future__ import annotations
6
+
7
+ import httpx
8
+
9
+ from ._client import AllTokenConfig, build_headers, join_base_url
10
+
11
+ _ANTHROPIC_PATH = "/anthropic"
12
+
13
+
14
+ class AnthropicClient:
15
+ """Anthropic-compatible client.
16
+
17
+ The underlying pre-configured ``httpx.Client`` is exposed as ``.raw``:
18
+
19
+ >>> resp = client.anthropic.raw.post(
20
+ ... "/messages",
21
+ ... json={
22
+ ... "model": "claude-sonnet-4",
23
+ ... "max_tokens": 1024,
24
+ ... "messages": [{"role": "user", "content": "Hi"}],
25
+ ... },
26
+ ... )
27
+ """
28
+
29
+ raw: httpx.Client
30
+
31
+ def __init__(self, config: AllTokenConfig) -> None:
32
+ self.raw = httpx.Client(
33
+ base_url=join_base_url(config.base_url, _ANTHROPIC_PATH),
34
+ headers=build_headers(config),
35
+ timeout=httpx.Timeout(60.0, connect=10.0),
36
+ )
37
+
38
+ def close(self) -> None:
39
+ """Close the underlying HTTP connection pool."""
40
+ self.raw.close()
41
+
42
+ def __enter__(self) -> AnthropicClient:
43
+ return self
44
+
45
+ def __exit__(self, *args: object) -> None:
46
+ self.close()
@@ -0,0 +1,36 @@
1
+ """Shared base configuration for the OpenAI and Anthropic sub-clients.
2
+
3
+ Both sub-clients share the same API key and base URL; only the path suffix
4
+ differs (/v1 vs /anthropic).
5
+ """
6
+ from __future__ import annotations
7
+
8
+ from dataclasses import dataclass, field
9
+
10
+
11
+ @dataclass
12
+ class AllTokenConfig:
13
+ """Configuration for AllToken sub-clients."""
14
+
15
+ api_key: str
16
+ """API key from alltoken.ai. Found in Settings → API Keys."""
17
+
18
+ base_url: str = "https://api.alltoken.ai"
19
+ """Override the API base URL. Each sub-client appends its own path."""
20
+
21
+ default_headers: dict[str, str] = field(default_factory=dict)
22
+ """Extra headers sent on every request."""
23
+
24
+
25
+ def build_headers(config: AllTokenConfig) -> dict[str, str]:
26
+ """Build the auth + default headers for every request."""
27
+ return {
28
+ "Authorization": f"Bearer {config.api_key}",
29
+ "Content-Type": "application/json",
30
+ **config.default_headers,
31
+ }
32
+
33
+
34
+ def join_base_url(base_url: str, path: str) -> str:
35
+ """Join base URL + path, normalising trailing slashes."""
36
+ return base_url.rstrip("/") + path
@@ -0,0 +1,9 @@
1
+ """AllToken API error type."""
2
+ from __future__ import annotations
3
+
4
+
5
+ class AllTokenError(Exception):
6
+ def __init__(self, status_code: int, body: str) -> None:
7
+ self.status_code = status_code
8
+ self.body = body
9
+ super().__init__(f"AllToken API error {status_code}: {body}")
@@ -0,0 +1,45 @@
1
+ """OpenAI-compatible client.
2
+
3
+ Backed by ``chat.yml``. Base URL: ``https://api.alltoken.ai/v1``.
4
+ """
5
+ from __future__ import annotations
6
+
7
+ import httpx
8
+
9
+ from ._client import AllTokenConfig, build_headers, join_base_url
10
+ from ._resources.chat import Chat
11
+
12
+ _OPENAI_PATH = "/v1"
13
+
14
+
15
+ class OpenAIClient:
16
+ """OpenAI-compatible client.
17
+
18
+ The underlying pre-configured ``httpx.Client`` is exposed as ``.raw``:
19
+
20
+ >>> resp = client.openai.raw.post(
21
+ ... "/chat/completions",
22
+ ... json={"model": "gpt-4o", "messages": [{"role": "user", "content": "Hi"}]},
23
+ ... )
24
+ """
25
+
26
+ raw: httpx.Client
27
+ chat: Chat
28
+
29
+ def __init__(self, config: AllTokenConfig) -> None:
30
+ self.raw = httpx.Client(
31
+ base_url=join_base_url(config.base_url, _OPENAI_PATH),
32
+ headers=build_headers(config),
33
+ timeout=httpx.Timeout(60.0, connect=10.0),
34
+ )
35
+ self.chat = Chat(self.raw)
36
+
37
+ def close(self) -> None:
38
+ """Close the underlying HTTP connection pool."""
39
+ self.raw.close()
40
+
41
+ def __enter__(self) -> OpenAIClient:
42
+ return self
43
+
44
+ def __exit__(self, *args: object) -> None:
45
+ self.close()
File without changes
@@ -0,0 +1,50 @@
1
+ """Chat completions resource with typed convenience methods."""
2
+ from __future__ import annotations
3
+
4
+ from typing import Any, Literal, overload
5
+
6
+ import httpx
7
+
8
+ from .._error import AllTokenError
9
+ from .._streaming import AllTokenStream
10
+ from .._types import ChatCompletion, ChatCompletionChunk
11
+
12
+
13
+ class ChatCompletions:
14
+ def __init__(self, client: httpx.Client) -> None:
15
+ self._client = client
16
+
17
+ @overload
18
+ def create(
19
+ self, *, stream: Literal[True], **kwargs: Any
20
+ ) -> AllTokenStream[ChatCompletionChunk]: ...
21
+
22
+ @overload
23
+ def create(
24
+ self, *, stream: Literal[False] = ..., **kwargs: Any
25
+ ) -> ChatCompletion: ...
26
+
27
+ def create(
28
+ self, *, stream: bool = False, **kwargs: Any
29
+ ) -> ChatCompletion | AllTokenStream[ChatCompletionChunk]:
30
+ body = {**kwargs, "stream": stream}
31
+ if stream:
32
+ request = self._client.build_request(
33
+ "POST", "/chat/completions", json=body
34
+ )
35
+ response = self._client.send(request, stream=True)
36
+ if response.status_code != 200:
37
+ body_text = response.read().decode()
38
+ response.close()
39
+ raise AllTokenError(response.status_code, body_text)
40
+ return AllTokenStream(response, ChatCompletionChunk)
41
+ else:
42
+ response = self._client.post("/chat/completions", json=body)
43
+ if response.status_code != 200:
44
+ raise AllTokenError(response.status_code, response.text)
45
+ return ChatCompletion.from_dict(response.json())
46
+
47
+
48
+ class Chat:
49
+ def __init__(self, client: httpx.Client) -> None:
50
+ self.completions = ChatCompletions(client)
@@ -0,0 +1,42 @@
1
+ """SSE stream iterator for AllToken streaming responses."""
2
+ from __future__ import annotations
3
+
4
+ import json
5
+ from collections.abc import Iterator
6
+ from typing import Any, Generic, Protocol, TypeVar
7
+
8
+ import httpx
9
+
10
+ T = TypeVar("T")
11
+
12
+
13
+ class _FromDict(Protocol):
14
+ @classmethod
15
+ def from_dict(cls, data: dict[str, Any]) -> Any: ...
16
+
17
+
18
+ class AllTokenStream(Generic[T]):
19
+ """Wraps an httpx streaming response, parses SSE lines into typed objects."""
20
+
21
+ def __init__(self, response: httpx.Response, parser: _FromDict) -> None:
22
+ self._response = response
23
+ self._parser = parser
24
+
25
+ def __iter__(self) -> Iterator[T]:
26
+ for line in self._response.iter_lines():
27
+ if not line.startswith("data: "):
28
+ continue
29
+ data = line[len("data: "):]
30
+ if data.strip() == "[DONE]":
31
+ break
32
+ obj = json.loads(data)
33
+ yield self._parser.from_dict(obj)
34
+
35
+ def close(self) -> None:
36
+ self._response.close()
37
+
38
+ def __enter__(self) -> AllTokenStream[T]:
39
+ return self
40
+
41
+ def __exit__(self, *args: object) -> None:
42
+ self.close()
@@ -0,0 +1,102 @@
1
+ """Convenience dataclass types for chat completions."""
2
+ from __future__ import annotations
3
+
4
+ from dataclasses import dataclass
5
+ from typing import Any
6
+
7
+
8
+ @dataclass
9
+ class ChatCompletionMessage:
10
+ role: str
11
+ content: str | None = None
12
+ tool_calls: list[Any] | None = None
13
+
14
+
15
+ @dataclass
16
+ class ChatCompletionChoice:
17
+ index: int
18
+ message: ChatCompletionMessage
19
+ finish_reason: str | None = None
20
+
21
+
22
+ @dataclass
23
+ class ChatCompletionUsage:
24
+ prompt_tokens: int
25
+ completion_tokens: int
26
+ total_tokens: int
27
+
28
+
29
+ @dataclass
30
+ class ChatCompletion:
31
+ id: str
32
+ object: str
33
+ created: int
34
+ model: str
35
+ choices: list[ChatCompletionChoice]
36
+ usage: ChatCompletionUsage | None = None
37
+
38
+ @classmethod
39
+ def from_dict(cls, data: dict[str, Any]) -> ChatCompletion:
40
+ choices = [
41
+ ChatCompletionChoice(
42
+ index=c["index"],
43
+ message=ChatCompletionMessage(**c["message"]),
44
+ finish_reason=c.get("finish_reason"),
45
+ )
46
+ for c in data["choices"]
47
+ ]
48
+ usage = ChatCompletionUsage(**data["usage"]) if data.get("usage") else None
49
+ return cls(
50
+ id=data["id"],
51
+ object=data["object"],
52
+ created=data["created"],
53
+ model=data["model"],
54
+ choices=choices,
55
+ usage=usage,
56
+ )
57
+
58
+
59
+ # Streaming chunk types
60
+
61
+
62
+ @dataclass
63
+ class ChatCompletionChunkDelta:
64
+ role: str | None = None
65
+ content: str | None = None
66
+
67
+
68
+ @dataclass
69
+ class ChatCompletionChunkChoice:
70
+ index: int
71
+ delta: ChatCompletionChunkDelta
72
+ finish_reason: str | None = None
73
+
74
+
75
+ @dataclass
76
+ class ChatCompletionChunk:
77
+ id: str
78
+ object: str
79
+ created: int
80
+ model: str
81
+ choices: list[ChatCompletionChunkChoice]
82
+ usage: ChatCompletionUsage | None = None
83
+
84
+ @classmethod
85
+ def from_dict(cls, data: dict[str, Any]) -> ChatCompletionChunk:
86
+ choices = [
87
+ ChatCompletionChunkChoice(
88
+ index=c["index"],
89
+ delta=ChatCompletionChunkDelta(**c.get("delta", {})),
90
+ finish_reason=c.get("finish_reason"),
91
+ )
92
+ for c in data["choices"]
93
+ ]
94
+ usage = ChatCompletionUsage(**data["usage"]) if data.get("usage") else None
95
+ return cls(
96
+ id=data["id"],
97
+ object=data["object"],
98
+ created=data["created"],
99
+ model=data["model"],
100
+ choices=choices,
101
+ usage=usage,
102
+ )
@@ -0,0 +1,6 @@
1
+ """Pydantic models generated from the AllToken OpenAPI specs.
2
+
3
+ Run ``python scripts/generate.py`` from the repo root to regenerate after the
4
+ specs move. These modules are committed so users who install from PyPI don't
5
+ need to run codegen.
6
+ """
@@ -0,0 +1,201 @@
1
+ # generated by datamodel-codegen:
2
+ # filename: anthropic.yml
3
+ # timestamp: 2026-04-15T09:50:44+00:00
4
+
5
+ from __future__ import annotations
6
+
7
+ from enum import Enum
8
+ from typing import Annotated, Any
9
+
10
+ from pydantic import AwareDatetime, BaseModel, Field, RootModel
11
+
12
+
13
+ class Metadata(BaseModel):
14
+ user_id: str | None = None
15
+ """
16
+ End-user identifier
17
+ """
18
+
19
+
20
+ class Type(Enum):
21
+ enabled = "enabled"
22
+
23
+
24
+ class Thinking(BaseModel):
25
+ """
26
+ Extended thinking configuration
27
+ """
28
+
29
+ type: Type | None = None
30
+ budget_tokens: int | None = None
31
+
32
+
33
+ class Role(Enum):
34
+ user = "user"
35
+ assistant = "assistant"
36
+
37
+
38
+ class Type1(Enum):
39
+ text = "text"
40
+ image = "image"
41
+ tool_use = "tool_use"
42
+ tool_result = "tool_result"
43
+
44
+
45
+ class ContentBlock(BaseModel):
46
+ type: Type1
47
+ text: str | None = None
48
+ """
49
+ Text content when type=text
50
+ """
51
+ id: str | None = None
52
+ """
53
+ Tool-use ID when type=tool_use
54
+ """
55
+ name: str | None = None
56
+ """
57
+ Tool name when type=tool_use
58
+ """
59
+ input: Any | None = None
60
+ """
61
+ Tool input when type=tool_use (arbitrary JSON)
62
+ """
63
+ cache_control: dict[str, Any] | None = None
64
+ """
65
+ Block-level cache control, e.g. `{type: ephemeral, ttl: 1h}`
66
+ """
67
+
68
+
69
+ class Tool(BaseModel):
70
+ name: str
71
+ description: str | None = None
72
+ input_schema: dict[str, Any]
73
+ """
74
+ JSON Schema object
75
+ """
76
+ cache_control: dict[str, Any] | None = None
77
+
78
+
79
+ class Type2(Enum):
80
+ message = "message"
81
+
82
+
83
+ class Role1(Enum):
84
+ assistant = "assistant"
85
+
86
+
87
+ class StopReasonEnum(Enum):
88
+ end_turn = "end_turn"
89
+ max_tokens = "max_tokens"
90
+ stop_sequence = "stop_sequence"
91
+ tool_use = "tool_use"
92
+
93
+
94
+ class StopReason(RootModel[StopReasonEnum | None]):
95
+ root: StopReasonEnum | None = None
96
+
97
+
98
+ class Usage(BaseModel):
99
+ input_tokens: int
100
+ output_tokens: int
101
+ cache_creation_input_tokens: int | None = None
102
+ cache_read_input_tokens: int | None = None
103
+
104
+
105
+ class Type3(Enum):
106
+ model = "model"
107
+
108
+
109
+ class ModelInfo(BaseModel):
110
+ id: str
111
+ type: Type3
112
+ display_name: str | None = None
113
+ created_at: AwareDatetime | None = None
114
+
115
+
116
+ class Type4(Enum):
117
+ error = "error"
118
+
119
+
120
+ class Type5(Enum):
121
+ invalid_request_error = "invalid_request_error"
122
+ authentication_error = "authentication_error"
123
+ permission_error = "permission_error"
124
+ not_found_error = "not_found_error"
125
+ rate_limit_error = "rate_limit_error"
126
+ api_error = "api_error"
127
+ overloaded_error = "overloaded_error"
128
+
129
+
130
+ class Error(BaseModel):
131
+ type: Type5
132
+ message: str
133
+
134
+
135
+ class ErrorResponse(BaseModel):
136
+ type: Type4
137
+ error: Error
138
+
139
+
140
+ class InputMessage(BaseModel):
141
+ role: Role
142
+ content: str | list[ContentBlock]
143
+ """
144
+ A string or a list of ContentBlock
145
+ """
146
+
147
+
148
+ class MessageResponse(BaseModel):
149
+ id: Annotated[str, Field(examples=["msg_01XFDUDYJgAACzvnptvVoYEL"])]
150
+ type: Type2
151
+ role: Role1
152
+ content: list[ContentBlock]
153
+ model: str
154
+ stop_reason: StopReason
155
+ stop_sequence: str | None = None
156
+ usage: Usage
157
+
158
+
159
+ class ModelList(BaseModel):
160
+ data: list[ModelInfo]
161
+ has_more: bool | None = None
162
+ first_id: str | None = None
163
+ last_id: str | None = None
164
+
165
+
166
+ class MessageRequest(BaseModel):
167
+ model: Annotated[str, Field(examples=["claude-sonnet-4-5"])]
168
+ """
169
+ Model ID (see GET /v1/models)
170
+ """
171
+ max_tokens: Annotated[int, Field(examples=[1024], ge=1)]
172
+ """
173
+ Maximum number of tokens to generate
174
+ """
175
+ messages: list[InputMessage]
176
+ system: str | list[ContentBlock] | None = None
177
+ """
178
+ System prompt (a string or a list of ContentBlock)
179
+ """
180
+ stream: bool | None = False
181
+ """
182
+ Whether to stream the response as SSE
183
+ """
184
+ temperature: Annotated[float | None, Field(ge=0.0, le=1.0)] = None
185
+ top_p: Annotated[float | None, Field(ge=0.0, le=1.0)] = None
186
+ top_k: Annotated[int | None, Field(ge=0)] = None
187
+ stop_sequences: list[str] | None = None
188
+ tools: list[Tool] | None = None
189
+ tool_choice: Any | None = None
190
+ """
191
+ Tool choice strategy: `{type: auto}` / `{type: any}` / `{type: tool, name: ...}`
192
+ """
193
+ metadata: Metadata | None = None
194
+ thinking: Thinking | None = None
195
+ """
196
+ Extended thinking configuration
197
+ """
198
+ cache_control: dict[str, Any] | None = None
199
+ """
200
+ Top-level automatic cache control, e.g. `{type: ephemeral, ttl: 1h}`
201
+ """
@@ -0,0 +1,426 @@
1
+ # generated by datamodel-codegen:
2
+ # filename: chat.yml
3
+ # timestamp: 2026-04-15T09:50:43+00:00
4
+
5
+ from __future__ import annotations
6
+
7
+ from enum import Enum
8
+ from typing import Annotated, Any
9
+
10
+ from pydantic import AnyUrl, AwareDatetime, BaseModel, Field, RootModel
11
+
12
+
13
+ class StreamOptions(BaseModel):
14
+ include_usage: bool | None = None
15
+
16
+
17
+ class Type(Enum):
18
+ text = "text"
19
+ json_object = "json_object"
20
+
21
+
22
+ class ResponseFormat(BaseModel):
23
+ type: Type | None = None
24
+
25
+
26
+ class Role(Enum):
27
+ system = "system"
28
+ user = "user"
29
+ assistant = "assistant"
30
+ tool = "tool"
31
+
32
+
33
+ class Type1(Enum):
34
+ text = "text"
35
+ image_url = "image_url"
36
+
37
+
38
+ class Detail(Enum):
39
+ auto = "auto"
40
+ low = "low"
41
+ high = "high"
42
+
43
+
44
+ class ImageUrl(BaseModel):
45
+ """
46
+ Image object when type=image_url
47
+ """
48
+
49
+ url: AnyUrl | None = None
50
+ detail: Detail | None = None
51
+
52
+
53
+ class ContentPart(BaseModel):
54
+ """
55
+ Multimodal message content part (text / image URL)
56
+ """
57
+
58
+ type: Type1
59
+ text: str | None = None
60
+ """
61
+ Text content when type=text
62
+ """
63
+ image_url: ImageUrl | None = None
64
+ """
65
+ Image object when type=image_url
66
+ """
67
+
68
+
69
+ class Type2(Enum):
70
+ function = "function"
71
+
72
+
73
+ class Function(BaseModel):
74
+ name: str
75
+ description: str | None = None
76
+ parameters: dict[str, Any] | None = None
77
+ """
78
+ JSON Schema
79
+ """
80
+
81
+
82
+ class Tool(BaseModel):
83
+ type: Type2
84
+ function: Function
85
+
86
+
87
+ class Function1(BaseModel):
88
+ name: str | None = None
89
+ arguments: str | None = None
90
+ """
91
+ JSON string
92
+ """
93
+
94
+
95
+ class ToolCall(BaseModel):
96
+ id: str
97
+ type: Type2
98
+ function: Function1
99
+
100
+
101
+ class Object(Enum):
102
+ chat_completion = "chat.completion"
103
+
104
+
105
+ class FinishReasonEnum(Enum):
106
+ stop = "stop"
107
+ length = "length"
108
+ tool_calls = "tool_calls"
109
+ content_filter = "content_filter"
110
+
111
+
112
+ class FinishReason(RootModel[FinishReasonEnum | None]):
113
+ root: FinishReasonEnum | None = None
114
+
115
+
116
+ class PromptTokensDetails(BaseModel):
117
+ cached_tokens: int | None = None
118
+ cache_creation_input_tokens: int | None = None
119
+
120
+
121
+ class CompletionTokensDetails(BaseModel):
122
+ reasoning_tokens: int | None = None
123
+
124
+
125
+ class Usage(BaseModel):
126
+ prompt_tokens: int
127
+ completion_tokens: int
128
+ total_tokens: int
129
+ prompt_tokens_details: PromptTokensDetails | None = None
130
+ completion_tokens_details: CompletionTokensDetails | None = None
131
+ cache_read_input_tokens: int | None = None
132
+ cache_creation_by_ttl: dict[str, int] | None = None
133
+ """
134
+ Cache writes broken down by TTL; keys are seconds (e.g. 300, 3600)
135
+ """
136
+ server_tool_use: dict[str, int] | None = None
137
+
138
+
139
+ class Object1(Enum):
140
+ list = "list"
141
+
142
+
143
+ class Object2(Enum):
144
+ model = "model"
145
+
146
+
147
+ class ModelInfo(BaseModel):
148
+ id: str
149
+ object: Object2
150
+ created: int | None = None
151
+ owned_by: str | None = None
152
+
153
+
154
+ class Ratio(Enum):
155
+ field_16_9 = "16:9"
156
+ field_9_16 = "9:16"
157
+ field_4_3 = "4:3"
158
+ field_3_4 = "3:4"
159
+ field_21_9 = "21:9"
160
+ field_1_1 = "1:1"
161
+ adaptive = "adaptive"
162
+
163
+
164
+ class Resolution(Enum):
165
+ field_480p = "480p"
166
+ field_720p = "720p"
167
+ field_1080p = "1080p"
168
+
169
+
170
+ class ServiceTier(Enum):
171
+ default = "default"
172
+ flex = "flex"
173
+
174
+
175
+ class Type4(Enum):
176
+ web_search = "web_search"
177
+
178
+
179
+ class Tool1(BaseModel):
180
+ type: Type4 | None = None
181
+
182
+
183
+ class Type5(Enum):
184
+ text = "text"
185
+ image_url = "image_url"
186
+ video_url = "video_url"
187
+ audio_url = "audio_url"
188
+ draft_task = "draft_task"
189
+ image = "image"
190
+ video = "video"
191
+ audio = "audio"
192
+
193
+
194
+ class ImageUrl1(BaseModel):
195
+ url: AnyUrl | None = None
196
+
197
+
198
+ class VideoUrl(BaseModel):
199
+ url: AnyUrl | None = None
200
+
201
+
202
+ class AudioUrl(BaseModel):
203
+ url: AnyUrl | None = None
204
+
205
+
206
+ class DraftTask(BaseModel):
207
+ id: str | None = None
208
+
209
+
210
+ class Role1(Enum):
211
+ first_frame = "first_frame"
212
+ last_frame = "last_frame"
213
+ reference_image = "reference_image"
214
+ reference_video = "reference_video"
215
+ reference_audio = "reference_audio"
216
+
217
+
218
+ class VideoContentItem(BaseModel):
219
+ type: Type5
220
+ text: str | None = None
221
+ image_url: ImageUrl1 | None = None
222
+ video_url: VideoUrl | None = None
223
+ audio_url: AudioUrl | None = None
224
+ draft_task: DraftTask | None = None
225
+ role: Role1 | None = None
226
+ url: str | None = None
227
+ """
228
+ Simplified form (when type=image/video/audio)
229
+ """
230
+
231
+
232
+ class Status(Enum):
233
+ queued = "queued"
234
+ processing = "processing"
235
+ completed = "completed"
236
+ failed = "failed"
237
+ expired = "expired"
238
+ cancelled = "cancelled"
239
+
240
+
241
+ class InputType(Enum):
242
+ text = "text"
243
+ image_first_frame = "image_first_frame"
244
+ image_first_last_frame = "image_first_last_frame"
245
+ image_reference = "image_reference"
246
+ video_reference = "video_reference"
247
+ multimodal = "multimodal"
248
+ draft_task = "draft_task"
249
+
250
+
251
+ class ToolUsage(BaseModel):
252
+ web_search: int | None = None
253
+
254
+
255
+ class Usage1(BaseModel):
256
+ completion_tokens: int | None = None
257
+ total_tokens: int | None = None
258
+ tool_usage: ToolUsage | None = None
259
+
260
+
261
+ class Error(BaseModel):
262
+ code: str | None = None
263
+ message: str | None = None
264
+
265
+
266
+ class VideoTaskResponse(BaseModel):
267
+ id: Annotated[str, Field(examples=["vgen_01abc"])]
268
+ status: Status
269
+ model: str
270
+ input_type: InputType | None = None
271
+ video_url: AnyUrl | None = None
272
+ video_url_expires_at: AwareDatetime | None = None
273
+ video_url_ttl: int | None = None
274
+ """
275
+ Remaining validity of the video URL, in seconds
276
+ """
277
+ last_frame_url: AnyUrl | None = None
278
+ duration: int | None = None
279
+ frames: int | None = None
280
+ fps: int | None = None
281
+ resolution: str | None = None
282
+ ratio: str | None = None
283
+ seed: int | None = None
284
+ generate_audio: bool | None = None
285
+ draft: bool | None = None
286
+ draft_task_id: str | None = None
287
+ service_tier: str | None = None
288
+ execution_expires_after: int | None = None
289
+ safety_identifier: str | None = None
290
+ tools: list[dict[str, Any]] | None = None
291
+ usage: Usage1 | None = None
292
+ error: Error | None = None
293
+ created_at: AwareDatetime
294
+ updated_at: AwareDatetime | None = None
295
+ completed_at: AwareDatetime | None = None
296
+
297
+
298
+ class VideoListResponse(BaseModel):
299
+ items: list[VideoTaskResponse]
300
+ total: int
301
+
302
+
303
+ class Error1(BaseModel):
304
+ code: str | None = None
305
+ message: str
306
+ param: str | None = None
307
+ type: str
308
+ request_id: str | None = None
309
+
310
+
311
+ class ErrorResponse(BaseModel):
312
+ error: Error1
313
+
314
+
315
+ class ChatMessage(BaseModel):
316
+ role: Role
317
+ content: str | list[ContentPart] | None = None
318
+ """
319
+ Message content: a string, a list of ContentPart (multimodal), or null (when the assistant returns tool_calls).
320
+ The JSON layer allows null; the SDK accepts it with a Union/Any type.
321
+
322
+ """
323
+ name: str | None = None
324
+ tool_calls: list[ToolCall] | None = None
325
+ tool_call_id: str | None = None
326
+ """
327
+ Required when role=tool
328
+ """
329
+
330
+
331
+ class Choice(BaseModel):
332
+ index: int
333
+ message: ChatMessage
334
+ finish_reason: FinishReason | None = None
335
+
336
+
337
+ class ModelList(BaseModel):
338
+ object: Object1
339
+ data: list[ModelInfo]
340
+
341
+
342
+ class VideoGenerationRequest(BaseModel):
343
+ model: Annotated[str, Field(examples=["seedance-1.5-pro"])]
344
+ prompt: str
345
+ content: list[VideoContentItem] | None = None
346
+ ratio: Ratio | None = None
347
+ duration: int | None = None
348
+ """
349
+ Duration in seconds; -1 means adaptive duration
350
+ """
351
+ resolution: Resolution | None = None
352
+ frames: int | None = None
353
+ """
354
+ Frame count (takes precedence over duration)
355
+ """
356
+ generate_audio: bool | None = True
357
+ seed: int | None = None
358
+ camera_fixed: bool | None = None
359
+ watermark: bool | None = False
360
+ callback_url: AnyUrl | None = None
361
+ return_last_frame: bool | None = None
362
+ service_tier: ServiceTier | None = None
363
+ execution_expires_after: Annotated[int | None, Field(ge=3600, le=259200)] = None
364
+ draft: bool | None = None
365
+ tools: list[Tool1] | None = None
366
+ safety_identifier: Annotated[str | None, Field(max_length=64)] = None
367
+
368
+
369
+ class ChatCompletionRequest(BaseModel):
370
+ model: Annotated[str, Field(examples=["gpt-4o-mini"])]
371
+ messages: list[ChatMessage]
372
+ stream: bool | None = False
373
+ stream_options: StreamOptions | None = None
374
+ max_tokens: Annotated[int | None, Field(ge=1)] = None
375
+ temperature: Annotated[float | None, Field(ge=0.0, le=2.0)] = None
376
+ top_p: float | None = None
377
+ top_k: int | None = None
378
+ stop: list[str] | None = None
379
+ user: str | None = None
380
+ tools: list[Tool] | None = None
381
+ tool_choice: Any | None = None
382
+ """
383
+ `auto` / `none` / `required` / `{type: function, function: {name: ...}}`
384
+ """
385
+ response_format: ResponseFormat | None = None
386
+ enable_search: bool | None = None
387
+ """
388
+ Gateway extension: enable web search
389
+ """
390
+ frequency_penalty: float | None = None
391
+ presence_penalty: float | None = None
392
+ n: Annotated[int | None, Field(ge=1)] = None
393
+ thinking: bool | None = None
394
+ """
395
+ Gateway extension: enable thinking (chain-of-thought)
396
+ """
397
+ thinking_budget: int | None = None
398
+ """
399
+ Gateway extension: token budget for thinking
400
+ """
401
+ conversation_id: int | None = None
402
+ """
403
+ Gateway extension: ID of a gateway-managed conversation to attach to (messages are persisted automatically)
404
+ """
405
+
406
+
407
+ class ChatCompletionResponse(BaseModel):
408
+ id: str
409
+ object: Object
410
+ created: int
411
+ model: str
412
+ choices: list[Choice]
413
+ usage: Usage | None = None
414
+ system_fingerprint: str | None = None
415
+ x_gateway_provider: Annotated[str | None, Field(alias="x-gateway-provider")] = None
416
+ """
417
+ Gateway extension: name of the provider that actually handled the request
418
+ """
419
+ x_gateway_request_id: Annotated[str | None, Field(alias="x-gateway-request-id")] = None
420
+ """
421
+ Gateway extension: request ID (useful for troubleshooting)
422
+ """
423
+ conversation_id: int | None = None
424
+ """
425
+ Gateway extension: if the request triggered automatic conversation creation, the new conversation ID
426
+ """