axion-code 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- axion/__init__.py +3 -0
- axion/api/__init__.py +0 -0
- axion/api/anthropic.py +460 -0
- axion/api/client.py +259 -0
- axion/api/error.py +161 -0
- axion/api/ollama.py +597 -0
- axion/api/openai_compat.py +805 -0
- axion/api/openai_responses.py +627 -0
- axion/api/prompt_cache.py +31 -0
- axion/api/sse.py +98 -0
- axion/api/types.py +451 -0
- axion/cli/__init__.py +0 -0
- axion/cli/init_cmd.py +50 -0
- axion/cli/input.py +290 -0
- axion/cli/main.py +2953 -0
- axion/cli/render.py +489 -0
- axion/cli/tui.py +766 -0
- axion/commands/__init__.py +0 -0
- axion/commands/handlers/__init__.py +0 -0
- axion/commands/handlers/agents.py +51 -0
- axion/commands/handlers/builtin_commands.py +367 -0
- axion/commands/handlers/mcp.py +59 -0
- axion/commands/handlers/models.py +75 -0
- axion/commands/handlers/plugins.py +55 -0
- axion/commands/handlers/skills.py +61 -0
- axion/commands/parsing.py +317 -0
- axion/commands/registry.py +166 -0
- axion/compat_harness/__init__.py +0 -0
- axion/compat_harness/extractor.py +145 -0
- axion/plugins/__init__.py +0 -0
- axion/plugins/hooks.py +22 -0
- axion/plugins/manager.py +391 -0
- axion/plugins/manifest.py +270 -0
- axion/runtime/__init__.py +0 -0
- axion/runtime/bash.py +388 -0
- axion/runtime/bootstrap.py +39 -0
- axion/runtime/claude_subscription.py +300 -0
- axion/runtime/compact.py +233 -0
- axion/runtime/config.py +397 -0
- axion/runtime/conversation.py +1073 -0
- axion/runtime/file_ops.py +613 -0
- axion/runtime/git.py +213 -0
- axion/runtime/hooks.py +235 -0
- axion/runtime/image.py +212 -0
- axion/runtime/lanes.py +282 -0
- axion/runtime/lsp.py +425 -0
- axion/runtime/mcp/__init__.py +0 -0
- axion/runtime/mcp/client.py +76 -0
- axion/runtime/mcp/lifecycle.py +96 -0
- axion/runtime/mcp/stdio.py +318 -0
- axion/runtime/mcp/tool_bridge.py +79 -0
- axion/runtime/memory.py +196 -0
- axion/runtime/oauth.py +329 -0
- axion/runtime/openai_subscription.py +346 -0
- axion/runtime/permissions.py +247 -0
- axion/runtime/plan_mode.py +96 -0
- axion/runtime/policy_engine.py +259 -0
- axion/runtime/prompt.py +586 -0
- axion/runtime/recovery.py +261 -0
- axion/runtime/remote.py +28 -0
- axion/runtime/sandbox.py +68 -0
- axion/runtime/scheduler.py +231 -0
- axion/runtime/session.py +365 -0
- axion/runtime/sharing.py +159 -0
- axion/runtime/skills.py +124 -0
- axion/runtime/tasks.py +258 -0
- axion/runtime/usage.py +241 -0
- axion/runtime/workers.py +186 -0
- axion/telemetry/__init__.py +0 -0
- axion/telemetry/events.py +67 -0
- axion/telemetry/profile.py +49 -0
- axion/telemetry/sink.py +60 -0
- axion/telemetry/tracer.py +95 -0
- axion/tools/__init__.py +0 -0
- axion/tools/lane_completion.py +33 -0
- axion/tools/registry.py +853 -0
- axion/tools/tool_search.py +226 -0
- axion_code-1.0.0.dist-info/METADATA +709 -0
- axion_code-1.0.0.dist-info/RECORD +82 -0
- axion_code-1.0.0.dist-info/WHEEL +4 -0
- axion_code-1.0.0.dist-info/entry_points.txt +2 -0
- axion_code-1.0.0.dist-info/licenses/LICENSE +21 -0
axion/api/client.py
ADDED
|
@@ -0,0 +1,259 @@
|
|
|
1
|
+
"""Provider client factory - dispatches to Anthropic or OpenAI-compatible.
|
|
2
|
+
|
|
3
|
+
Maps to: rust/crates/api/src/client.rs
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
import enum
|
|
9
|
+
from typing import Any, AsyncIterator
|
|
10
|
+
|
|
11
|
+
from axion.api.anthropic import AnthropicClient, AuthCredentials
|
|
12
|
+
from axion.api.error import ApiError
|
|
13
|
+
from axion.api.ollama import OllamaClient, is_ollama_model
|
|
14
|
+
from axion.api.openai_compat import OpenAiCompatClient, OpenAiCompatConfig
|
|
15
|
+
from axion.api.types import MessageRequest, MessageResponse, StreamEvent
|
|
16
|
+
|
|
17
|
+
# Model alias resolution.
# Maps short, user-facing aliases (e.g. "opus", "gpt4", "local") to the fully
# qualified model IDs sent to the provider APIs. Keys are matched after
# lowercasing/stripping in resolve_model_alias(); the "[1m]" variants exist so
# Claude Code-style context-suffixed aliases hit a direct match first.
MODEL_ALIASES: dict[str, str] = {
    # Anthropic Claude
    "opus": "claude-opus-4-6",
    "sonnet": "claude-sonnet-4-6",
    "haiku": "claude-haiku-4-5",
    "opus[1m]": "claude-opus-4-6",
    "sonnet[1m]": "claude-sonnet-4-6",
    "haiku[1m]": "claude-haiku-4-5",

    # OpenAI — GPT-4 series
    "gpt4": "gpt-4o",
    "gpt4o": "gpt-4o",
    "gpt-4": "gpt-4o",
    "4o": "gpt-4o",
    "gpt-4o-mini": "gpt-4o-mini",
    "4o-mini": "gpt-4o-mini",
    "gpt-4.1": "gpt-4.1",
    "gpt-4.1-mini": "gpt-4.1-mini",
    "gpt-4.1-nano": "gpt-4.1-nano",

    # OpenAI — GPT-5 series
    "gpt5": "gpt-5",
    "gpt-5": "gpt-5",
    "5": "gpt-5",
    "gpt-5-mini": "gpt-5-mini",
    "5-mini": "gpt-5-mini",
    "gpt-5-nano": "gpt-5-nano",
    "5-nano": "gpt-5-nano",
    "gpt-5-pro": "gpt-5-pro",
    "5-pro": "gpt-5-pro",
    "gpt-5.4": "gpt-5.4",
    "gpt-5.4-mini": "gpt-5.4-mini",
    "gpt-5.4-nano": "gpt-5.4-nano",
    "gpt-5.4-pro": "gpt-5.4-pro",

    # OpenAI — Codex (real Codex models via the /v1/responses endpoint)
    "codex": "gpt-5-codex",
    "codex-mini": "gpt-5-codex-mini",
    "gpt-5-codex": "gpt-5-codex",
    "gpt-5-codex-mini": "gpt-5-codex-mini",

    # OpenAI — o-series (reasoning)
    "o1": "o1",
    "o1-pro": "o1-pro",
    "o3": "o3",
    "o3-mini": "o3-mini",
    "o3-pro": "o3-pro",
    "o4-mini": "o4-mini",

    # xAI
    "grok": "grok-2",
    "grok2": "grok-2",
    "grok-3": "grok-3",

    # Ollama / local
    "local": "llama3.1",
    "llama": "llama3.1",
    "llama4": "llama4-scout",
    "mistral": "mistral",
    "codellama": "codellama",
    "deepseek": "deepseek-coder-v2",
    "phi": "phi3",
    "gemma": "gemma2",
    "qwen": "qwen2.5-coder",
}
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
class ProviderKind(enum.Enum):
    """Backend API family that serves a given model.

    Values are stable string tags used in error messages (see
    ``ProviderClient.from_model``).
    """

    ANTHROPIC = "anthropic"
    OPENAI = "openai"
    OPENAI_CODEX = "openai_codex"  # /v1/responses endpoint
    XAI = "xai"
    OLLAMA = "ollama"
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def _is_codex_model(resolved: str) -> bool:
|
|
94
|
+
"""Codex models require the /v1/responses endpoint."""
|
|
95
|
+
name = resolved.lower()
|
|
96
|
+
return "codex" in name
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def resolve_model_alias(model: str | None) -> str:
    """Resolve short model aliases to full model IDs.

    Handles Claude Code format like "opus[1m]", "sonnet[1m]" by first trying
    a direct lookup and then retrying with the trailing "[...]" suffix
    removed. Unknown names are returned verbatim; ``None``/empty input maps
    to the default model.
    """
    import re

    if not model:
        return "claude-sonnet-4-6"

    normalized = model.lower().strip()
    # Try the name as-is, then with any "[context]" suffix stripped
    # (e.g. "opus[1m]" -> "opus").
    for candidate in (normalized, re.sub(r"\[.*?\]$", "", normalized).strip()):
        if candidate in MODEL_ALIASES:
            return MODEL_ALIASES[candidate]

    # Not an alias — assume it is already a full model ID.
    return model
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def detect_provider_kind(model: str) -> ProviderKind:
    """Detect the provider from a model name.

    The name is alias-resolved first; unrecognized names fall back to
    Anthropic.
    """
    name = resolve_model_alias(model)
    if name.startswith("claude-"):
        return ProviderKind.ANTHROPIC
    if name.startswith("grok-"):
        return ProviderKind.XAI
    # Codex models route to the Responses API, not Chat Completions.
    if _is_codex_model(name):
        return ProviderKind.OPENAI_CODEX
    # str.startswith accepts a tuple of prefixes.
    if name.startswith(("gpt-", "o1", "o3", "o4", "codex", "gpt-5")):
        return ProviderKind.OPENAI
    if is_ollama_model(name):
        return ProviderKind.OLLAMA
    # Default to Anthropic.
    return ProviderKind.ANTHROPIC
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
# Max tokens per model.
# Maximum *output* token budgets keyed by resolved model ID; consulted by
# max_tokens_for_model().
MAX_TOKENS_FOR_MODEL: dict[str, int] = {
    "claude-opus-4-6": 32_000,
    "claude-sonnet-4-6": 64_000,
    "claude-haiku-4-5": 64_000,
}

# Fallback output-token budget for models not listed above.
DEFAULT_MAX_TOKENS = 16_000
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
def max_tokens_for_model(model: str) -> int:
    """Get the max output tokens for a model.

    Aliases are resolved first; unknown models get DEFAULT_MAX_TOKENS.
    """
    key = resolve_model_alias(model)
    return MAX_TOKENS_FOR_MODEL.get(key, DEFAULT_MAX_TOKENS)
|
|
154
|
+
|
|
155
|
+
|
|
156
|
+
class ProviderClient:
    """Unified provider client that dispatches to the correct backend.

    Exactly one backend client is expected to be configured; dispatch picks
    the first non-None one in a fixed order.

    Maps to: rust/crates/api/src/client.rs::ProviderClient
    """

    def __init__(
        self,
        kind: ProviderKind,
        anthropic: AnthropicClient | None = None,
        openai_compat: OpenAiCompatClient | None = None,
        openai_responses: Any = None,  # OpenAiResponsesClient — Any to avoid import cycle
        ollama: OllamaClient | None = None,
    ) -> None:
        self._kind = kind
        self._anthropic = anthropic
        self._openai_compat = openai_compat
        self._openai_responses = openai_responses
        self._ollama = ollama

    @classmethod
    def from_model(
        cls,
        model: str,
        auth: AuthCredentials | None = None,
    ) -> ProviderClient:
        """Create a provider client based on the model name."""
        resolved = resolve_model_alias(model)
        kind = detect_provider_kind(resolved)

        if kind == ProviderKind.ANTHROPIC:
            # Explicit credentials win over environment discovery.
            anthropic = (
                AnthropicClient(auth=auth) if auth is not None else AnthropicClient.from_env()
            )
            return cls(kind=kind, anthropic=anthropic)

        if kind in (ProviderKind.XAI, ProviderKind.OPENAI):
            config = (
                OpenAiCompatConfig.xai()
                if kind == ProviderKind.XAI
                else OpenAiCompatConfig.openai()
            )
            return cls(kind=kind, openai_compat=OpenAiCompatClient.from_env(config))

        if kind == ProviderKind.OPENAI_CODEX:
            # Local import to avoid a module-level import cycle.
            from axion.api.openai_responses import OpenAiResponsesClient

            return cls(kind=kind, openai_responses=OpenAiResponsesClient.from_env())

        if kind == ProviderKind.OLLAMA:
            return cls(kind=kind, ollama=OllamaClient.from_env(model=resolved))

        raise ApiError(f"Provider {kind.value} not yet implemented")

    @property
    def provider_kind(self) -> ProviderKind:
        return self._kind

    def _backends(self) -> tuple:
        """All backend slots in dispatch priority order."""
        return (self._anthropic, self._openai_responses, self._openai_compat, self._ollama)

    def _active_backend(self) -> Any:
        """Return the first configured backend client, or raise ApiError."""
        for backend in self._backends():
            if backend is not None:
                return backend
        raise ApiError("No provider client configured")

    async def send_message(self, request: MessageRequest) -> MessageResponse:
        """Send a non-streaming message request."""
        return await self._active_backend().send_message(request)

    async def stream_message(
        self, request: MessageRequest
    ) -> AsyncIterator[StreamEvent]:
        """Send a streaming request and yield events."""
        async for event in self._active_backend().stream_message(request):
            yield event

    async def close(self) -> None:
        """Close underlying HTTP clients."""
        for backend in self._backends():
            if backend is not None:
                await backend.close()
|
axion/api/error.py
ADDED
|
@@ -0,0 +1,161 @@
|
|
|
1
|
+
"""API error types.
|
|
2
|
+
|
|
3
|
+
Maps to: rust/crates/api/src/error.rs
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from __future__ import annotations
|
|
7
|
+
|
|
8
|
+
# Lowercase substrings that identify a provider error message as "context
# window exceeded"; matched case-insensitively by
# looks_like_context_window_error().
CONTEXT_WINDOW_ERROR_MARKERS = [
    "maximum context length",
    "context window",
    "context length",
    "too many tokens",
    "prompt is too long",
    "input is too long",
    "request is too large",
]

# Lowercase substrings that identify a generic fatal wrapper error message;
# matched case-insensitively by looks_like_generic_fatal_error().
GENERIC_FATAL_WRAPPER_MARKERS = [
    "something went wrong while processing your request",
    "please try again, or use /new to start a fresh session",
]
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class ApiError(Exception):
    """Base class for all API errors.

    Subclasses override :meth:`is_retryable` and :meth:`request_id` where a
    retry is sensible or a provider request id is available.
    """

    def is_retryable(self) -> bool:
        # Errors are fatal unless a subclass says otherwise.
        return False

    def request_id(self) -> str | None:
        # The base error carries no provider request id.
        return None
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class MissingCredentialsError(ApiError):
    """No API key or OAuth token available for a provider.

    The message lists the environment variables the user can set to fix it.
    """

    def __init__(self, provider: str, env_vars: list[str]) -> None:
        self.provider = provider
        self.env_vars = env_vars
        options = ", ".join(env_vars)
        super().__init__(f"Missing credentials for {provider}. Set one of: {options}")
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
class ContextWindowExceededError(ApiError):
    """Request exceeds the model's context window.

    Token counts are estimates computed before sending the request.
    """

    def __init__(
        self,
        model: str,
        estimated_input_tokens: int,
        requested_output_tokens: int,
        estimated_total_tokens: int,
        context_window_tokens: int,
    ) -> None:
        # Keep every figure on the instance so callers can decide how to
        # compact or truncate the conversation.
        self.model = model
        self.estimated_input_tokens = estimated_input_tokens
        self.requested_output_tokens = requested_output_tokens
        self.estimated_total_tokens = estimated_total_tokens
        self.context_window_tokens = context_window_tokens
        super().__init__(
            f"Context window exceeded for {model}: "
            f"{estimated_total_tokens} tokens > {context_window_tokens} limit"
        )
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
class ExpiredOAuthTokenError(ApiError):
    """OAuth token has expired and must be refreshed before retrying."""

    def __init__(self) -> None:
        super().__init__("OAuth token has expired")
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
class AuthError(ApiError):
    """Authentication failed (bad key, revoked token, etc.)."""

    def __init__(self, message: str) -> None:
        super().__init__(message)
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
class HttpError(ApiError):
    """Low-level HTTP transport error (connection, TLS, timeout, ...)."""

    def __init__(self, message: str, cause: Exception | None = None) -> None:
        # Keep the originating exception around for diagnostics/chaining.
        self.cause = cause
        super().__init__(message)

    def is_retryable(self) -> bool:
        # Transport failures are treated as transient — always retryable.
        return True
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
class ApiResponseError(ApiError):
    """API returned an error response (non-success status).

    Carries the HTTP status, the provider's error type/message when parsed,
    the raw body, and whether the caller should retry.
    """

    def __init__(
        self,
        status: int,
        error_type: str | None = None,
        message: str | None = None,
        request_id_val: str | None = None,
        body: str = "",
        retryable: bool = False,
    ) -> None:
        self.status = status
        self.error_type = error_type
        self._message = message
        self._request_id = request_id_val
        self.body = body
        self.retryable = retryable
        # Prefer the structured message; otherwise show a truncated raw body.
        summary = message if message else body[:200]
        super().__init__(f"API error {status}: {summary}")

    def is_retryable(self) -> bool:
        return self.retryable

    def request_id(self) -> str | None:
        return self._request_id
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
class RetriesExhaustedError(ApiError):
    """All retry attempts failed.

    Wraps the final error; retryability and request id are delegated to it.
    """

    def __init__(self, attempts: int, last_error: ApiError) -> None:
        self.attempts = attempts
        self.last_error = last_error
        super().__init__(f"Exhausted {attempts} retries. Last error: {last_error}")

    def is_retryable(self) -> bool:
        # Mirror whatever the final underlying error reported.
        return self.last_error.is_retryable()

    def request_id(self) -> str | None:
        return self.last_error.request_id()
|
|
134
|
+
|
|
135
|
+
|
|
136
|
+
class InvalidSseFrameError(ApiError):
    """SSE frame could not be parsed from the stream."""

    def __init__(self, reason: str) -> None:
        super().__init__(f"Invalid SSE frame: {reason}")
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
class BackoffOverflowError(ApiError):
    """Backoff delay calculation overflowed its bounds."""

    def __init__(self, attempt: int, base_delay_ms: int) -> None:
        # Retained so callers can log/report the failing schedule.
        self.attempt = attempt
        self.base_delay_ms = base_delay_ms
        super().__init__(f"Backoff overflow at attempt {attempt}")
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
def looks_like_context_window_error(message: str) -> bool:
    """Check if an error message indicates a context window exceeded error.

    Matching is case-insensitive against CONTEXT_WINDOW_ERROR_MARKERS.
    """
    haystack = message.lower()
    for marker in CONTEXT_WINDOW_ERROR_MARKERS:
        if marker in haystack:
            return True
    return False
|
|
156
|
+
|
|
157
|
+
|
|
158
|
+
def looks_like_generic_fatal_error(message: str) -> bool:
    """Check if an error message indicates a generic fatal wrapper error.

    Matching is case-insensitive against GENERIC_FATAL_WRAPPER_MARKERS.
    """
    haystack = message.lower()
    for marker in GENERIC_FATAL_WRAPPER_MARKERS:
        if marker in haystack:
            return True
    return False
|