voxagent 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- voxagent/__init__.py +143 -0
- voxagent/_version.py +5 -0
- voxagent/agent/__init__.py +32 -0
- voxagent/agent/abort.py +178 -0
- voxagent/agent/core.py +902 -0
- voxagent/code/__init__.py +9 -0
- voxagent/mcp/__init__.py +16 -0
- voxagent/mcp/manager.py +188 -0
- voxagent/mcp/tool.py +152 -0
- voxagent/providers/__init__.py +110 -0
- voxagent/providers/anthropic.py +498 -0
- voxagent/providers/augment.py +293 -0
- voxagent/providers/auth.py +116 -0
- voxagent/providers/base.py +268 -0
- voxagent/providers/chatgpt.py +415 -0
- voxagent/providers/claudecode.py +162 -0
- voxagent/providers/cli_base.py +265 -0
- voxagent/providers/codex.py +183 -0
- voxagent/providers/failover.py +90 -0
- voxagent/providers/google.py +532 -0
- voxagent/providers/groq.py +96 -0
- voxagent/providers/ollama.py +425 -0
- voxagent/providers/openai.py +435 -0
- voxagent/providers/registry.py +175 -0
- voxagent/py.typed +1 -0
- voxagent/security/__init__.py +14 -0
- voxagent/security/events.py +75 -0
- voxagent/security/filter.py +169 -0
- voxagent/security/registry.py +87 -0
- voxagent/session/__init__.py +39 -0
- voxagent/session/compaction.py +237 -0
- voxagent/session/lock.py +103 -0
- voxagent/session/model.py +109 -0
- voxagent/session/storage.py +184 -0
- voxagent/streaming/__init__.py +52 -0
- voxagent/streaming/emitter.py +286 -0
- voxagent/streaming/events.py +255 -0
- voxagent/subagent/__init__.py +20 -0
- voxagent/subagent/context.py +124 -0
- voxagent/subagent/definition.py +172 -0
- voxagent/tools/__init__.py +32 -0
- voxagent/tools/context.py +50 -0
- voxagent/tools/decorator.py +175 -0
- voxagent/tools/definition.py +131 -0
- voxagent/tools/executor.py +109 -0
- voxagent/tools/policy.py +89 -0
- voxagent/tools/registry.py +89 -0
- voxagent/types/__init__.py +46 -0
- voxagent/types/messages.py +134 -0
- voxagent/types/run.py +176 -0
- voxagent-0.1.0.dist-info/METADATA +186 -0
- voxagent-0.1.0.dist-info/RECORD +53 -0
- voxagent-0.1.0.dist-info/WHEEL +4 -0
|
@@ -0,0 +1,265 @@
|
|
|
1
|
+
"""Base class for CLI-wrapped LLM providers.
|
|
2
|
+
|
|
3
|
+
This module provides a base class for providers that wrap CLI tools like
|
|
4
|
+
auggie, codex, and claude instead of making direct HTTP API calls.
|
|
5
|
+
|
|
6
|
+
CLI providers spawn subprocesses and communicate via stdin/stdout.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
import asyncio
|
|
12
|
+
import json
|
|
13
|
+
import logging
|
|
14
|
+
import shutil
|
|
15
|
+
import subprocess
|
|
16
|
+
from abc import abstractmethod
|
|
17
|
+
from collections.abc import AsyncIterator
|
|
18
|
+
from typing import Any
|
|
19
|
+
|
|
20
|
+
from voxagent.providers.base import (
|
|
21
|
+
AbortSignal,
|
|
22
|
+
BaseProvider,
|
|
23
|
+
ErrorChunk,
|
|
24
|
+
MessageEndChunk,
|
|
25
|
+
StreamChunk,
|
|
26
|
+
TextDeltaChunk,
|
|
27
|
+
)
|
|
28
|
+
from voxagent.types import Message
|
|
29
|
+
|
|
30
|
+
logger = logging.getLogger(__name__)
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class CLINotFoundError(Exception):
    """Raised when a required CLI tool is not found on the system PATH."""
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
class CLIProvider(BaseProvider):
    """Base class for CLI-wrapped providers.

    These providers spawn CLI tools as subprocesses rather than making HTTP calls.
    They require the CLI tools to be installed on the system.

    Subclasses must implement:
    - CLI_NAME: Name of the CLI executable
    - _build_cli_args: Build command line arguments
    - _parse_output: Parse CLI output to extract response
    """

    # Subclasses should set these.
    CLI_NAME: str = ""  # executable name looked up on PATH via shutil.which
    ENV_KEY: str = ""  # environment variable for the API key, if the CLI uses one

    def __init__(
        self,
        model: str | None = None,
        api_key: str | None = None,
        base_url: str | None = None,
        **kwargs: Any,
    ) -> None:
        """Initialize CLI provider.

        Args:
            model: Model name to use.
            api_key: Optional API key (passed to CLI if supported).
            base_url: Optional base URL (unused for most CLIs).
            **kwargs: Additional arguments forwarded to BaseProvider.
        """
        super().__init__(api_key=api_key, base_url=base_url, **kwargs)
        self._model = model
        # Resolved lazily by _get_cli_path() and cached for subsequent calls.
        self._cli_path: str | None = None

    def _get_cli_path(self) -> str:
        """Get the path to the CLI executable.

        Returns:
            Path to the CLI executable.

        Raises:
            CLINotFoundError: If the CLI is not found in PATH.
        """
        if self._cli_path is None:
            self._cli_path = shutil.which(self.CLI_NAME)
            if not self._cli_path:
                raise CLINotFoundError(
                    f"CLI '{self.CLI_NAME}' not found in PATH. "
                    "Please install it first."
                )
        return self._cli_path

    @property
    def supports_streaming(self) -> bool:
        """CLI providers typically don't support true streaming."""
        return False

    @abstractmethod
    def _build_cli_args(
        self,
        prompt: str,
        system: str | None = None,
    ) -> list[str]:
        """Build CLI command arguments.

        Args:
            prompt: The user prompt to send.
            system: Optional system prompt.

        Returns:
            List of command line arguments (excluding the executable itself,
            which _run_cli prepends).
        """
        ...

    @abstractmethod
    def _parse_output(self, stdout: str, stderr: str) -> str:
        """Parse CLI output to extract response text.

        Args:
            stdout: Standard output from CLI.
            stderr: Standard error from CLI.

        Returns:
            Extracted response text.
        """
        ...

    def _messages_to_prompt(self, messages: list[Message]) -> str:
        """Convert message list to a single prompt string.

        CLI tools typically don't support multi-turn conversations natively,
        so we concatenate messages into a single prompt. Messages with
        non-string content, and roles other than user/assistant, are skipped.

        Args:
            messages: List of conversation messages.

        Returns:
            Combined prompt string.
        """
        parts: list[str] = []
        for msg in messages:
            if msg.role == "user" and isinstance(msg.content, str):
                parts.append(msg.content)
            elif msg.role == "assistant" and isinstance(msg.content, str):
                parts.append(f"[Previous response: {msg.content}]")
        return "\n\n".join(parts)

    async def _run_cli(
        self,
        prompt: str,
        system: str | None = None,
    ) -> str:
        """Run CLI command and return parsed output.

        Args:
            prompt: User prompt.
            system: Optional system prompt.

        Returns:
            Parsed response text.

        Raises:
            CLINotFoundError: If the CLI executable is missing.
            Exception: If CLI execution fails.
        """
        cli_path = self._get_cli_path()
        args = [cli_path] + self._build_cli_args(prompt, system)

        logger.debug("Running CLI: %s", " ".join(args))

        proc = await asyncio.create_subprocess_exec(
            *args,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )

        stdout_bytes, stderr_bytes = await proc.communicate()
        stdout = stdout_bytes.decode("utf-8", errors="replace")
        stderr = stderr_bytes.decode("utf-8", errors="replace")

        # A non-zero exit is logged but not fatal: some CLIs emit usable
        # output alongside diagnostics, so extraction is left to _parse_output.
        if proc.returncode != 0:
            logger.warning("CLI exited with code %d: %s", proc.returncode, stderr)

        return self._parse_output(stdout, stderr)

    async def stream(
        self,
        messages: list[Message],
        system: str | None = None,
        tools: list[Any] | None = None,
        abort_signal: AbortSignal | None = None,
    ) -> AsyncIterator[StreamChunk]:
        """Stream a response from the CLI.

        CLI providers don't truly stream - we run the CLI and yield the result
        as a single TextDeltaChunk followed by a MessageEndChunk.

        Args:
            messages: Conversation messages.
            system: Optional system prompt.
            tools: Tool definitions (not supported by most CLIs).
            abort_signal: Optional abort signal.

        Yields:
            StreamChunk objects.
        """
        if tools:
            logger.warning("Tools not supported by CLI provider %s", self.name)

        # NOTE(review): abort_signal is accepted but never consulted here, so
        # the subprocess always runs to completion — confirm this is intended.
        try:
            prompt = self._messages_to_prompt(messages)
            response = await self._run_cli(prompt, system)
            if response:
                yield TextDeltaChunk(delta=response)
        except CLINotFoundError as e:
            yield ErrorChunk(error=str(e))
        except Exception as e:
            yield ErrorChunk(error=f"CLI error: {e}")

        yield MessageEndChunk()

    async def complete(
        self,
        messages: list[Message],
        system: str | None = None,
        tools: list[Any] | None = None,
    ) -> Message:
        """Get a complete response from the CLI.

        Args:
            messages: Conversation messages.
            system: Optional system prompt.
            tools: Tool definitions (not supported by most CLIs).

        Returns:
            The assistant's response message.
        """
        if tools:
            logger.warning("Tools not supported by CLI provider %s", self.name)

        prompt = self._messages_to_prompt(messages)
        response = await self._run_cli(prompt, system)
        return Message(role="assistant", content=response)

    def count_tokens(
        self,
        messages: list[Message],
        system: str | None = None,
    ) -> int:
        """Estimate token count (rough approximation).

        Args:
            messages: Conversation messages.
            system: Optional system prompt.

        Returns:
            Approximate token count (~4 characters per token).
        """
        # Single join instead of repeated string concatenation in a loop.
        text = (system or "") + "".join(
            msg.content for msg in messages if isinstance(msg.content, str)
        )
        # Rough estimate: ~4 chars per token
        return len(text) // 4
|
|
262
|
+
|
|
263
|
+
|
|
264
|
+
# Names exported via `from voxagent.providers.cli_base import *`.
__all__ = ["CLIProvider", "CLINotFoundError"]
|
|
265
|
+
|
|
@@ -0,0 +1,183 @@
|
|
|
1
|
+
"""OpenAI Codex CLI provider.
|
|
2
|
+
|
|
3
|
+
This provider wraps the OpenAI Codex CLI (codex command).
|
|
4
|
+
It requires:
|
|
5
|
+
1. The codex CLI to be installed: npm install -g @openai/codex
|
|
6
|
+
2. Authentication via: codex login
|
|
7
|
+
|
|
8
|
+
Models available:
|
|
9
|
+
- o3: OpenAI o3
|
|
10
|
+
- o4-mini: OpenAI o4-mini (default)
|
|
11
|
+
- gpt-4.1: GPT-4.1
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
from __future__ import annotations
|
|
15
|
+
|
|
16
|
+
import json
|
|
17
|
+
import logging
|
|
18
|
+
from collections.abc import AsyncIterator
|
|
19
|
+
from typing import Any
|
|
20
|
+
|
|
21
|
+
from voxagent.providers.cli_base import CLINotFoundError, CLIProvider
|
|
22
|
+
from voxagent.providers.base import (
|
|
23
|
+
AbortSignal,
|
|
24
|
+
ErrorChunk,
|
|
25
|
+
MessageEndChunk,
|
|
26
|
+
StreamChunk,
|
|
27
|
+
TextDeltaChunk,
|
|
28
|
+
)
|
|
29
|
+
from voxagent.types import Message
|
|
30
|
+
|
|
31
|
+
logger = logging.getLogger(__name__)
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class CodexProvider(CLIProvider):
    """Provider for OpenAI Codex CLI.

    Uses the codex CLI in exec mode with JSON output for non-interactive use.
    """

    CLI_NAME = "codex"
    ENV_KEY = "OPENAI_API_KEY"

    # Models that work with Codex using ChatGPT Plus account
    # Note: Many models (o3, o4-mini, gpt-4.1) require API key, not ChatGPT Plus
    # "default" means don't specify a model and use the CLI's default
    SUPPORTED_MODELS = [
        "default",
    ]

    def __init__(
        self,
        model: str = "default",
        api_key: str | None = None,
        base_url: str | None = None,
        **kwargs: Any,
    ) -> None:
        """Initialize Codex provider.

        Args:
            model: Model name; "default" (the only entry in SUPPORTED_MODELS)
                lets the CLI pick its configured default. Other model names
                are passed through via --model but may require an API key.
            api_key: Optional OpenAI API key.
            base_url: Optional base URL override.
            **kwargs: Additional arguments.
        """
        super().__init__(model=model, api_key=api_key, base_url=base_url, **kwargs)

    @property
    def name(self) -> str:
        """Get the provider name."""
        return "codex"

    @property
    def models(self) -> list[str]:
        """Get supported models."""
        return self.SUPPORTED_MODELS

    @property
    def supports_tools(self) -> bool:
        """Codex has tool support but we don't expose it."""
        return False

    @property
    def context_limit(self) -> int:
        """Approximate context limit."""
        return 128000

    def _build_cli_args(
        self,
        prompt: str,
        system: str | None = None,
    ) -> list[str]:
        """Build codex CLI arguments.

        Uses exec mode for non-interactive execution with JSON output.

        NOTE(review): the system prompt is accepted but not forwarded to the
        CLI — confirm whether codex exec has a flag for it.
        """
        args = ["exec", "--json"]

        # Only pass --model if not using "default"
        if self._model and self._model != "default":
            args.extend(["--model", self._model])

        # Add the prompt
        args.append(prompt)

        return args

    def _parse_output(self, stdout: str, stderr: str) -> str:
        """Parse codex CLI JSON output.

        The --json flag outputs JSONL events. We look for agent_message items.
        Format: {"type":"item.completed","item":{"type":"agent_message","text":"..."}}

        Args:
            stdout: Standard output from the CLI (JSONL events).
            stderr: Standard error from the CLI (unused here).

        Returns:
            Concatenated agent_message text, or raw stdout if nothing matched.
        """
        # Parse JSONL output and extract text from agent_message items
        text_parts: list[str] = []

        for line in stdout.strip().split("\n"):
            if not line.strip():
                continue
            try:
                event = json.loads(line)
                # Look for item.completed events with agent_message type
                if event.get("type") == "item.completed":
                    item = event.get("item", {})
                    if item.get("type") == "agent_message":
                        text = item.get("text", "")
                        if text:
                            text_parts.append(text)
            except json.JSONDecodeError:
                # If not JSON, treat as plain text
                text_parts.append(line)

        return "\n".join(text_parts) if text_parts else stdout.strip()

    async def stream(
        self,
        messages: list[Message],
        system: str | None = None,
        tools: list[Any] | None = None,
        abort_signal: AbortSignal | None = None,
    ) -> AsyncIterator[StreamChunk]:
        """Stream a response from Codex CLI.

        Note: The codex CLI has its own tool execution capabilities.
        Tools passed from voxDomus are not used.

        Args:
            messages: Conversation messages.
            system: Optional system prompt.
            tools: Tool definitions (ignored; codex runs its own tools).
            abort_signal: Optional abort signal (not consulted).

        Yields:
            StreamChunk objects (one TextDeltaChunk, then MessageEndChunk).
        """
        if tools:
            logger.debug(
                "Codex CLI has its own tools - ignoring %d passed tools",
                len(tools),
            )

        try:
            prompt = self._messages_to_prompt(messages)
            response = await self._run_cli(prompt, system)
            if response:
                yield TextDeltaChunk(delta=response)
        except CLINotFoundError as e:
            yield ErrorChunk(error=str(e))
        except Exception as e:
            yield ErrorChunk(error=f"Codex CLI error: {e}")

        yield MessageEndChunk()

    async def complete(
        self,
        messages: list[Message],
        system: str | None = None,
        tools: list[Any] | None = None,
    ) -> Message:
        """Get a complete response from Codex CLI.

        Args:
            messages: Conversation messages.
            system: Optional system prompt.
            tools: Tool definitions (ignored; codex runs its own tools).

        Returns:
            The assistant's response message.

        Raises:
            Exception: If the stream yields an ErrorChunk (type kept broad
                for backward compatibility with existing callers).
        """
        text_parts: list[str] = []

        async for chunk in self.stream(messages, system, tools):
            if isinstance(chunk, TextDeltaChunk):
                text_parts.append(chunk.delta)
            elif isinstance(chunk, ErrorChunk):
                raise Exception(chunk.error)

        return Message(role="assistant", content="".join(text_parts))
|
|
180
|
+
|
|
181
|
+
|
|
182
|
+
# Names exported via `from voxagent.providers.codex import *`.
__all__ = ["CodexProvider"]
|
|
183
|
+
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
"""Failover logic for provider profiles."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Awaitable, Callable, TypeVar
|
|
6
|
+
|
|
7
|
+
from voxagent.providers.auth import (
|
|
8
|
+
AuthProfile,
|
|
9
|
+
AuthProfileManager,
|
|
10
|
+
FailoverError,
|
|
11
|
+
)
|
|
12
|
+
|
|
13
|
+
T = TypeVar("T")
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class NoProfilesAvailableError(Exception):
    """No profiles available for the requested provider."""

    def __init__(self, provider: str | None = None) -> None:
        # Keep the provider name around so callers can inspect it.
        self.provider = provider
        if provider:
            message = f"No profiles available for provider: {provider}"
        else:
            message = "No profiles available"
        super().__init__(message)
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class FailoverExhaustedError(Exception):
    """All available profiles failed."""

    def __init__(self, last_error: FailoverError) -> None:
        # Retain the final failure so callers can inspect the root cause.
        self.last_error = last_error
        message = f"All profiles failed. Last error: {last_error}"
        super().__init__(message)
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
async def run_with_failover(
    manager: AuthProfileManager,
    provider_name: str,
    operation: Callable[[AuthProfile], Awaitable[T]],
    max_retries: int | None = None,
) -> T:
    """
    Run an operation with automatic failover across available profiles.

    Profiles are tried in the order the manager returns them; the first
    successful call wins. A FailoverError moves on to the next profile,
    while any other exception propagates immediately.

    Args:
        manager: AuthProfileManager with configured profiles
        provider_name: Provider to filter profiles by (e.g., "openai")
        operation: Async callable that takes an AuthProfile and returns a result
        max_retries: Maximum number of profiles to try (default: all available)

    Returns:
        Result from successful operation

    Raises:
        NoProfilesAvailableError: If no profiles are available
        FailoverExhaustedError: If all profiles failed with FailoverError
        Exception: If operation raises a non-FailoverError exception
    """
    candidates = manager.get_available_profiles(provider=provider_name)

    if not candidates:
        raise NoProfilesAvailableError(provider_name)

    # Optionally cap how many profiles are attempted.
    if max_retries is not None:
        candidates = candidates[:max_retries]

    final_failure: FailoverError | None = None

    for candidate in candidates:
        try:
            # record_success stays inside the try so a FailoverError raised
            # by it is also handled by moving to the next profile.
            outcome = await operation(candidate)
            manager.record_success(candidate)
            return outcome
        except FailoverError as exc:
            manager.handle_failover(candidate, exc)
            final_failure = exc

    # Every candidate was tried and failed.
    if final_failure is None:
        # Only reachable when max_retries truncated the list to empty.
        raise NoProfilesAvailableError(provider_name)
    raise FailoverExhaustedError(final_failure)
|
|
90
|
+
|