voxagent 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- voxagent/__init__.py +143 -0
- voxagent/_version.py +5 -0
- voxagent/agent/__init__.py +32 -0
- voxagent/agent/abort.py +178 -0
- voxagent/agent/core.py +902 -0
- voxagent/code/__init__.py +9 -0
- voxagent/mcp/__init__.py +16 -0
- voxagent/mcp/manager.py +188 -0
- voxagent/mcp/tool.py +152 -0
- voxagent/providers/__init__.py +110 -0
- voxagent/providers/anthropic.py +498 -0
- voxagent/providers/augment.py +293 -0
- voxagent/providers/auth.py +116 -0
- voxagent/providers/base.py +268 -0
- voxagent/providers/chatgpt.py +415 -0
- voxagent/providers/claudecode.py +162 -0
- voxagent/providers/cli_base.py +265 -0
- voxagent/providers/codex.py +183 -0
- voxagent/providers/failover.py +90 -0
- voxagent/providers/google.py +532 -0
- voxagent/providers/groq.py +96 -0
- voxagent/providers/ollama.py +425 -0
- voxagent/providers/openai.py +435 -0
- voxagent/providers/registry.py +175 -0
- voxagent/py.typed +1 -0
- voxagent/security/__init__.py +14 -0
- voxagent/security/events.py +75 -0
- voxagent/security/filter.py +169 -0
- voxagent/security/registry.py +87 -0
- voxagent/session/__init__.py +39 -0
- voxagent/session/compaction.py +237 -0
- voxagent/session/lock.py +103 -0
- voxagent/session/model.py +109 -0
- voxagent/session/storage.py +184 -0
- voxagent/streaming/__init__.py +52 -0
- voxagent/streaming/emitter.py +286 -0
- voxagent/streaming/events.py +255 -0
- voxagent/subagent/__init__.py +20 -0
- voxagent/subagent/context.py +124 -0
- voxagent/subagent/definition.py +172 -0
- voxagent/tools/__init__.py +32 -0
- voxagent/tools/context.py +50 -0
- voxagent/tools/decorator.py +175 -0
- voxagent/tools/definition.py +131 -0
- voxagent/tools/executor.py +109 -0
- voxagent/tools/policy.py +89 -0
- voxagent/tools/registry.py +89 -0
- voxagent/types/__init__.py +46 -0
- voxagent/types/messages.py +134 -0
- voxagent/types/run.py +176 -0
- voxagent-0.1.0.dist-info/METADATA +186 -0
- voxagent-0.1.0.dist-info/RECORD +53 -0
- voxagent-0.1.0.dist-info/WHEEL +4 -0
voxagent/providers/chatgpt.py
@@ -0,0 +1,415 @@
"""ChatGPT Backend API provider.

This provider uses ChatGPT's private backend API (used by Codex CLI) to access
models like gpt-5 and gpt-5-codex-mini using a ChatGPT Plus subscription.

API Endpoint: https://chatgpt.com/backend-api/codex/responses
Auth: OAuth Bearer token (from Codex CLI or voxdomus vault)

Note: This is an unofficial/undocumented API.

Tool Format:
    The ChatGPT backend API uses a flat tool format different from the standard
    OpenAI nested format. This provider automatically converts between formats:

    Standard OpenAI format (what voxagent uses):
        {"type": "function", "function": {"name": "...", "description": "...", "parameters": {...}}}

    ChatGPT backend format (flat):
        {"type": "function", "name": "...", "description": "...", "parameters": {...}}

    The conversion is handled transparently in _convert_tools().
"""

from __future__ import annotations

import json
import logging
from collections.abc import AsyncIterator
from typing import Any

import httpx

from voxagent.providers.base import (
    AbortSignal,
    BaseProvider,
    ErrorChunk,
    MessageEndChunk,
    StreamChunk,
    TextDeltaChunk,
    ToolUseChunk,
)
from voxagent.types import Message, ToolCall

logger = logging.getLogger(__name__)

# API endpoint
CHATGPT_API_ENDPOINT = "https://chatgpt.com/backend-api/codex/responses"

# Default instructions
DEFAULT_INSTRUCTIONS = "You are a helpful AI assistant."


class ChatGPTProvider(BaseProvider):
    """Provider for ChatGPT's private backend API.

    Uses OAuth tokens to access ChatGPT Plus models with full tool support.

    The provider automatically converts voxagent's OpenAI-standard tool format
    to the flat format expected by the ChatGPT backend API.
    """

    ENV_KEY = "CHATGPT_ACCESS_TOKEN"

    SUPPORTED_MODELS = [
        "gpt-5",
        "gpt-5-codex",
        "gpt-5-codex-mini",
        "codex-mini-latest",
    ]

    def __init__(
        self,
        model: str = "gpt-5-codex-mini",
        api_key: str | None = None,
        base_url: str | None = None,
        instructions: str | None = None,
        **kwargs: Any,
    ) -> None:
        """Initialize ChatGPT provider.

        Args:
            model: Model name (gpt-5, gpt-5-codex-mini, etc.).
            api_key: OAuth access token. Falls back to CHATGPT_ACCESS_TOKEN env var.
            base_url: Optional override for API endpoint.
            instructions: Custom system instructions for Codex API.
            **kwargs: Additional arguments.
        """
        super().__init__(api_key=api_key, base_url=base_url, **kwargs)
        self._model = model
        self._instructions = instructions or DEFAULT_INSTRUCTIONS
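
    # Construction sketch (editor's illustration, not part of the module):
    # pass the OAuth token explicitly, or rely on the CHATGPT_ACCESS_TOKEN
    # fallback documented above:
    #
    #     ChatGPTProvider(model="gpt-5", api_key=token)
    #     ChatGPTProvider()  # token read from the environment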

    @property
    def name(self) -> str:
        """Get the provider name."""
        return "chatgpt"

    @property
    def models(self) -> list[str]:
        """Get supported models."""
        return self.SUPPORTED_MODELS

    @property
    def supports_tools(self) -> bool:
        """Check if the provider supports tool/function calling.

        Returns:
            True. The ChatGPT backend API supports tools with a flat format.
            Tools are automatically converted from OpenAI's nested format.
        """
        return True

    @property
    def supports_streaming(self) -> bool:
        """ChatGPT backend requires streaming."""
        return True

    @property
    def context_limit(self) -> int:
        """Approximate context limit."""
        return 128000  # GPT-5 models have large context

    def _convert_tools(self, tools: list[Any] | None) -> list[dict[str, Any]]:
        """Convert OpenAI nested tool format to ChatGPT flat format.

        OpenAI standard format (voxagent uses this):
            {"type": "function", "function": {"name": "...", "description": "...", "parameters": {...}}}

        ChatGPT backend format (flat):
            {"type": "function", "name": "...", "description": "...", "parameters": {...}}

        Args:
            tools: List of tools in OpenAI standard format.

        Returns:
            List of tools in ChatGPT flat format.
        """
        if not tools:
            return []

        converted: list[dict[str, Any]] = []
        for tool in tools:
            if isinstance(tool, dict):
                # Check if it's the nested OpenAI format
                if "function" in tool and isinstance(tool["function"], dict):
                    func = tool["function"]
                    converted.append({
                        "type": "function",
                        "name": func.get("name", ""),
                        "description": func.get("description", ""),
                        "parameters": func.get("parameters", {"type": "object", "properties": {}}),
                    })
                # Already flat format or unknown - pass through
                elif "name" in tool:
                    converted.append(tool)
                else:
                    logger.warning("Unknown tool format, skipping: %s", tool)
            else:
                logger.warning("Tool is not a dict, skipping: %s", type(tool))

        return converted
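
    # Conversion sketch (editor's note, not part of the module): given one
    # nested OpenAI-style tool,
    #
    #     [{"type": "function", "function": {
    #         "name": "get_weather",
    #         "description": "Look up current weather.",
    #         "parameters": {"type": "object",
    #                        "properties": {"city": {"type": "string"}}}}}]
    #
    # _convert_tools returns the flat equivalent:
    #
    #     [{"type": "function", "name": "get_weather",
    #       "description": "Look up current weather.",
    #       "parameters": {"type": "object",
    #                      "properties": {"city": {"type": "string"}}}}]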

    def _get_headers(self) -> dict[str, str]:
        """Get request headers."""
        token = self.api_key
        if not token:
            raise ValueError("No access token. Set CHATGPT_ACCESS_TOKEN or pass api_key.")
        return {
            "Authorization": f"Bearer {token}",
            "Content-Type": "application/json",
        }

    def _build_input(
        self,
        messages: list[Message],
        system: str | None = None,
    ) -> list[dict[str, Any]]:
        """Build input array for API request.

        Note: System messages in the messages list are skipped because the
        ChatGPT backend uses a separate 'instructions' field for system prompts.
        The voxagent Agent may pass both a system prompt and system messages
        in the list - we handle this by using the system param only.

        Tool results in voxagent come as user messages whose content is a list
        of ToolResultBlock objects. These need to be converted to the ChatGPT
        function_call_output format.

        Assistant messages with tool_calls need to be converted to function_call
        items so that the backend can match tool results to tool calls.
        """
        input_msgs: list[dict[str, Any]] = []

        for msg in messages:
            # Skip system messages - ChatGPT API doesn't support them in input
            if msg.role == "system":
                continue

            # Handle assistant messages with tool calls
            if msg.role == "assistant" and msg.tool_calls:
                # Add text content first if present
                if isinstance(msg.content, str) and msg.content:
                    input_msgs.append({
                        "type": "message",
                        "role": "assistant",
                        "content": [{"type": "input_text", "text": msg.content}],
                    })
                # Add function_call items for each tool call
                for tc in msg.tool_calls:
                    # Convert params to JSON string if needed
                    args = tc.params
                    if isinstance(args, dict):
                        args = json.dumps(args)
                    input_msgs.append({
                        "type": "function_call",
                        "call_id": tc.id,
                        "name": tc.name,
                        "arguments": args,
                    })
                continue

            # Handle content that may be a list (tool results or content blocks)
            if isinstance(msg.content, list):
                for block in msg.content:
                    # Handle ToolResultBlock (Pydantic model or dict)
                    if hasattr(block, "type") and getattr(block, "type", None) == "tool_result":
                        # Pydantic ToolResultBlock
                        input_msgs.append({
                            "type": "function_call_output",
                            "call_id": getattr(block, "tool_use_id", ""),
                            "output": getattr(block, "content", ""),
                        })
                    elif isinstance(block, dict) and block.get("type") == "tool_result":
                        # Dict-style tool result
                        input_msgs.append({
                            "type": "function_call_output",
                            "call_id": block.get("tool_use_id", ""),
                            "output": block.get("content", ""),
                        })
                    elif hasattr(block, "text"):
                        # TextBlock
                        input_msgs.append({
                            "type": "message",
                            "role": "user" if msg.role == "user" else "assistant",
                            "content": [{"type": "input_text", "text": block.text}],
                        })
                    elif isinstance(block, dict) and "text" in block:
                        input_msgs.append({
                            "type": "message",
                            "role": "user" if msg.role == "user" else "assistant",
                            "content": [{"type": "input_text", "text": block["text"]}],
                        })
            elif isinstance(msg.content, str):
                # Simple string content
                role = "user" if msg.role == "user" else "assistant"
                input_msgs.append({
                    "type": "message",
                    "role": role,
                    "content": [{"type": "input_text", "text": msg.content}],
                })

        return input_msgs
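
    # Round-trip sketch (editor's note, not part of the module): one tool call
    # and its result flatten into three input items, assuming voxagent's
    # Message/ToolCall models:
    #
    #     Message(role="user", content="What is 2+2?")
    #     Message(role="assistant", content=None,
    #             tool_calls=[ToolCall(id="c1", name="calc", params={"expr": "2+2"})])
    #     Message(role="user",
    #             content=[{"type": "tool_result", "tool_use_id": "c1", "content": "4"}])
    #
    # becomes: a user "message" item, a "function_call" (call_id="c1",
    # arguments='{"expr": "2+2"}'), and a "function_call_output" with output "4".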

    def _build_request_body(
        self,
        messages: list[Message],
        system: str | None = None,
        tools: list[Any] | None = None,
    ) -> dict[str, Any]:
        """Build request body for API.

        Args:
            messages: Conversation messages (system messages are filtered out).
            system: System prompt to use as instructions.
            tools: Optional tool definitions in OpenAI standard format.
                These will be converted to ChatGPT's flat format.

        Returns:
            Request body dict ready to send to the ChatGPT backend API.
        """
        # Use system prompt if provided, otherwise fall back to default instructions
        instructions = system if system else self._instructions

        # Convert tools from OpenAI nested format to ChatGPT flat format
        converted_tools = self._convert_tools(tools)
        has_tools = len(converted_tools) > 0

        body: dict[str, Any] = {
            "model": self._model,
            "instructions": instructions,
            "input": self._build_input(messages, system),
            "tools": converted_tools,
            "tool_choice": "auto" if has_tools else "none",
            "parallel_tool_calls": False,
            "reasoning": {"summary": "auto"},
            "store": False,
            "stream": True,
        }
        return body
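
    # Shape sketch (editor's note, not part of the module): for a bare user
    # "hi" with no tools, the method above yields roughly:
    #
    #     {"model": "gpt-5-codex-mini",
    #      "instructions": "You are a helpful AI assistant.",
    #      "input": [{"type": "message", "role": "user",
    #                 "content": [{"type": "input_text", "text": "hi"}]}],
    #      "tools": [], "tool_choice": "none", "parallel_tool_calls": False,
    #      "reasoning": {"summary": "auto"}, "store": False, "stream": True}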

    async def stream(
        self,
        messages: list[Message],
        system: str | None = None,
        tools: list[Any] | None = None,
        abort_signal: AbortSignal | None = None,
    ) -> AsyncIterator[StreamChunk]:
        """Stream a response from ChatGPT backend.

        Args:
            messages: Conversation messages.
            system: Optional system prompt.
            tools: Optional tool definitions.
            abort_signal: Optional abort signal.

        Yields:
            StreamChunk objects.
        """
        body = self._build_request_body(messages, system, tools)
        endpoint = self._base_url or CHATGPT_API_ENDPOINT

        try:
            async with httpx.AsyncClient() as client:
                async with client.stream(
                    "POST",
                    endpoint,
                    headers=self._get_headers(),
                    json=body,
                    timeout=120.0,
                ) as response:
                    if response.status_code == 401:
                        yield ErrorChunk(error="Authentication failed - token may be expired")
                        return
                    response.raise_for_status()

                    async for line in response.aiter_lines():
                        if abort_signal and abort_signal.aborted:
                            break
                        if not line or not line.startswith("data:"):
                            continue
                        data_str = line[5:].strip()
                        if data_str == "[DONE]":
                            break
                        try:
                            data = json.loads(data_str)
                            event_type = data.get("type", "")
                            # Text delta
                            if event_type == "response.output_text.delta":
                                text = data.get("delta", "")
                                if text:
                                    yield TextDeltaChunk(delta=text)
                            # Tool calls - use output_item.done which has all data
                            elif event_type == "response.output_item.done":
                                item = data.get("item", {})
                                if item.get("type") == "function_call":
                                    yield ToolUseChunk(
                                        tool_call=ToolCall(
                                            id=item.get("call_id", ""),
                                            name=item.get("name", ""),
                                            params=json.loads(item.get("arguments", "{}")),
                                        )
                                    )
                        except json.JSONDecodeError:
                            continue
        except httpx.HTTPStatusError as e:
            # Can't access response.text on a streaming response without read()
            yield ErrorChunk(error=f"HTTP {e.response.status_code}")
        except Exception as e:
            yield ErrorChunk(error=str(e))

        yield MessageEndChunk()
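
    # Consumption sketch (editor's note, not part of the module): callers
    # typically filter chunks by type, e.g.
    #
    #     async for chunk in provider.stream(messages, system="Be brief."):
    #         if isinstance(chunk, TextDeltaChunk):
    #             print(chunk.delta, end="")
    #         elif isinstance(chunk, ToolUseChunk):
    #             dispatch(chunk.tool_call)  # "dispatch" is hypothetical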

    async def complete(
        self,
        messages: list[Message],
        system: str | None = None,
        tools: list[Any] | None = None,
    ) -> Message:
        """Get a complete response (collects streamed chunks)."""
        text_parts: list[str] = []
        tool_calls: list[ToolCall] = []

        async for chunk in self.stream(messages, system, tools):
            if isinstance(chunk, TextDeltaChunk):
                text_parts.append(chunk.delta)
            elif isinstance(chunk, ToolUseChunk):
                tool_calls.append(chunk.tool_call)
            elif isinstance(chunk, ErrorChunk):
                raise Exception(chunk.error)

        return Message(
            role="assistant",
            content="".join(text_parts) if text_parts else None,
            tool_calls=tool_calls if tool_calls else None,
        )

    def count_tokens(
        self,
        messages: list[Message],
        system: str | None = None,
    ) -> int:
        """Estimate token count (rough approximation)."""
        text = system or ""
        for msg in messages:
            # Content may be a list of blocks; only plain strings are counted
            if isinstance(msg.content, str):
                text += msg.content
        # Rough estimate: ~4 chars per token
        return len(text) // 4


__all__ = ["ChatGPTProvider", "CHATGPT_API_ENDPOINT"]
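
A minimal usage sketch for the provider above (editor's illustration, not part of the package; assumes a valid OAuth token is exported as CHATGPT_ACCESS_TOKEN, the fallback documented in __init__):

    import asyncio

    from voxagent.providers.chatgpt import ChatGPTProvider
    from voxagent.types import Message

    async def main() -> None:
        provider = ChatGPTProvider(model="gpt-5-codex-mini")
        reply = await provider.complete([Message(role="user", content="ping")])
        print(reply.content)

    asyncio.run(main())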

voxagent/providers/claudecode.py
@@ -0,0 +1,162 @@
"""Claude Code CLI provider.

This provider wraps the Anthropic Claude Code CLI (the claude command).
It requires:
1. The claude CLI to be installed
2. Authentication via: claude setup-token

Models available:
- sonnet: Claude Sonnet (latest)
- opus: Claude Opus (latest)
- haiku: Claude Haiku (latest)
"""

from __future__ import annotations

import logging
from collections.abc import AsyncIterator
from typing import Any

from voxagent.providers.base import (
    AbortSignal,
    ErrorChunk,
    MessageEndChunk,
    StreamChunk,
    TextDeltaChunk,
)
from voxagent.providers.cli_base import CLINotFoundError, CLIProvider
from voxagent.types import Message

logger = logging.getLogger(__name__)


class ClaudeCodeProvider(CLIProvider):
    """Provider for Claude Code CLI.

    Uses the claude CLI in print mode with text output for non-interactive use.
    """

    CLI_NAME = "claude"
    ENV_KEY = "ANTHROPIC_API_KEY"

    SUPPORTED_MODELS = [
        "sonnet",
        "opus",
        "haiku",
    ]

    def __init__(
        self,
        model: str = "sonnet",
        api_key: str | None = None,
        base_url: str | None = None,
        **kwargs: Any,
    ) -> None:
        """Initialize Claude Code provider.

        Args:
            model: Model alias (sonnet, opus, haiku).
            api_key: Optional Anthropic API key.
            base_url: Optional base URL override.
            **kwargs: Additional arguments.
        """
        super().__init__(model=model, api_key=api_key, base_url=base_url, **kwargs)

    @property
    def name(self) -> str:
        """Get the provider name."""
        return "claudecode"

    @property
    def models(self) -> list[str]:
        """Get supported models."""
        return self.SUPPORTED_MODELS

    @property
    def supports_tools(self) -> bool:
        """Claude Code has tool support but we don't expose it."""
        return False

    @property
    def context_limit(self) -> int:
        """Approximate context limit."""
        return 200000

    def _build_cli_args(
        self,
        prompt: str,
        system: str | None = None,
    ) -> list[str]:
        """Build claude CLI arguments.

        Uses print mode for non-interactive execution.
        """
        args = ["--print", "--output-format", "text"]

        if self._model:
            args.extend(["--model", self._model])

        if system:
            args.extend(["--system-prompt", system])

        # Add the prompt
        args.append(prompt)

        return args
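
    # Command-line sketch (editor's note, not part of the module): with
    # model="sonnet" and a system prompt, and assuming the CLIProvider base
    # class prepends the binary name, the invocation comes out as:
    #
    #     claude --print --output-format text --model sonnet \
    #         --system-prompt "Be concise." "Hello there"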

    def _parse_output(self, stdout: str, stderr: str) -> str:
        """Parse claude CLI output."""
        # claude --print outputs just the response text
        return stdout.strip()

    async def stream(
        self,
        messages: list[Message],
        system: str | None = None,
        tools: list[Any] | None = None,
        abort_signal: AbortSignal | None = None,
    ) -> AsyncIterator[StreamChunk]:
        """Stream a response from Claude Code CLI.

        Note: The claude CLI has its own MCP tool configuration.
        Tools passed from voxDomus are not used.
        """
        if tools:
            logger.debug(
                "Claude CLI has its own MCP tools - ignoring %d passed tools",
                len(tools),
            )

        try:
            prompt = self._messages_to_prompt(messages)
            response = await self._run_cli(prompt, system)
            if response:
                yield TextDeltaChunk(delta=response)
        except CLINotFoundError as e:
            yield ErrorChunk(error=str(e))
        except Exception as e:
            yield ErrorChunk(error=f"Claude Code CLI error: {e}")

        yield MessageEndChunk()

    async def complete(
        self,
        messages: list[Message],
        system: str | None = None,
        tools: list[Any] | None = None,
    ) -> Message:
        """Get a complete response from Claude Code CLI."""
        text_parts: list[str] = []

        async for chunk in self.stream(messages, system, tools):
            if isinstance(chunk, TextDeltaChunk):
                text_parts.append(chunk.delta)
            elif isinstance(chunk, ErrorChunk):
                raise Exception(chunk.error)

        return Message(role="assistant", content="".join(text_parts))


__all__ = ["ClaudeCodeProvider"]
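
Both providers expose the same stream()/complete() surface. A minimal end-to-end sketch (editor's illustration, not part of the package; assumes the claude CLI is installed and authenticated via claude setup-token, with import paths as listed in the RECORD above):

    import asyncio

    from voxagent.providers.claudecode import ClaudeCodeProvider
    from voxagent.types import Message

    async def main() -> None:
        provider = ClaudeCodeProvider(model="haiku")
        reply = await provider.complete([Message(role="user", content="Say hi in five words.")])
        print(reply.content)

    asyncio.run(main())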