voxagent 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- voxagent/__init__.py +143 -0
- voxagent/_version.py +5 -0
- voxagent/agent/__init__.py +32 -0
- voxagent/agent/abort.py +178 -0
- voxagent/agent/core.py +902 -0
- voxagent/code/__init__.py +9 -0
- voxagent/mcp/__init__.py +16 -0
- voxagent/mcp/manager.py +188 -0
- voxagent/mcp/tool.py +152 -0
- voxagent/providers/__init__.py +110 -0
- voxagent/providers/anthropic.py +498 -0
- voxagent/providers/augment.py +293 -0
- voxagent/providers/auth.py +116 -0
- voxagent/providers/base.py +268 -0
- voxagent/providers/chatgpt.py +415 -0
- voxagent/providers/claudecode.py +162 -0
- voxagent/providers/cli_base.py +265 -0
- voxagent/providers/codex.py +183 -0
- voxagent/providers/failover.py +90 -0
- voxagent/providers/google.py +532 -0
- voxagent/providers/groq.py +96 -0
- voxagent/providers/ollama.py +425 -0
- voxagent/providers/openai.py +435 -0
- voxagent/providers/registry.py +175 -0
- voxagent/py.typed +1 -0
- voxagent/security/__init__.py +14 -0
- voxagent/security/events.py +75 -0
- voxagent/security/filter.py +169 -0
- voxagent/security/registry.py +87 -0
- voxagent/session/__init__.py +39 -0
- voxagent/session/compaction.py +237 -0
- voxagent/session/lock.py +103 -0
- voxagent/session/model.py +109 -0
- voxagent/session/storage.py +184 -0
- voxagent/streaming/__init__.py +52 -0
- voxagent/streaming/emitter.py +286 -0
- voxagent/streaming/events.py +255 -0
- voxagent/subagent/__init__.py +20 -0
- voxagent/subagent/context.py +124 -0
- voxagent/subagent/definition.py +172 -0
- voxagent/tools/__init__.py +32 -0
- voxagent/tools/context.py +50 -0
- voxagent/tools/decorator.py +175 -0
- voxagent/tools/definition.py +131 -0
- voxagent/tools/executor.py +109 -0
- voxagent/tools/policy.py +89 -0
- voxagent/tools/registry.py +89 -0
- voxagent/types/__init__.py +46 -0
- voxagent/types/messages.py +134 -0
- voxagent/types/run.py +176 -0
- voxagent-0.1.0.dist-info/METADATA +186 -0
- voxagent-0.1.0.dist-info/RECORD +53 -0
- voxagent-0.1.0.dist-info/WHEEL +4 -0
|
@@ -0,0 +1,498 @@
|
|
|
1
|
+
"""Anthropic provider implementation.
|
|
2
|
+
|
|
3
|
+
This module implements the AnthropicProvider for Claude models,
|
|
4
|
+
supporting streaming, tool use, and extended thinking mode.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import json
|
|
8
|
+
from collections.abc import AsyncIterator
|
|
9
|
+
from typing import Any
|
|
10
|
+
|
|
11
|
+
import httpx
|
|
12
|
+
|
|
13
|
+
from voxagent.providers.base import (
|
|
14
|
+
AbortSignal,
|
|
15
|
+
BaseProvider,
|
|
16
|
+
ErrorChunk,
|
|
17
|
+
MessageEndChunk,
|
|
18
|
+
StreamChunk,
|
|
19
|
+
TextDeltaChunk,
|
|
20
|
+
ToolUseChunk,
|
|
21
|
+
)
|
|
22
|
+
from voxagent.types import Message, ToolCall
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class AnthropicProvider(BaseProvider):
    """Provider for Anthropic Claude models.

    Supports Claude 3.5 Sonnet, Claude 3.5 Haiku, Claude 3 Opus, and other models.
    Implements streaming, tool use, and extended thinking mode.
    """

    # Environment variable consulted by the base class when no api_key is given.
    ENV_KEY = "ANTHROPIC_API_KEY"
    # Default API host; overridable via the ``base_url`` constructor argument.
    DEFAULT_BASE_URL = "https://api.anthropic.com"

    # Known-good model identifiers (advisory list; other ids may still work).
    SUPPORTED_MODELS = [
        "claude-3-5-sonnet-20241022",
        "claude-3-5-haiku-20241022",
        "claude-3-opus-20240229",
        "claude-3-sonnet-20240229",
        "claude-3-haiku-20240307",
    ]
|
|
42
|
+
|
|
43
|
+
def __init__(
|
|
44
|
+
self,
|
|
45
|
+
api_key: str | None = None,
|
|
46
|
+
base_url: str | None = None,
|
|
47
|
+
model: str = "claude-3-5-sonnet-20241022",
|
|
48
|
+
thinking: bool = False,
|
|
49
|
+
**kwargs: Any,
|
|
50
|
+
) -> None:
|
|
51
|
+
"""Initialize the Anthropic provider.
|
|
52
|
+
|
|
53
|
+
Args:
|
|
54
|
+
api_key: API key for authentication. Falls back to ANTHROPIC_API_KEY env var.
|
|
55
|
+
base_url: Optional custom base URL for API requests.
|
|
56
|
+
model: Model to use (default: claude-3-5-sonnet-20241022).
|
|
57
|
+
thinking: Enable extended thinking mode.
|
|
58
|
+
**kwargs: Additional provider-specific arguments.
|
|
59
|
+
"""
|
|
60
|
+
super().__init__(api_key=api_key, base_url=base_url, **kwargs)
|
|
61
|
+
self._model = model
|
|
62
|
+
self._thinking = thinking
|
|
63
|
+
# State for SSE parsing
|
|
64
|
+
self._reset_tool_state()
|
|
65
|
+
|
|
66
|
+
    def _reset_tool_state(self) -> None:
        """Reset tool use state for streaming."""
        # No tool_use content block is currently open.
        self._current_tool_id: str | None = None
        self._current_tool_name: str | None = None
        # Arguments for the open tool call arrive as partial-JSON fragments;
        # they are concatenated here and parsed when the block closes.
        self._current_tool_input_json: str = ""
        self._current_block_type: str | None = None
|
|
72
|
+
|
|
73
|
+
    @property
    def name(self) -> str:
        """Provider identifier used for registry lookups."""
        return "anthropic"
|
|
77
|
+
|
|
78
|
+
    @property
    def model(self) -> str:
        """Currently configured model identifier."""
        return self._model
|
|
82
|
+
|
|
83
|
+
@property
|
|
84
|
+
def models(self) -> list[str]:
|
|
85
|
+
"""Get the list of supported model names."""
|
|
86
|
+
return self.SUPPORTED_MODELS.copy()
|
|
87
|
+
|
|
88
|
+
    @property
    def supports_tools(self) -> bool:
        """Whether the provider supports tool/function calling (always True here)."""
        return True
|
|
92
|
+
|
|
93
|
+
    @property
    def supports_streaming(self) -> bool:
        """Whether the provider supports streaming responses (always True here)."""
        return True
|
|
97
|
+
|
|
98
|
+
@property
|
|
99
|
+
def context_limit(self) -> int:
|
|
100
|
+
"""Get the maximum context length in tokens."""
|
|
101
|
+
return 200000
|
|
102
|
+
|
|
103
|
+
def _get_headers(self) -> dict[str, str]:
|
|
104
|
+
"""Get headers for Anthropic API requests."""
|
|
105
|
+
return {
|
|
106
|
+
"x-api-key": self.api_key or "",
|
|
107
|
+
"anthropic-version": "2023-06-01",
|
|
108
|
+
"content-type": "application/json",
|
|
109
|
+
}
|
|
110
|
+
|
|
111
|
+
def _get_api_endpoint(self) -> str:
|
|
112
|
+
"""Get the API endpoint for messages."""
|
|
113
|
+
base = self._base_url or self.DEFAULT_BASE_URL
|
|
114
|
+
return f"{base}/v1/messages"
|
|
115
|
+
|
|
116
|
+
def _convert_messages_to_anthropic(
|
|
117
|
+
self, messages: list[Message]
|
|
118
|
+
) -> list[dict[str, Any]]:
|
|
119
|
+
"""Convert voxagent Messages to Anthropic format.
|
|
120
|
+
|
|
121
|
+
Args:
|
|
122
|
+
messages: List of voxagent Messages.
|
|
123
|
+
|
|
124
|
+
Returns:
|
|
125
|
+
List of Anthropic-formatted message dicts.
|
|
126
|
+
"""
|
|
127
|
+
anthropic_messages: list[dict[str, Any]] = []
|
|
128
|
+
|
|
129
|
+
for msg in messages:
|
|
130
|
+
if msg.role == "system":
|
|
131
|
+
# System messages are handled separately in Anthropic
|
|
132
|
+
continue
|
|
133
|
+
|
|
134
|
+
anthropic_msg: dict[str, Any] = {"role": msg.role}
|
|
135
|
+
|
|
136
|
+
# Handle tool calls in assistant messages
|
|
137
|
+
if msg.tool_calls:
|
|
138
|
+
content_blocks: list[dict[str, Any]] = []
|
|
139
|
+
for tc in msg.tool_calls:
|
|
140
|
+
content_blocks.append({
|
|
141
|
+
"type": "tool_use",
|
|
142
|
+
"id": tc.id,
|
|
143
|
+
"name": tc.name,
|
|
144
|
+
"input": tc.params,
|
|
145
|
+
})
|
|
146
|
+
anthropic_msg["content"] = content_blocks
|
|
147
|
+
else:
|
|
148
|
+
# Regular text content
|
|
149
|
+
if isinstance(msg.content, str):
|
|
150
|
+
anthropic_msg["content"] = msg.content
|
|
151
|
+
else:
|
|
152
|
+
# Convert content blocks
|
|
153
|
+
anthropic_msg["content"] = self._convert_content_blocks(msg.content)
|
|
154
|
+
|
|
155
|
+
anthropic_messages.append(anthropic_msg)
|
|
156
|
+
|
|
157
|
+
return anthropic_messages
|
|
158
|
+
|
|
159
|
+
def _convert_content_blocks(self, blocks: list[Any]) -> list[dict[str, Any]]:
|
|
160
|
+
"""Convert content blocks to Anthropic format."""
|
|
161
|
+
result: list[dict[str, Any]] = []
|
|
162
|
+
for block in blocks:
|
|
163
|
+
if hasattr(block, "model_dump"):
|
|
164
|
+
result.append(block.model_dump())
|
|
165
|
+
elif isinstance(block, dict):
|
|
166
|
+
result.append(block)
|
|
167
|
+
return result
|
|
168
|
+
|
|
169
|
+
def _convert_anthropic_response_to_message(
|
|
170
|
+
self, response: dict[str, Any]
|
|
171
|
+
) -> Message:
|
|
172
|
+
"""Convert Anthropic API response to voxagent Message.
|
|
173
|
+
|
|
174
|
+
Args:
|
|
175
|
+
response: Anthropic API response dict.
|
|
176
|
+
|
|
177
|
+
Returns:
|
|
178
|
+
A voxagent Message.
|
|
179
|
+
"""
|
|
180
|
+
role = response.get("role", "assistant")
|
|
181
|
+
content_blocks = response.get("content", [])
|
|
182
|
+
|
|
183
|
+
text_content = ""
|
|
184
|
+
tool_calls: list[ToolCall] = []
|
|
185
|
+
|
|
186
|
+
for block in content_blocks:
|
|
187
|
+
if block.get("type") == "text":
|
|
188
|
+
text_content += block.get("text", "")
|
|
189
|
+
elif block.get("type") == "tool_use":
|
|
190
|
+
tool_calls.append(ToolCall(
|
|
191
|
+
id=block.get("id", ""),
|
|
192
|
+
name=block.get("name", ""),
|
|
193
|
+
params=block.get("input", {}),
|
|
194
|
+
))
|
|
195
|
+
|
|
196
|
+
return Message(
|
|
197
|
+
role=role,
|
|
198
|
+
content=text_content,
|
|
199
|
+
tool_calls=tool_calls if tool_calls else None,
|
|
200
|
+
)
|
|
201
|
+
|
|
202
|
+
def _build_request_body(
|
|
203
|
+
self,
|
|
204
|
+
messages: list[Message],
|
|
205
|
+
system: str | None = None,
|
|
206
|
+
tools: list[Any] | None = None,
|
|
207
|
+
stream: bool = False,
|
|
208
|
+
thinking: bool = False,
|
|
209
|
+
) -> dict[str, Any]:
|
|
210
|
+
"""Build the request body for Anthropic API.
|
|
211
|
+
|
|
212
|
+
Args:
|
|
213
|
+
messages: The conversation messages.
|
|
214
|
+
system: Optional system prompt.
|
|
215
|
+
tools: Optional list of tool definitions.
|
|
216
|
+
stream: Whether to enable streaming.
|
|
217
|
+
thinking: Whether to enable thinking mode.
|
|
218
|
+
|
|
219
|
+
Returns:
|
|
220
|
+
Request body dict.
|
|
221
|
+
"""
|
|
222
|
+
body: dict[str, Any] = {
|
|
223
|
+
"model": self._model,
|
|
224
|
+
"messages": self._convert_messages_to_anthropic(messages),
|
|
225
|
+
"max_tokens": 4096,
|
|
226
|
+
}
|
|
227
|
+
|
|
228
|
+
if stream:
|
|
229
|
+
body["stream"] = True
|
|
230
|
+
|
|
231
|
+
if system:
|
|
232
|
+
body["system"] = system
|
|
233
|
+
|
|
234
|
+
if tools:
|
|
235
|
+
body["tools"] = tools
|
|
236
|
+
|
|
237
|
+
if thinking:
|
|
238
|
+
body["thinking"] = {"type": "enabled", "budget_tokens": 4096}
|
|
239
|
+
|
|
240
|
+
return body
|
|
241
|
+
|
|
242
|
+
    def _parse_sse_event(
        self, event_type: str, data: dict[str, Any]
    ) -> StreamChunk | None:
        """Parse an SSE event and return appropriate StreamChunk.

        Drives a small state machine: a tool_use block records its id/name on
        content_block_start, accumulates its JSON arguments across
        input_json_delta events, and is emitted as one ToolUseChunk when the
        block closes.

        Args:
            event_type: The SSE event type.
            data: The parsed JSON data.

        Returns:
            A StreamChunk or None if the event doesn't produce a chunk.
        """
        if event_type == "message_start":
            # Initialize message state
            return None

        elif event_type == "content_block_start":
            content_block = data.get("content_block", {})
            block_type = content_block.get("type")
            self._current_block_type = block_type

            if block_type == "tool_use":
                # Remember the call identity; its arguments stream in later
                # as input_json_delta fragments.
                self._current_tool_id = content_block.get("id")
                self._current_tool_name = content_block.get("name")
                self._current_tool_input_json = ""
            return None

        elif event_type == "content_block_delta":
            delta = data.get("delta", {})
            delta_type = delta.get("type")

            if delta_type == "text_delta":
                return TextDeltaChunk(delta=delta.get("text", ""))
            elif delta_type == "input_json_delta":
                # Accumulate partial JSON; parsed on content_block_stop.
                self._current_tool_input_json += delta.get("partial_json", "")
                return None
            elif delta_type == "thinking_delta":
                # Extended thinking - we could emit thinking chunks
                return None
            return None

        elif event_type == "content_block_stop":
            # If we have accumulated tool use, yield it
            if self._current_tool_id and self._current_tool_name:
                try:
                    params = json.loads(self._current_tool_input_json) if self._current_tool_input_json else {}
                except json.JSONDecodeError:
                    # Malformed/truncated argument JSON degrades to empty args.
                    params = {}

                tool_call = ToolCall(
                    id=self._current_tool_id,
                    name=self._current_tool_name,
                    params=params,
                )
                self._reset_tool_state()
                return ToolUseChunk(tool_call=tool_call)

            self._reset_tool_state()
            return None

        elif event_type == "message_delta":
            # Carries usage/stop_reason updates; not surfaced as chunks here.
            return None

        elif event_type == "message_stop":
            return MessageEndChunk()

        elif event_type == "error":
            error_info = data.get("error", {})
            error_msg = error_info.get("message", "Unknown error")
            return ErrorChunk(error=error_msg)

        # Unknown event types (e.g. keep-alive pings) are ignored.
        return None
|
|
314
|
+
|
|
315
|
+
|
|
316
|
+
    async def _make_streaming_request(
        self,
        messages: list[Message],
        system: str | None = None,
        tools: list[Any] | None = None,
        thinking: bool = False,
    ) -> AsyncIterator[str]:
        """Make a streaming request to the Anthropic API.

        Args:
            messages: The conversation messages.
            system: Optional system prompt.
            tools: Optional list of tool definitions.
            thinking: Whether to enable thinking mode.

        Yields:
            SSE lines from the response.
        """
        body = self._build_request_body(
            messages, system=system, tools=tools, stream=True, thinking=thinking
        )

        async with httpx.AsyncClient() as client:
            async with client.stream(
                "POST",
                self._get_api_endpoint(),
                headers=self._get_headers(),
                json=body,
                timeout=120.0,
            ) as response:
                # Raises httpx.HTTPStatusError for 4xx/5xx before streaming
                # begins; the error body has not been read at this point.
                response.raise_for_status()
                async for line in response.aiter_lines():
                    yield line
|
|
349
|
+
|
|
350
|
+
    async def stream(
        self,
        messages: list[Message],
        system: str | None = None,
        tools: list[Any] | None = None,
        abort_signal: AbortSignal | None = None,
        thinking: bool | None = None,
    ) -> AsyncIterator[StreamChunk]:
        """Stream a response from the Anthropic API.

        Args:
            messages: The conversation messages.
            system: Optional system prompt.
            tools: Optional list of tool definitions.
            abort_signal: Optional signal to abort the stream.
            thinking: Override thinking mode for this request.

        Yields:
            StreamChunk objects containing response data.
        """
        # Fresh tool-accumulation state for this stream.
        self._reset_tool_state()
        # Per-request override falls back to the constructor default.
        use_thinking = thinking if thinking is not None else self._thinking

        try:
            current_event_type: str | None = None

            async for line in self._make_streaming_request(
                messages, system=system, tools=tools, thinking=use_thinking
            ):
                # Stop promptly (mid-stream) when the caller aborts.
                if abort_signal and abort_signal.aborted:
                    break

                line = line.strip()

                if not line:
                    continue

                # SSE framing: "event: <type>" names the event, the following
                # "data: <json>" line carries its payload.
                if line.startswith("event:"):
                    current_event_type = line[6:].strip()
                elif line.startswith("data:"):
                    data_str = line[5:].strip()
                    if data_str and current_event_type:
                        try:
                            data = json.loads(data_str)
                            chunk = self._parse_sse_event(current_event_type, data)
                            if chunk:
                                yield chunk
                        except json.JSONDecodeError:
                            # Ignore malformed SSE payload lines.
                            pass
                    # One data line per event; clear so a stray data line is
                    # not parsed against a stale event type.
                    current_event_type = None

        except Exception as e:
            # Surface any transport/parse failure as an in-band error chunk.
            yield ErrorChunk(error=str(e))
|
|
403
|
+
|
|
404
|
+
async def _make_request(
|
|
405
|
+
self,
|
|
406
|
+
messages: list[Message],
|
|
407
|
+
system: str | None = None,
|
|
408
|
+
tools: list[Any] | None = None,
|
|
409
|
+
) -> dict[str, Any]:
|
|
410
|
+
"""Make a non-streaming request to the Anthropic API.
|
|
411
|
+
|
|
412
|
+
Args:
|
|
413
|
+
messages: The conversation messages.
|
|
414
|
+
system: Optional system prompt.
|
|
415
|
+
tools: Optional list of tool definitions.
|
|
416
|
+
|
|
417
|
+
Returns:
|
|
418
|
+
The response JSON dict.
|
|
419
|
+
"""
|
|
420
|
+
body = self._build_request_body(
|
|
421
|
+
messages, system=system, tools=tools, stream=False
|
|
422
|
+
)
|
|
423
|
+
|
|
424
|
+
async with httpx.AsyncClient() as client:
|
|
425
|
+
response = await client.post(
|
|
426
|
+
self._get_api_endpoint(),
|
|
427
|
+
headers=self._get_headers(),
|
|
428
|
+
json=body,
|
|
429
|
+
timeout=120.0,
|
|
430
|
+
)
|
|
431
|
+
response.raise_for_status()
|
|
432
|
+
return response.json()
|
|
433
|
+
|
|
434
|
+
async def complete(
|
|
435
|
+
self,
|
|
436
|
+
messages: list[Message],
|
|
437
|
+
system: str | None = None,
|
|
438
|
+
tools: list[Any] | None = None,
|
|
439
|
+
) -> Message:
|
|
440
|
+
"""Get a complete response from the Anthropic API.
|
|
441
|
+
|
|
442
|
+
Args:
|
|
443
|
+
messages: The conversation messages.
|
|
444
|
+
system: Optional system prompt.
|
|
445
|
+
tools: Optional list of tool definitions.
|
|
446
|
+
|
|
447
|
+
Returns:
|
|
448
|
+
The assistant's response message.
|
|
449
|
+
"""
|
|
450
|
+
response = await self._make_request(messages, system=system, tools=tools)
|
|
451
|
+
return self._convert_anthropic_response_to_message(response)
|
|
452
|
+
|
|
453
|
+
def count_tokens(
|
|
454
|
+
self,
|
|
455
|
+
messages: list[Message],
|
|
456
|
+
system: str | None = None,
|
|
457
|
+
) -> int:
|
|
458
|
+
"""Count tokens in the messages.
|
|
459
|
+
|
|
460
|
+
Uses a simple estimation based on character count.
|
|
461
|
+
For more accurate counting, use the Anthropic token counting API.
|
|
462
|
+
|
|
463
|
+
Args:
|
|
464
|
+
messages: The conversation messages.
|
|
465
|
+
system: Optional system prompt.
|
|
466
|
+
|
|
467
|
+
Returns:
|
|
468
|
+
Estimated token count.
|
|
469
|
+
"""
|
|
470
|
+
# Approximate token counting (roughly 4 chars per token for English)
|
|
471
|
+
char_count = 0
|
|
472
|
+
|
|
473
|
+
if system:
|
|
474
|
+
char_count += len(system)
|
|
475
|
+
|
|
476
|
+
for msg in messages:
|
|
477
|
+
if isinstance(msg.content, str):
|
|
478
|
+
char_count += len(msg.content)
|
|
479
|
+
else:
|
|
480
|
+
for block in msg.content:
|
|
481
|
+
if hasattr(block, "text"):
|
|
482
|
+
char_count += len(block.text)
|
|
483
|
+
elif hasattr(block, "content"):
|
|
484
|
+
char_count += len(block.content)
|
|
485
|
+
|
|
486
|
+
# Add overhead for role and structure
|
|
487
|
+
char_count += 10
|
|
488
|
+
|
|
489
|
+
if msg.tool_calls:
|
|
490
|
+
for tc in msg.tool_calls:
|
|
491
|
+
char_count += len(tc.name) + len(json.dumps(tc.params))
|
|
492
|
+
|
|
493
|
+
# Rough estimate: 4 characters per token
|
|
494
|
+
return max(1, char_count // 4)
|
|
495
|
+
|
|
496
|
+
|
|
497
|
+
# Public API of this module.
__all__ = ["AnthropicProvider"]
|
|
498
|
+
|