meshagent-anthropic 0.23.0 (py3-none-any.whl)
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- meshagent/anthropic/__init__.py +25 -0
- meshagent/anthropic/mcp.py +103 -0
- meshagent/anthropic/messages_adapter.py +637 -0
- meshagent/anthropic/openai_responses_stream_adapter.py +400 -0
- meshagent/anthropic/proxy/__init__.py +3 -0
- meshagent/anthropic/proxy/proxy.py +90 -0
- meshagent/anthropic/tests/anthropic_live_test.py +156 -0
- meshagent/anthropic/tests/mcp_test.py +64 -0
- meshagent/anthropic/tests/messages_adapter_test.py +179 -0
- meshagent/anthropic/tests/openai_responses_stream_adapter_test.py +102 -0
- meshagent/anthropic/version.py +1 -0
- meshagent_anthropic-0.23.0.dist-info/METADATA +44 -0
- meshagent_anthropic-0.23.0.dist-info/RECORD +16 -0
- meshagent_anthropic-0.23.0.dist-info/WHEEL +5 -0
- meshagent_anthropic-0.23.0.dist-info/licenses/LICENSE +201 -0
- meshagent_anthropic-0.23.0.dist-info/top_level.txt +1 -0

meshagent/anthropic/messages_adapter.py
@@ -0,0 +1,637 @@
+from __future__ import annotations
+
+from meshagent.agents.agent import AgentChatContext
+from meshagent.api import RoomClient, RoomException, RemoteParticipant
+from meshagent.tools import Toolkit, ToolContext, Tool, BaseTool
+from meshagent.api.messaging import (
+    Response,
+    LinkResponse,
+    FileResponse,
+    JsonResponse,
+    TextResponse,
+    EmptyResponse,
+    RawOutputs,
+    ensure_response,
+)
+from meshagent.agents.adapter import ToolResponseAdapter, LLMAdapter
+
+import json
+from typing import Any, Optional, Callable
+import os
+import logging
+import re
+import asyncio
+import base64
+
+from meshagent.anthropic.proxy import get_client, get_logging_httpx_client
+from meshagent.anthropic.mcp import MCPTool as MCPConnectorTool
+
+try:
+    from anthropic import APIStatusError
+except Exception:  # pragma: no cover
+    APIStatusError = Exception  # type: ignore
+
+logger = logging.getLogger("anthropic_agent")
+
+
+def _replace_non_matching(text: str, allowed_chars: str, replacement: str) -> str:
+    pattern = rf"[^{allowed_chars}]"
+    return re.sub(pattern, replacement, text)
+
+
+def safe_tool_name(name: str) -> str:
+    return _replace_non_matching(name, "a-zA-Z0-9_-", "_")
+
+
+def _as_jsonable(obj: Any) -> Any:
+    if isinstance(obj, dict):
+        return obj
+    return obj.model_dump(mode="json")
+
+
+def _text_block(text: str) -> dict:
+    return {"type": "text", "text": text}
+
+
+class MessagesToolBundle:
+    def __init__(self, toolkits: list[Toolkit]):
+        self._executors: dict[str, Toolkit] = {}
+        self._safe_names: dict[str, str] = {}
+        self._tools_by_safe_name: dict[str, Tool] = {}
+
+        tools: list[dict] = []
+
+        for toolkit in toolkits:
+            for v in toolkit.tools:
+                if not isinstance(v, Tool):
+                    raise RoomException(f"unsupported tool type {type(v)}")
+
+                original_name = v.name
+                safe_name = safe_tool_name(original_name)
+
+                if original_name in self._executors:
+                    raise Exception(
+                        f"duplicate in bundle '{original_name}', tool names must be unique."
+                    )
+
+                self._executors[original_name] = toolkit
+                self._safe_names[safe_name] = original_name
+                self._tools_by_safe_name[safe_name] = v
+
+                schema = {**v.input_schema}
+                if v.defs is not None:
+                    schema["$defs"] = v.defs
+
+                tools.append(
+                    {
+                        "name": safe_name,
+                        "description": v.description,
+                        "input_schema": schema,
+                    }
+                )
+
+        self._tools = tools or None
+
+    def to_json(self) -> list[dict] | None:
+        return None if self._tools is None else self._tools.copy()
+
+    def get_tool(self, safe_name: str) -> Tool | None:
+        return self._tools_by_safe_name.get(safe_name)
+
+    async def execute(self, *, context: ToolContext, tool_use: dict) -> Response:
+        safe_name = tool_use.get("name")
+        if safe_name not in self._safe_names:
+            raise RoomException(
+                f"Invalid tool name {safe_name}, check the name of the tool"
+            )
+
+        name = self._safe_names[safe_name]
+        if name not in self._executors:
+            raise Exception(f"Unregistered tool name {name}")
+
+        arguments = tool_use.get("input") or {}
+        proxy = self._executors[name]
+        result = await proxy.execute(context=context, name=name, arguments=arguments)
+        return ensure_response(result)
+
+
+class AnthropicMessagesToolResponseAdapter(ToolResponseAdapter):
+    async def to_plain_text(self, *, room: RoomClient, response: Response) -> str:
+        if isinstance(response, LinkResponse):
+            return json.dumps({"name": response.name, "url": response.url})
+        if isinstance(response, JsonResponse):
+            return json.dumps(response.json)
+        if isinstance(response, TextResponse):
+            return response.text
+        if isinstance(response, FileResponse):
+            return response.name
+        if isinstance(response, EmptyResponse):
+            return "ok"
+        if isinstance(response, dict):
+            return json.dumps(response)
+        if isinstance(response, str):
+            return response
+        if response is None:
+            return "ok"
+        raise Exception("unexpected return type: {type}".format(type=type(response)))
+
+    async def create_messages(
+        self,
+        *,
+        context: AgentChatContext,
+        tool_call: Any,
+        room: RoomClient,
+        response: Response,
+    ) -> list:
+        tool_use = tool_call if isinstance(tool_call, dict) else _as_jsonable(tool_call)
+        tool_use_id = tool_use.get("id")
+        if tool_use_id is None:
+            raise RoomException("anthropic tool_use block was missing an id")
+
+        if isinstance(response, RawOutputs):
+            # Allow advanced tools to return pre-built Anthropic blocks.
+            return [{"role": "user", "content": response.outputs}]
+
+        tool_result_content: list[dict]
+        try:
+            if isinstance(response, FileResponse):
+                mime_type = (response.mime_type or "").lower()
+
+                if mime_type == "image/jpg":
+                    mime_type = "image/jpeg"
+
+                if mime_type.startswith("image/"):
+                    allowed = {"image/jpeg", "image/png", "image/gif", "image/webp"}
+                    if mime_type not in allowed:
+                        output = f"{response.name} was returned as {response.mime_type}, which Anthropic does not accept as an image block"
+                        tool_result_content = [_text_block(output)]
+                    else:
+                        tool_result_content = [
+                            {
+                                "type": "image",
+                                "source": {
+                                    "type": "base64",
+                                    "media_type": mime_type,
+                                    "data": base64.b64encode(response.data).decode(
+                                        "utf-8"
+                                    ),
+                                },
+                            }
+                        ]
+
+                elif mime_type == "application/pdf":
+                    tool_result_content = [
+                        {
+                            "type": "document",
+                            "title": response.name,
+                            "source": {
+                                "type": "base64",
+                                "media_type": "application/pdf",
+                                "data": base64.b64encode(response.data).decode("utf-8"),
+                            },
+                        }
+                    ]
+
+                else:
+                    output = await self.to_plain_text(room=room, response=response)
+                    tool_result_content = [_text_block(output)]
+
+            else:
+                output = await self.to_plain_text(room=room, response=response)
+                tool_result_content = [_text_block(output)]
+
+        except Exception as ex:
+            logger.error("unable to process tool call results", exc_info=ex)
+            tool_result_content = [_text_block(f"Error: {ex}")]
+
+        message = {
+            "role": "user",
+            "content": [
+                {
+                    "type": "tool_result",
+                    "tool_use_id": tool_use_id,
+                    "content": tool_result_content,
+                }
+            ],
+        }
+
+        room.developer.log_nowait(
+            type="llm.message",
+            data={
+                "context": context.id,
+                "participant_id": room.local_participant.id,
+                "participant_name": room.local_participant.get_attribute("name"),
+                "message": message,
+            },
+        )
+
+        return [message]
+
+
+class AnthropicMessagesAdapter(LLMAdapter[dict]):
+    def __init__(
+        self,
+        model: str = os.getenv("ANTHROPIC_MODEL", "claude-3-5-sonnet-latest"),
+        max_tokens: int = int(os.getenv("ANTHROPIC_MAX_TOKENS", "1024")),
+        client: Optional[Any] = None,
+        message_options: Optional[dict] = None,
+        provider: str = "anthropic",
+        log_requests: bool = False,
+    ):
+        self._model = model
+        self._max_tokens = max_tokens
+        self._client = client
+        self._message_options = message_options or {}
+        self._provider = provider
+        self._log_requests = log_requests
+
+    def default_model(self) -> str:
+        return self._model
+
+    def create_chat_context(self) -> AgentChatContext:
+        return AgentChatContext(system_role=None)
+
+    def get_anthropic_client(self, *, room: RoomClient) -> Any:
+        if self._client is not None:
+            return self._client
+        http_client = get_logging_httpx_client() if self._log_requests else None
+        return get_client(room=room, http_client=http_client)
+
+    def _convert_messages(
+        self, *, context: AgentChatContext
+    ) -> tuple[list[dict], Optional[str]]:
+        system = context.get_system_instructions()
+
+        def as_blocks(role: str, content: Any) -> dict:
+            if isinstance(content, str):
+                return {"role": role, "content": [_text_block(content)]}
+            if isinstance(content, list):
+                return {"role": role, "content": content}
+            return {"role": role, "content": [_text_block(str(content))]}
+
+        messages: list[dict] = []
+        pending_tool_use_ids: set[str] = set()
+
+        for m in context.messages:
+            role = m.get("role")
+            if role not in {"user", "assistant"}:
+                continue
+
+            msg = as_blocks(role, m.get("content"))
+
+            # Anthropic requires that tool_result blocks appear in the *immediately next*
+            # user message after an assistant tool_use.
+            if pending_tool_use_ids:
+                if role == "assistant":
+                    # Drop any assistant chatter that appears between tool_use and tool_result.
+                    logger.warning(
+                        "dropping assistant message between tool_use and tool_result"
+                    )
+                    continue
+
+                # role == user
+                content_blocks = msg.get("content") or []
+                tool_results = [
+                    b
+                    for b in content_blocks
+                    if isinstance(b, dict) and b.get("type") == "tool_result"
+                ]
+                tool_result_ids = {
+                    b.get("tool_use_id") for b in tool_results if b.get("tool_use_id")
+                }
+
+                if not pending_tool_use_ids.issubset(tool_result_ids):
+                    # If we can't satisfy the ordering contract, it's better to fail early
+                    # with a clear error than to send an invalid request.
+                    raise RoomException(
+                        "invalid transcript: tool_use blocks must be followed by a user message "
+                        "containing tool_result blocks for all tool_use ids"
+                    )
+
+                pending_tool_use_ids.clear()
+
+            # Track tool_use ids introduced by assistant messages.
+            if role == "assistant":
+                content_blocks = msg.get("content") or []
+                for b in content_blocks:
+                    if isinstance(b, dict) and b.get("type") == "tool_use":
+                        tool_id = b.get("id")
+                        if tool_id:
+                            pending_tool_use_ids.add(tool_id)
+
+            messages.append(msg)
+
+        return messages, system
+
+    def _messages_api(self, *, client: Any, request: dict) -> Any:
+        # The MCP connector requires `client.beta.messages.*`.
+        if request.get("betas") is not None:
+            return client.beta.messages
+        return client.messages
+
+    async def _create_with_optional_headers(self, *, client: Any, request: dict) -> Any:
+        api = self._messages_api(client=client, request=request)
+        try:
+            return await api.create(**request)
+        except TypeError:
+            request = dict(request)
+            request.pop("extra_headers", None)
+            return await api.create(**request)
+
+    async def _stream_message(
+        self,
+        *,
+        client: Any,
+        request: dict,
+        event_handler: Callable[[dict], None],
+    ) -> Any:
+        """Stream text deltas and return the final message.
+
+        Uses the official Anthropic SDK streaming helper:
+
+        ```py
+        async with client.messages.stream(...) as stream:
+            async for text in stream.text_stream:
+                ...
+            message = await stream.get_final_message()
+        ```
+        """
+
+        api = self._messages_api(client=client, request=request)
+        stream_mgr = api.stream(**request)
+
+        async with stream_mgr as stream:
+            async for event in stream:
+                event_handler({"type": event.type, "event": _as_jsonable(event)})
+
+            final_message = await stream.get_final_message()
+            event_handler(
+                {"type": "message.completed", "message": _as_jsonable(final_message)}
+            )
+            return final_message
+
+    def _split_toolkits(
+        self, *, toolkits: list[Toolkit]
+    ) -> tuple[list[Toolkit], list[MCPConnectorTool]]:
+        """Split toolkits into executable tools and request middleware tools."""
+
+        executable_toolkits: list[Toolkit] = []
+        middleware: list[MCPConnectorTool] = []
+
+        for toolkit in toolkits:
+            executable_tools: list[Tool] = []
+
+            for t in toolkit.tools:
+                if isinstance(t, MCPConnectorTool):
+                    middleware.append(t)
+                elif isinstance(t, Tool):
+                    executable_tools.append(t)
+                elif isinstance(t, BaseTool):
+                    # Non-executable tool types are ignored.
+                    continue
+                else:
+                    raise RoomException(f"unsupported tool type {type(t)}")
+
+            if executable_tools:
+                executable_toolkits.append(
+                    Toolkit(
+                        name=toolkit.name,
+                        title=getattr(toolkit, "title", None),
+                        description=getattr(toolkit, "description", None),
+                        thumbnail_url=getattr(toolkit, "thumbnail_url", None),
+                        rules=getattr(toolkit, "rules", []),
+                        tools=executable_tools,
+                    )
+                )
+
+        return executable_toolkits, middleware
+
+    def _apply_request_middleware(
+        self, *, request: dict, middleware: list[MCPConnectorTool]
+    ) -> dict:
+        for m in middleware:
+            m.apply(request=request)
+        return request
+
+    async def next(
+        self,
+        *,
+        context: AgentChatContext,
+        room: RoomClient,
+        toolkits: list[Toolkit],
+        tool_adapter: Optional[ToolResponseAdapter] = None,
+        output_schema: Optional[dict] = None,
+        event_handler: Optional[Callable[[dict], None]] = None,
+        model: Optional[str] = None,
+        on_behalf_of: Optional[RemoteParticipant] = None,
+    ) -> Any:
+        if model is None:
+            model = self.default_model()
+
+        if tool_adapter is None:
+            tool_adapter = AnthropicMessagesToolResponseAdapter()
+
+        client = self.get_anthropic_client(room=room)
+
+        validation_attempts = 0
+
+        try:
+            while True:
+                executable_toolkits, middleware = self._split_toolkits(
+                    toolkits=toolkits
+                )
+                tool_bundle = MessagesToolBundle(toolkits=executable_toolkits)
+
+                messages, system = self._convert_messages(context=context)
+
+                if output_schema is not None:
+                    schema_hint = json.dumps(output_schema)
+                    schema_system = (
+                        "Return ONLY valid JSON that matches this JSON Schema. "
+                        "Do not wrap in markdown. Schema: " + schema_hint
+                    )
+                    system = (
+                        (system + "\n" + schema_system) if system else schema_system
+                    )
+
+                extra_headers = {}
+                if on_behalf_of is not None:
+                    extra_headers["Meshagent-On-Behalf-Of"] = (
+                        on_behalf_of.get_attribute("name")
+                    )
+
+                message_options = dict(self._message_options or {})
+
+                tools_list: list[dict] = tool_bundle.to_json() or []
+                extra_tools = message_options.pop("tools", None)
+                if isinstance(extra_tools, list):
+                    tools_list.extend(extra_tools)
+
+                request = {
+                    "model": model,
+                    "max_tokens": self._max_tokens,
+                    "messages": messages,
+                    "system": system,
+                    "tools": tools_list,
+                    "extra_headers": extra_headers or None,
+                    **message_options,
+                }
+
+                request = self._apply_request_middleware(
+                    request=request,
+                    middleware=middleware,
+                )
+
+                # Normalize empty lists to None for Anthropic.
+                if (
+                    isinstance(request.get("tools"), list)
+                    and len(request["tools"]) == 0
+                ):
+                    request["tools"] = None
+                if (
+                    isinstance(request.get("mcp_servers"), list)
+                    and len(request["mcp_servers"]) == 0
+                ):
+                    request["mcp_servers"] = None
+                if (
+                    isinstance(request.get("betas"), list)
+                    and len(request["betas"]) == 0
+                ):
+                    request["betas"] = None
+
+                # remove None fields
+                request = {k: v for k, v in request.items() if v is not None}
+
+                logger.info("requesting response from anthropic with model: %s", model)
+
+                if event_handler is not None:
+                    final_message = await self._stream_message(
+                        client=client,
+                        request=request,
+                        event_handler=event_handler,
+                    )
+                    response_dict = _as_jsonable(final_message)
+                else:
+                    response = await self._create_with_optional_headers(
+                        client=client,
+                        request=request,
+                    )
+                    response_dict = _as_jsonable(response)
+
+                content_blocks = []
+                raw_content = response_dict.get("content")
+                if isinstance(raw_content, list):
+                    content_blocks = raw_content
+
+                tool_uses = [b for b in content_blocks if b.get("type") == "tool_use"]
+
+                # Keep the assistant message in context.
+                assistant_message = {"role": "assistant", "content": content_blocks}
+                context.messages.append(assistant_message)
+
+                if tool_uses:
+                    tasks = []
+
+                    async def do_tool(tool_use: dict) -> list[dict]:
+                        tool_context = ToolContext(
+                            room=room,
+                            caller=room.local_participant,
+                            on_behalf_of=on_behalf_of,
+                            caller_context={"chat": context.to_json()},
+                        )
+                        try:
+                            tool_response = await tool_bundle.execute(
+                                context=tool_context,
+                                tool_use=tool_use,
+                            )
+                            return await tool_adapter.create_messages(
+                                context=context,
+                                tool_call=tool_use,
+                                room=room,
+                                response=tool_response,
+                            )
+                        except Exception as ex:
+                            tool_result_content = [_text_block(f"Error: {ex}")]
+                            message = {
+                                "role": "user",
+                                "content": [
+                                    {
+                                        "type": "tool_result",
+                                        "tool_use_id": tool_use.get("id"),
+                                        "content": tool_result_content,
+                                    }
+                                ],
+                            }
+                            return [message]
+
+                    for tool_use in tool_uses:
+                        tasks.append(asyncio.create_task(do_tool(tool_use)))
+
+                    results = await asyncio.gather(*tasks)
+
+                    # Anthropic requires tool_result blocks for *all* tool_use ids to appear in the
+                    # *immediately next* user message after the assistant tool_use message.
+                    tool_result_blocks: list[dict] = []
+                    trailing_messages: list[dict] = []
+
+                    for msgs in results:
+                        for msg in msgs:
+                            if (
+                                isinstance(msg, dict)
+                                and msg.get("role") == "user"
+                                and isinstance(msg.get("content"), list)
+                                and all(
+                                    isinstance(b, dict)
+                                    and b.get("type") == "tool_result"
+                                    for b in msg["content"]
+                                )
+                            ):
+                                tool_result_blocks.extend(msg["content"])
+                            else:
+                                trailing_messages.append(msg)
+
+                    if tool_result_blocks:
+                        context.messages.append(
+                            {"role": "user", "content": tool_result_blocks}
+                        )
+
+                    for msg in trailing_messages:
+                        context.messages.append(msg)
+
+                    continue
+
+                # no tool calls; return final content
+                text = "".join(
+                    [
+                        b.get("text", "")
+                        for b in content_blocks
+                        if b.get("type") == "text"
+                    ]
+                )
+
+                if output_schema is None:
+                    return text
+
+                # Schema-mode: parse and validate JSON.
+                validation_attempts += 1
+                try:
+                    parsed = json.loads(text)
+                    self.validate(response=parsed, output_schema=output_schema)
+                    return parsed
+                except Exception as e:
+                    if validation_attempts >= 3:
+                        raise RoomException(
+                            f"Invalid JSON response from Anthropic: {e}"
+                        )
+                    context.messages.append(
+                        {
+                            "role": "user",
+                            "content": (
+                                "The previous response did not match the required JSON schema. "
+                                f"Error: {e}. Please try again and return only valid JSON."
+                            ),
+                        }
+                    )
+
+        except APIStatusError as e:
+            raise RoomException(f"Error from Anthropic: {e}")
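
For orientation, here is a minimal sketch of driving the adapter above. The connected `room` client and `my_toolkit` are illustrative assumptions, not part of this package, and the call must run inside an async function:

```py
from meshagent.anthropic.messages_adapter import (
    AnthropicMessagesAdapter,
    AnthropicMessagesToolResponseAdapter,
)

# Assumed to exist already: a connected RoomClient `room` and a Toolkit `my_toolkit`.
adapter = AnthropicMessagesAdapter(model="claude-3-5-sonnet-latest", max_tokens=1024)

chat = adapter.create_chat_context()
chat.messages.append({"role": "user", "content": "Summarize the project readme."})

# Returns plain text when no output_schema is supplied; with an output_schema it
# parses and validates JSON, retrying up to 3 times before raising.
result = await adapter.next(
    context=chat,
    room=room,
    toolkits=[my_toolkit],
    tool_adapter=AnthropicMessagesToolResponseAdapter(),
)
```

The ordering contract that `_convert_messages` enforces corresponds to transcripts shaped like the following (ids illustrative): every `tool_use` id emitted in an assistant message must be answered by a `tool_result` block in the immediately following user message.

```py
[
    {"role": "assistant", "content": [
        {"type": "tool_use", "id": "toolu_01", "name": "lookup",
         "input": {"q": "readme"}},
    ]},
    {"role": "user", "content": [
        {"type": "tool_result", "tool_use_id": "toolu_01",
         "content": [{"type": "text", "text": "ok"}]},
    ]},
]
```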