meshagent-anthropic 0.23.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- meshagent/anthropic/__init__.py +25 -0
- meshagent/anthropic/mcp.py +103 -0
- meshagent/anthropic/messages_adapter.py +637 -0
- meshagent/anthropic/openai_responses_stream_adapter.py +400 -0
- meshagent/anthropic/proxy/__init__.py +3 -0
- meshagent/anthropic/proxy/proxy.py +90 -0
- meshagent/anthropic/tests/anthropic_live_test.py +156 -0
- meshagent/anthropic/tests/mcp_test.py +64 -0
- meshagent/anthropic/tests/messages_adapter_test.py +179 -0
- meshagent/anthropic/tests/openai_responses_stream_adapter_test.py +102 -0
- meshagent/anthropic/version.py +1 -0
- meshagent_anthropic-0.23.0.dist-info/METADATA +44 -0
- meshagent_anthropic-0.23.0.dist-info/RECORD +16 -0
- meshagent_anthropic-0.23.0.dist-info/WHEEL +5 -0
- meshagent_anthropic-0.23.0.dist-info/licenses/LICENSE +201 -0
- meshagent_anthropic-0.23.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,400 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import time
|
|
4
|
+
from dataclasses import dataclass
|
|
5
|
+
from typing import Any, Callable, Optional
|
|
6
|
+
|
|
7
|
+
from meshagent.api import RoomClient, RemoteParticipant
|
|
8
|
+
from meshagent.agents.agent import AgentChatContext
|
|
9
|
+
from meshagent.tools import Toolkit
|
|
10
|
+
|
|
11
|
+
from .messages_adapter import AnthropicMessagesAdapter
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
@dataclass
class _OutputBlockState:
    """Mutable per-content-block accumulator used while translating a stream.

    One instance is kept per Anthropic content-block index, tracking which
    OpenAI-style output item the block maps to and the text/arguments
    streamed so far.
    """

    kind: str  # "message" | "function_call" | "reasoning"
    item_id: str  # OpenAI-style output item id assigned by the adapter
    output_index: int  # position of the item in the response `output` list
    content_index: int  # index of the content part within the item
    name: Optional[str] = None  # tool name (function_call blocks only)
    call_id: Optional[str] = None  # Anthropic tool_use id (function_call only)
    text: str = ""  # accumulated output/reasoning text
    arguments: str = ""  # accumulated partial-JSON tool arguments
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class AnthropicOpenAIResponsesStreamAdapter(AnthropicMessagesAdapter):
    """Anthropic adapter that emits OpenAI Responses-style stream events.

    This is useful when you have downstream consumers that already understand the
    OpenAI Responses streaming event schema (e.g. UI code), but want to run the
    underlying inference on Anthropic.

    Notes:
    - This adapter only affects the *streaming* event shape.
    - Tool execution still uses MeshAgent toolkits and the Anthropic tool loop.
    """

    async def _stream_message(
        self,
        *,
        client: Any,
        request: dict,
        event_handler: Callable[[dict], None],
    ) -> Any:
        """Stream one Anthropic message, forwarding OpenAI-style events.

        Translates Anthropic stream events (``message_start``,
        ``content_block_start`` / ``delta`` / ``stop``) into OpenAI Responses
        events (``response.created``, ``response.output_item.added``,
        ``response.output_text.delta``, ..., ``response.completed``) and passes
        each to ``event_handler``. Returns the final Anthropic message object.
        """
        # Monotonic sequence number stamped onto each emitted event.
        seq = 0
        response_id: Optional[str] = None
        response_model: str = str(request.get("model"))

        # Accumulated OpenAI-style output items, plus per-block state keyed by
        # the Anthropic content-block index.
        output: list[dict] = []
        blocks: dict[int, _OutputBlockState] = {}

        created_at = int(time.time())

        def emit(payload: dict) -> None:
            # Stamp a sequence number (unless the caller pre-set one) and
            # forward to the consumer.
            nonlocal seq
            if "sequence_number" not in payload:
                payload["sequence_number"] = seq
                seq += 1
            event_handler(payload)

        # Builders for the three OpenAI output-item shapes we can produce.

        def output_message_item(*, item_id: str, status: str, text: str) -> dict:
            return {
                "type": "message",
                "id": item_id,
                "role": "assistant",
                "status": status,
                "content": [
                    {
                        "type": "output_text",
                        "text": text,
                        "annotations": [],
                        "logprobs": None,
                    }
                ],
            }

        def output_function_call_item(
            *,
            item_id: str,
            call_id: str,
            name: str,
            status: Optional[str],
            arguments: str,
        ) -> dict:
            return {
                "type": "function_call",
                "id": item_id,
                "call_id": call_id,
                "name": name,
                "arguments": arguments,
                "status": status,
            }

        def output_reasoning_item(*, item_id: str, status: str, text: str) -> dict:
            return {
                "type": "reasoning",
                "id": item_id,
                "status": status,
                "summary": [],
                "content": [{"type": "reasoning_text", "text": text}],
                "encrypted_content": None,
            }

        # Skeleton OpenAI Responses object; filled in as stream events arrive.
        # Note `output` is shared by reference, so item updates are reflected.
        response_obj: dict = {
            "id": None,
            "object": "response",
            "created_at": created_at,
            "model": response_model,
            "output": output,
            "status": "in_progress",
            "error": None,
            "usage": None,
        }

        async with client.messages.stream(**request) as stream:
            async for event in stream:
                data = event.model_dump(mode="json")
                etype = data.get("type")

                if etype == "message_start":
                    message = data.get("message") or {}
                    response_id = message.get("id") or response_id
                    response_obj["id"] = response_id
                    if message.get("model") is not None:
                        response_obj["model"] = message.get("model")

                    emit({"type": "response.created", "response": dict(response_obj)})

                elif etype == "content_block_start":
                    idx = int(data.get("index"))
                    block = data.get("content_block") or {}
                    btype = block.get("type")

                    # Each new Anthropic block becomes a new OpenAI output item.
                    output_index = len(output)
                    base_item_id = response_id or "anthropic"
                    item_id = f"{base_item_id}_out_{output_index}"

                    if btype == "text":
                        item = output_message_item(
                            item_id=item_id, status="in_progress", text=""
                        )
                        output.append(item)
                        blocks[idx] = _OutputBlockState(
                            kind="message",
                            item_id=item_id,
                            output_index=output_index,
                            content_index=0,
                        )

                    elif btype == "tool_use":
                        call_id = str(block.get("id"))
                        name = str(block.get("name"))
                        item = output_function_call_item(
                            item_id=item_id,
                            call_id=call_id,
                            name=name,
                            status="in_progress",
                            arguments="",
                        )
                        output.append(item)
                        blocks[idx] = _OutputBlockState(
                            kind="function_call",
                            item_id=item_id,
                            output_index=output_index,
                            content_index=0,
                            name=name,
                            call_id=call_id,
                        )

                    elif btype == "thinking":
                        item = output_reasoning_item(
                            item_id=item_id, status="in_progress", text=""
                        )
                        output.append(item)
                        blocks[idx] = _OutputBlockState(
                            kind="reasoning",
                            item_id=item_id,
                            output_index=output_index,
                            content_index=0,
                        )

                    else:
                        # Unknown block type: ignore for OpenAI compatibility.
                        continue

                    emit(
                        {
                            "type": "response.output_item.added",
                            "output_index": output_index,
                            "item": output[output_index],
                        }
                    )

                    # OpenAI-style content part events (text + reasoning content).
                    state = blocks.get(idx)
                    if state is not None and state.kind in {"message", "reasoning"}:
                        emit(
                            {
                                "type": "response.content_part.added",
                                "output_index": state.output_index,
                                "item_id": state.item_id,
                                "content_index": state.content_index,
                                "part": output[state.output_index]["content"][
                                    state.content_index
                                ],
                            }
                        )

                elif etype == "content_block_delta":
                    idx = int(data.get("index"))
                    state = blocks.get(idx)
                    if state is None:
                        # Delta for a block we chose not to track (unknown type).
                        continue

                    delta = data.get("delta") or {}
                    dtype = delta.get("type")

                    if dtype == "text_delta" and state.kind == "message":
                        piece = str(delta.get("text") or "")
                        state.text += piece
                        # Keep the in-progress item's text current so snapshots
                        # of `output` are always coherent.
                        output[state.output_index]["content"][0]["text"] = state.text

                        emit(
                            {
                                "type": "response.output_text.delta",
                                "output_index": state.output_index,
                                "item_id": state.item_id,
                                "content_index": state.content_index,
                                "delta": piece,
                                "logprobs": None,
                            }
                        )

                    elif dtype == "input_json_delta" and state.kind == "function_call":
                        piece = str(delta.get("partial_json") or "")
                        state.arguments += piece
                        output[state.output_index]["arguments"] = state.arguments

                        emit(
                            {
                                "type": "response.function_call_arguments.delta",
                                "output_index": state.output_index,
                                "item_id": state.item_id,
                                "delta": piece,
                            }
                        )

                    elif dtype == "thinking_delta" and state.kind == "reasoning":
                        piece = str(delta.get("thinking") or "")
                        state.text += piece
                        output[state.output_index]["content"][0]["text"] = state.text

                        emit(
                            {
                                "type": "response.reasoning_text.delta",
                                "output_index": state.output_index,
                                "item_id": state.item_id,
                                "content_index": state.content_index,
                                "delta": piece,
                            }
                        )

                    else:
                        # Ignore signature_delta and unknown deltas.
                        continue

                elif etype == "content_block_stop":
                    idx = int(data.get("index"))
                    state = blocks.get(idx)
                    if state is None:
                        continue

                    if state.kind == "message":
                        emit(
                            {
                                "type": "response.output_text.done",
                                "output_index": state.output_index,
                                "item_id": state.item_id,
                                "content_index": state.content_index,
                                "text": state.text,
                                "logprobs": None,
                            }
                        )
                        # Rebuild the item with its final status/text before
                        # emitting the *.done events that reference it.
                        output[state.output_index] = output_message_item(
                            item_id=state.item_id, status="completed", text=state.text
                        )
                        emit(
                            {
                                "type": "response.content_part.done",
                                "output_index": state.output_index,
                                "item_id": state.item_id,
                                "content_index": state.content_index,
                                "part": output[state.output_index]["content"][
                                    state.content_index
                                ],
                            }
                        )

                    elif state.kind == "function_call":
                        emit(
                            {
                                "type": "response.function_call_arguments.done",
                                "output_index": state.output_index,
                                "item_id": state.item_id,
                                "name": state.name,
                                "arguments": state.arguments,
                            }
                        )
                        output[state.output_index] = output_function_call_item(
                            item_id=state.item_id,
                            call_id=str(state.call_id),
                            name=str(state.name),
                            status="completed",
                            arguments=state.arguments,
                        )

                    elif state.kind == "reasoning":
                        emit(
                            {
                                "type": "response.reasoning_text.done",
                                "output_index": state.output_index,
                                "item_id": state.item_id,
                                "content_index": state.content_index,
                                "text": state.text,
                            }
                        )
                        output[state.output_index] = output_reasoning_item(
                            item_id=state.item_id, status="completed", text=state.text
                        )
                        emit(
                            {
                                "type": "response.content_part.done",
                                "output_index": state.output_index,
                                "item_id": state.item_id,
                                "content_index": state.content_index,
                                "part": output[state.output_index]["content"][
                                    state.content_index
                                ],
                            }
                        )

                    emit(
                        {
                            "type": "response.output_item.done",
                            "output_index": state.output_index,
                            "item": output[state.output_index],
                        }
                    )

                elif etype == "message_stop":
                    # Defer `response.completed` until after we have final usage.
                    pass

                else:
                    # ping, message_delta, etc.
                    continue

            final_message = await stream.get_final_message()
            final_dict = final_message.model_dump(mode="json")
            usage = final_dict.get("usage") or {}
            input_tokens = usage.get("input_tokens")
            output_tokens = usage.get("output_tokens")
            if isinstance(input_tokens, int) and isinstance(output_tokens, int):
                response_obj["usage"] = {
                    "input_tokens": input_tokens,
                    "output_tokens": output_tokens,
                    "total_tokens": input_tokens + output_tokens,
                    "input_tokens_details": None,
                    "output_tokens_details": None,
                }

            response_obj["status"] = "completed"
            response_obj["output"] = output

            emit({"type": "response.completed", "response": dict(response_obj)})
            return final_message

    async def next(
        self,
        *,
        context: AgentChatContext,
        room: RoomClient,
        toolkits: list[Toolkit],
        tool_adapter: Any = None,
        output_schema: Optional[dict] = None,
        event_handler: Optional[Callable[[dict], None]] = None,
        model: Optional[str] = None,
        on_behalf_of: Optional[RemoteParticipant] = None,
    ) -> Any:
        """Run one agent step; identical to the base adapter apart from the
        streaming event shape produced by ``_stream_message``."""
        # Keep the same behavior; only streaming shape changes.
        return await super().next(
            context=context,
            room=room,
            toolkits=toolkits,
            tool_adapter=tool_adapter,
            output_schema=output_schema,
            event_handler=event_handler,
            model=model,
            on_behalf_of=on_behalf_of,
        )
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
from meshagent.api import RoomClient
|
|
2
|
+
import logging
|
|
3
|
+
import json
|
|
4
|
+
import httpx
|
|
5
|
+
from typing import Optional, Any
|
|
6
|
+
|
|
7
|
+
try:
|
|
8
|
+
from anthropic import AsyncAnthropic
|
|
9
|
+
except Exception: # pragma: no cover
|
|
10
|
+
AsyncAnthropic = None # type: ignore
|
|
11
|
+
|
|
12
|
+
logger = logging.getLogger("anthropic.client")
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def _redact_headers(headers: httpx.Headers) -> dict:
|
|
16
|
+
h = dict(headers)
|
|
17
|
+
if "x-api-key" in {k.lower() for k in h.keys()}:
|
|
18
|
+
for k in list(h.keys()):
|
|
19
|
+
if k.lower() == "x-api-key":
|
|
20
|
+
h[k] = "***REDACTED***"
|
|
21
|
+
if "authorization" in {k.lower() for k in h.keys()}:
|
|
22
|
+
for k in list(h.keys()):
|
|
23
|
+
if k.lower() == "authorization":
|
|
24
|
+
h[k] = "***REDACTED***"
|
|
25
|
+
return h
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def _truncate_bytes(b: bytes, limit: int = 1024 * 100) -> str:
|
|
29
|
+
s = b.decode("utf-8", errors="replace")
|
|
30
|
+
return (
|
|
31
|
+
s
|
|
32
|
+
if len(s) <= limit
|
|
33
|
+
else (s[:limit] + f"\n... (truncated, {len(s)} chars total)")
|
|
34
|
+
)
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
async def log_request(request: httpx.Request):
    """httpx request hook: log the method, URL, redacted headers, and body.

    Secrets are masked via ``_redact_headers``; bodies are size-capped via
    ``_truncate_bytes``.
    """
    # Use the module-level named logger so callers can control verbosity via
    # the "anthropic.client" logger; the original called the root logger
    # (logging.info), which ignored that configuration.
    logger.info("==> %s %s", request.method, request.url)
    logger.info("headers=%s", json.dumps(_redact_headers(request.headers), indent=2))
    if request.content:
        logger.info("body=%s", _truncate_bytes(request.content))
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
async def log_response(response: httpx.Response):
    """httpx response hook: read the body and log status, redacted headers, body.

    ``aread()`` buffers the full response so the body can be logged; secrets
    are masked via ``_redact_headers`` and bodies capped via ``_truncate_bytes``.
    """
    body = await response.aread()
    # Use the module-level named logger (consistent with `logger` defined
    # above); the original called the root logger (logging.info), bypassing
    # any "anthropic.client" logger configuration.
    logger.info("<== %s %s", response.status_code, response.request.url)
    logger.info("headers=%s", json.dumps(_redact_headers(response.headers), indent=2))
    if body:
        logger.info("body=%s", _truncate_bytes(body))
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def get_logging_httpx_client() -> httpx.AsyncClient:
    """Build an async httpx client that logs all traffic with redacted headers."""
    hooks = {"request": [log_request], "response": [log_response]}
    return httpx.AsyncClient(event_hooks=hooks, timeout=60.0)
|
|
57
|
+
|
|
58
|
+
|
|
59
|
+
def get_client(
    *, room: RoomClient, http_client: Optional[httpx.AsyncClient] = None
) -> Any:
    """Create an ``AsyncAnthropic`` client routed through the room's proxy.

    Args:
        room: Connected room client; its protocol token authenticates with
            the MeshAgent proxy.
        http_client: Optional custom httpx client (e.g. from
            ``get_logging_httpx_client``).

    Returns:
        A configured ``AsyncAnthropic`` instance.

    Raises:
        RuntimeError: If the ``anthropic`` SDK is not installed.
    """
    if AsyncAnthropic is None:  # pragma: no cover
        raise RuntimeError(
            "anthropic is not installed. Install `meshagent-anthropic` extras/deps."
        )

    token: str = room.protocol.token

    # Fall back to the room URL when the protocol does not expose one.
    # Bug fix: the original used getattr without a default, which raises
    # AttributeError for protocols lacking `url`, making the fallback branch
    # unreachable.
    url = getattr(room.protocol, "url", None)
    if url is None:
        logger.debug(
            "protocol does not have url, anthropic client falling back to room url %s",
            room.room_url,
        )
        url = room.room_url
    else:
        logger.debug("protocol had url, anthropic client will use %s", url)

    room_proxy_url = f"{url}/anthropic"

    # The proxy speaks HTTP: convert websocket schemes by replacing only the
    # leading "ws" (ws:// -> http://, wss:// -> https://).
    if room_proxy_url.startswith(("ws:", "wss:")):
        room_proxy_url = room_proxy_url.replace("ws", "http", 1)

    # The MeshAgent room proxy validates `x-api-key` and `Meshagent-Session`.
    return AsyncAnthropic(
        api_key=token,
        base_url=room_proxy_url,
        http_client=http_client,
        default_headers={"Meshagent-Session": room.session_id},
    )
|
|
@@ -0,0 +1,156 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import sys
|
|
3
|
+
|
|
4
|
+
import pytest
|
|
5
|
+
|
|
6
|
+
from meshagent.anthropic.messages_adapter import AnthropicMessagesAdapter
|
|
7
|
+
from meshagent.anthropic.mcp import MCPConfig, MCPServer, MCPTool
|
|
8
|
+
from meshagent.agents.agent import AgentChatContext
|
|
9
|
+
from meshagent.tools import Toolkit
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def _import_real_anthropic_sdk():
    """Import the external `anthropic` SDK without shadowing.

    If `pytest` is run from inside `.../meshagent/`, Python may resolve
    `import anthropic` to the local `meshagent/anthropic` package directory.

    Raises:
        RuntimeError: If the local package shadowed the real SDK anyway.
    """

    cwd = os.getcwd()

    # Remove cwd entries from sys.path so the local `anthropic` directory
    # cannot shadow the installed SDK.
    if os.path.isdir(os.path.join(cwd, "anthropic")):
        sys.path = [p for p in sys.path if p not in ("", cwd)]

    import importlib

    mod = importlib.import_module("anthropic")

    # Normalize separators so the shadowing check also works on Windows,
    # where __file__ uses backslashes (the original compared against a
    # forward-slash path and would never match there).
    mod_file = (getattr(mod, "__file__", "") or "").replace(os.sep, "/")
    if mod_file.endswith("/meshagent/anthropic/__init__.py"):
        raise RuntimeError(
            "Imported local `meshagent/anthropic` instead of the Anthropic SDK. "
            "Run pytest from the repo root."
        )

    return mod
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
a = _import_real_anthropic_sdk()
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class _DummyRoom:
    # Minimal stand-in for a RoomClient: the adapter won't touch room when no
    # tools are involved, so no attributes or methods are needed.
    pass
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
@pytest.mark.asyncio
async def test_live_anthropic_adapter_messages_create_if_key_set():
    """Live smoke test: one non-streaming completion through the adapter."""
    # Only runs when a real Anthropic API key is available in the environment.
    api_key = os.getenv("ANTHROPIC_API_KEY")
    if not api_key:
        pytest.skip("ANTHROPIC_API_KEY not set")

    model = os.getenv("ANTHROPIC_TEST_MODEL", "claude-sonnet-4-5")

    client = a.AsyncAnthropic(api_key=api_key)
    adapter = AnthropicMessagesAdapter(model=model, client=client, max_tokens=64)

    ctx = AgentChatContext(system_role=None)
    ctx.append_user_message("Say hello in one word.")

    # No toolkits, so the adapter never touches the room (hence _DummyRoom).
    text = await adapter.next(context=ctx, room=_DummyRoom(), toolkits=[])

    assert isinstance(text, str)
    assert len(text.strip()) > 0
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
@pytest.mark.asyncio
async def test_live_anthropic_adapter_streaming_if_key_set():
    """Live smoke test: streaming completion with an event handler attached."""
    # Only runs when a real Anthropic API key is available in the environment.
    api_key = os.getenv("ANTHROPIC_API_KEY")
    if not api_key:
        pytest.skip("ANTHROPIC_API_KEY not set")

    model = os.getenv("ANTHROPIC_TEST_MODEL", "claude-sonnet-4-5")

    client = a.AsyncAnthropic(api_key=api_key)
    adapter = AnthropicMessagesAdapter(model=model, client=client, max_tokens=64)

    ctx = AgentChatContext(system_role=None)
    ctx.append_user_message("Count from 1 to 3.")

    # Collect the `type` field of every event the adapter forwards.
    seen_types: list[str] = []

    def handler(event: dict):
        if isinstance(event, dict) and "type" in event:
            seen_types.append(event["type"])

    text = await adapter.next(
        context=ctx,
        room=_DummyRoom(),
        toolkits=[],
        event_handler=handler,
    )

    assert isinstance(text, str)
    assert len(text.strip()) > 0
    # These are best-effort; event types depend on Anthropic SDK.
    assert len(seen_types) > 0
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
@pytest.mark.asyncio
async def test_live_anthropic_mcp_deepwiki_if_key_set():
    """Live test: the MCP connector (DeepWiki server) engages during a run."""
    # Only runs when a real Anthropic API key is available in the environment.
    api_key = os.getenv("ANTHROPIC_API_KEY")
    if not api_key:
        pytest.skip("ANTHROPIC_API_KEY not set")

    model = os.getenv("ANTHROPIC_TEST_MODEL", "claude-sonnet-4-5")

    client = a.AsyncAnthropic(api_key=api_key)
    adapter = AnthropicMessagesAdapter(model=model, client=client, max_tokens=256)

    ctx = AgentChatContext(system_role=None)
    ctx.append_user_message(
        "Use the DeepWiki MCP toolset and make at least one tool call. "
        "Then reply with a one-sentence summary of what you learned."
    )

    # Toolkit wrapping the remote DeepWiki MCP server via the MCP connector.
    mcp_toolkit = Toolkit(
        name="mcp",
        tools=[
            MCPTool(
                config=MCPConfig(
                    mcp_servers=[
                        MCPServer(url="https://mcp.deepwiki.com/mcp", name="deepwiki")
                    ]
                )
            )
        ],
    )

    # Flipped when a forwarded stream event shows an MCP tool use/result block.
    seen_mcp_blocks = False

    def handler(event: dict):
        nonlocal seen_mcp_blocks
        if not isinstance(event, dict):
            return

        # Adapter forwards Anthropic SDK stream events:
        # {"type": "content_block_start", "event": {...}}
        if event.get("type") == "content_block_start":
            payload = event.get("event") or {}
            content_block = payload.get("content_block") or {}
            if content_block.get("type") in {"mcp_tool_use", "mcp_tool_result"}:
                seen_mcp_blocks = True

    text = await adapter.next(
        context=ctx,
        room=_DummyRoom(),
        toolkits=[mcp_toolkit],
        event_handler=handler,
    )

    assert isinstance(text, str)
    assert len(text.strip()) > 0

    # This asserts the connector actually engaged (best-effort, but should be stable
    # for DeepWiki).
    assert seen_mcp_blocks
|