fast-agent-mcp 0.0.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of fast-agent-mcp might be problematic.
- fast_agent_mcp-0.0.7.dist-info/METADATA +322 -0
- fast_agent_mcp-0.0.7.dist-info/RECORD +100 -0
- fast_agent_mcp-0.0.7.dist-info/WHEEL +4 -0
- fast_agent_mcp-0.0.7.dist-info/entry_points.txt +5 -0
- fast_agent_mcp-0.0.7.dist-info/licenses/LICENSE +201 -0
- mcp_agent/__init__.py +0 -0
- mcp_agent/agents/__init__.py +0 -0
- mcp_agent/agents/agent.py +277 -0
- mcp_agent/app.py +303 -0
- mcp_agent/cli/__init__.py +0 -0
- mcp_agent/cli/__main__.py +4 -0
- mcp_agent/cli/commands/bootstrap.py +221 -0
- mcp_agent/cli/commands/config.py +11 -0
- mcp_agent/cli/commands/setup.py +229 -0
- mcp_agent/cli/main.py +68 -0
- mcp_agent/cli/terminal.py +24 -0
- mcp_agent/config.py +334 -0
- mcp_agent/console.py +28 -0
- mcp_agent/context.py +251 -0
- mcp_agent/context_dependent.py +48 -0
- mcp_agent/core/fastagent.py +1013 -0
- mcp_agent/eval/__init__.py +0 -0
- mcp_agent/event_progress.py +88 -0
- mcp_agent/executor/__init__.py +0 -0
- mcp_agent/executor/decorator_registry.py +120 -0
- mcp_agent/executor/executor.py +293 -0
- mcp_agent/executor/task_registry.py +34 -0
- mcp_agent/executor/temporal.py +405 -0
- mcp_agent/executor/workflow.py +197 -0
- mcp_agent/executor/workflow_signal.py +325 -0
- mcp_agent/human_input/__init__.py +0 -0
- mcp_agent/human_input/handler.py +49 -0
- mcp_agent/human_input/types.py +58 -0
- mcp_agent/logging/__init__.py +0 -0
- mcp_agent/logging/events.py +123 -0
- mcp_agent/logging/json_serializer.py +163 -0
- mcp_agent/logging/listeners.py +216 -0
- mcp_agent/logging/logger.py +365 -0
- mcp_agent/logging/rich_progress.py +120 -0
- mcp_agent/logging/tracing.py +140 -0
- mcp_agent/logging/transport.py +461 -0
- mcp_agent/mcp/__init__.py +0 -0
- mcp_agent/mcp/gen_client.py +85 -0
- mcp_agent/mcp/mcp_activity.py +18 -0
- mcp_agent/mcp/mcp_agent_client_session.py +242 -0
- mcp_agent/mcp/mcp_agent_server.py +56 -0
- mcp_agent/mcp/mcp_aggregator.py +394 -0
- mcp_agent/mcp/mcp_connection_manager.py +330 -0
- mcp_agent/mcp/stdio.py +104 -0
- mcp_agent/mcp_server_registry.py +275 -0
- mcp_agent/progress_display.py +10 -0
- mcp_agent/resources/examples/decorator/main.py +26 -0
- mcp_agent/resources/examples/decorator/optimizer.py +78 -0
- mcp_agent/resources/examples/decorator/orchestrator.py +68 -0
- mcp_agent/resources/examples/decorator/parallel.py +81 -0
- mcp_agent/resources/examples/decorator/router.py +56 -0
- mcp_agent/resources/examples/decorator/tiny.py +22 -0
- mcp_agent/resources/examples/mcp_researcher/main-evalopt.py +53 -0
- mcp_agent/resources/examples/mcp_researcher/main.py +38 -0
- mcp_agent/telemetry/__init__.py +0 -0
- mcp_agent/telemetry/usage_tracking.py +18 -0
- mcp_agent/workflows/__init__.py +0 -0
- mcp_agent/workflows/embedding/__init__.py +0 -0
- mcp_agent/workflows/embedding/embedding_base.py +61 -0
- mcp_agent/workflows/embedding/embedding_cohere.py +49 -0
- mcp_agent/workflows/embedding/embedding_openai.py +46 -0
- mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +359 -0
- mcp_agent/workflows/intent_classifier/__init__.py +0 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_base.py +120 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +134 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +45 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +45 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +161 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +60 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +60 -0
- mcp_agent/workflows/llm/__init__.py +0 -0
- mcp_agent/workflows/llm/augmented_llm.py +645 -0
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +539 -0
- mcp_agent/workflows/llm/augmented_llm_openai.py +615 -0
- mcp_agent/workflows/llm/llm_selector.py +345 -0
- mcp_agent/workflows/llm/model_factory.py +175 -0
- mcp_agent/workflows/orchestrator/__init__.py +0 -0
- mcp_agent/workflows/orchestrator/orchestrator.py +407 -0
- mcp_agent/workflows/orchestrator/orchestrator_models.py +154 -0
- mcp_agent/workflows/orchestrator/orchestrator_prompts.py +113 -0
- mcp_agent/workflows/parallel/__init__.py +0 -0
- mcp_agent/workflows/parallel/fan_in.py +350 -0
- mcp_agent/workflows/parallel/fan_out.py +187 -0
- mcp_agent/workflows/parallel/parallel_llm.py +141 -0
- mcp_agent/workflows/router/__init__.py +0 -0
- mcp_agent/workflows/router/router_base.py +276 -0
- mcp_agent/workflows/router/router_embedding.py +240 -0
- mcp_agent/workflows/router/router_embedding_cohere.py +59 -0
- mcp_agent/workflows/router/router_embedding_openai.py +59 -0
- mcp_agent/workflows/router/router_llm.py +301 -0
- mcp_agent/workflows/swarm/__init__.py +0 -0
- mcp_agent/workflows/swarm/swarm.py +320 -0
- mcp_agent/workflows/swarm/swarm_anthropic.py +42 -0
- mcp_agent/workflows/swarm/swarm_openai.py +41 -0
mcp_agent/workflows/llm/augmented_llm_anthropic.py
@@ -0,0 +1,539 @@
+import json
+from typing import Iterable, List, Type
+
+from pydantic import BaseModel
+
+import instructor
+from anthropic import Anthropic
+from anthropic.types import (
+    ContentBlock,
+    DocumentBlockParam,
+    Message,
+    MessageParam,
+    ImageBlockParam,
+    TextBlock,
+    TextBlockParam,
+    ToolParam,
+    ToolResultBlockParam,
+    ToolUseBlockParam,
+)
+from mcp.types import (
+    CallToolRequestParams,
+    CallToolRequest,
+    EmbeddedResource,
+    ImageContent,
+    StopReason,
+    TextContent,
+    TextResourceContents,
+)
+
+from mcp_agent.workflows.router.router_llm import StructuredResponse
+from mcp_agent.workflows.llm.augmented_llm import (
+    AugmentedLLM,
+    ModelT,
+    MCPMessageParam,
+    MCPMessageResult,
+    ProviderToMCPConverter,
+    RequestParams,
+)
+from mcp_agent.logging.logger import get_logger
+
+DEFAULT_ANTHROPIC_MODEL = "claude-3-5-sonnet-latest"
+
+
+class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
+    """
+    The basic building block of agentic systems is an LLM enhanced with augmentations
+    such as retrieval, tools, and memory provided from a collection of MCP servers.
+    Our current models can actively use these capabilities—generating their own search queries,
+    selecting appropriate tools, and determining what information to retain.
+    """
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, type_converter=AnthropicMCPTypeConverter, **kwargs)
+
+        self.provider = "Anthropic"
+        # Initialize logger with name if available
+        self.logger = get_logger(f"{__name__}.{self.name}" if self.name else __name__)
+
+    def _initialize_default_params(self, kwargs: dict) -> RequestParams:
+        """Initialize Anthropic-specific default parameters"""
+        return RequestParams(
+            model=kwargs.get("model", DEFAULT_ANTHROPIC_MODEL),
+            modelPreferences=self.model_preferences,
+            maxTokens=4096,  # default haiku3
+            systemPrompt=self.instruction,
+            parallel_tool_calls=True,
+            max_iterations=10,
+            use_history=True,
+        )
+
+    async def generate(
+        self,
+        message,
+        request_params: RequestParams | None = None,
+    ):
+        """
+        Process a query using an LLM and available tools.
+        Override this method to use a different LLM.
+        """
+        config = self.context.config
+        anthropic = Anthropic(api_key=config.anthropic.api_key)
+        messages: List[MessageParam] = []
+        params = self.get_request_params(request_params)
+
+        if params.use_history:
+            messages.extend(self.history.get())
+
+        if isinstance(message, str):
+            messages.append({"role": "user", "content": message})
+        elif isinstance(message, list):
+            messages.extend(message)
+        else:
+            messages.append(message)
+
+        response = await self.aggregator.list_tools()
+        available_tools: List[ToolParam] = [
+            {
+                "name": tool.name,
+                "description": tool.description,
+                "input_schema": tool.inputSchema,
+            }
+            for tool in response.tools
+        ]
+
+        responses: List[Message] = []
+        model = await self.select_model(params)
+        chat_turn = (len(messages) + 1) // 2
+        self._log_chat_progress(chat_turn, model=model)
+        self.show_user_message(str(message), model, chat_turn)
+
+        for i in range(params.max_iterations):
+            arguments = {
+                "model": model,
+                "messages": messages,
+                "system": self.instruction or params.systemPrompt,
+                "stop_sequences": params.stopSequences,
+                "tools": available_tools,
+            }
+
+            if params.maxTokens is not None:
+                arguments["max_tokens"] = params.maxTokens
+
+            if params.metadata:
+                arguments = {**arguments, **params.metadata}
+
+            self.logger.debug(f"{arguments}")
+
+            executor_result = await self.executor.execute(
+                anthropic.messages.create, **arguments
+            )
+
+            response = executor_result[0]
+
+            if isinstance(response, BaseException):
+                self.logger.error(f"Error: {executor_result}")
+                # Don't break, instead create an error response
+                error_message = f"Error during generation: {str(response)}"
+                response = Message(
+                    role="assistant",
+                    type="message",
+                    content=[TextBlock(type="text", text=error_message)],
+                    stop_reason="error",
+                )
+
+            self.logger.debug(
+                f"{model} response:",
+                data=response,
+            )
+
+            response_as_message = self.convert_message_to_message_param(response)
+            messages.append(response_as_message)
+            responses.append(response)
+
+            if response.stop_reason == "end_turn":
+                message_text = ""
+                for block in response_as_message["content"]:
+                    if isinstance(block, dict) and block.get("type") == "text":
+                        message_text += block.get("text", "")
+                    elif hasattr(block, "type") and block.type == "text":
+                        message_text += block.text
+
+                await self.show_assistant_message(message_text)
+
+                self.logger.debug(
+                    f"Iteration {i}: Stopping because finish_reason is 'end_turn'"
+                )
+                break
+            elif response.stop_reason == "stop_sequence":
+                # We have reached a stop sequence
+                self.logger.debug(
+                    f"Iteration {i}: Stopping because finish_reason is 'stop_sequence'"
+                )
+                break
+            elif response.stop_reason == "max_tokens":
+                # We have reached the max tokens limit
+                self.logger.debug(
+                    f"Iteration {i}: Stopping because finish_reason is 'max_tokens'"
+                )
+                # TODO: saqadri - would be useful to return the reason for stopping to the caller
+                break
+            else:
+                message_text = ""
+                for block in response_as_message["content"]:
+                    if isinstance(block, dict) and block.get("type") == "text":
+                        message_text += block.get("text", "")
+                    elif hasattr(block, "type") and block.type == "text":
+                        message_text += block.text
+
+                # response.stop_reason == "tool_use":
+                for content in response.content:
+                    if content.type == "tool_use":
+                        tool_name = content.name
+                        tool_args = content.input
+                        tool_use_id = content.id
+
+                        await self.show_assistant_message(message_text, tool_name)
+
+                        self.show_tool_call(available_tools, tool_name, tool_args)
+                        tool_call_request = CallToolRequest(
+                            method="tools/call",
+                            params=CallToolRequestParams(
+                                name=tool_name, arguments=tool_args
+                            ),
+                        )
+                        # TODO -- support MCP isError etc.
+                        result = await self.call_tool(
+                            request=tool_call_request, tool_call_id=tool_use_id
+                        )
+                        self.show_tool_result(result)
+                        messages.append(
+                            MessageParam(
+                                role="user",
+                                content=[
+                                    ToolResultBlockParam(
+                                        type="tool_result",
+                                        tool_use_id=tool_use_id,
+                                        content=result.content,
+                                        is_error=result.isError,
+                                    )
+                                ],
+                            )
+                        )
+
+        if params.use_history:
+            self.history.set(messages)
+
+        self._log_chat_finished(model=model)
+
+        return responses
+
+    async def generate_str(
+        self,
+        message,
+        request_params: RequestParams | None = None,
+    ) -> str:
+        """
+        Process a query using an LLM and available tools.
+        The default implementation uses Claude as the LLM.
+        Override this method to use a different LLM.
+        """
+        responses: List[Message] = await self.generate(
+            message=message,
+            request_params=request_params,
+        )
+
+        final_text: List[str] = []
+
+        for response in responses:
+            for content in response.content:
+                if content.type == "text":
+                    final_text.append(content.text)
+                elif content.type == "tool_use":
+                    final_text.append(
+                        f"[Calling tool {content.name} with args {content.input}]"
+                    )
+
+        return "\n".join(final_text)
+
+    async def generate_structured(
+        self,
+        message,
+        response_model: Type[ModelT],
+        request_params: RequestParams | None = None,
+    ) -> ModelT:
+        # First we invoke the LLM to generate a string response
+        # We need to do this in a two-step process because Instructor doesn't
+        # know how to invoke MCP tools via call_tool, so we'll handle all the
+        # processing first and then pass the final response through Instructor
+        response = await self.generate_str(
+            message=message,
+            request_params=request_params,
+        )
+        # Don't try to parse if we got no response
+        if not response:
+            self.logger.error("No response from generate_str")
+            return StructuredResponse(categories=[])
+
+        # Next we pass the text through instructor to extract structured data
+        client = instructor.from_anthropic(
+            Anthropic(api_key=self.context.config.anthropic.api_key),
+        )
+
+        params = self.get_request_params(request_params)
+        model = await self.select_model(params)
+
+        # Extract structured data from natural language
+        structured_response = client.chat.completions.create(
+            model=model,
+            response_model=response_model,
+            messages=[{"role": "user", "content": response}],
+            max_tokens=params.maxTokens,
+        )
+
+        return structured_response
+
+    @classmethod
+    def convert_message_to_message_param(
+        cls, message: Message, **kwargs
+    ) -> MessageParam:
+        """Convert a response object to an input parameter object to allow LLM calls to be chained."""
+        content = []
+
+        for content_block in message.content:
+            if content_block.type == "text":
+                content.append(TextBlockParam(type="text", text=content_block.text))
+            elif content_block.type == "tool_use":
+                content.append(
+                    ToolUseBlockParam(
+                        type="tool_use",
+                        name=content_block.name,
+                        input=content_block.input,
+                        id=content_block.id,
+                    )
+                )
+
+        return MessageParam(role="assistant", content=content, **kwargs)
+
+    def message_param_str(self, message: MessageParam) -> str:
+        """Convert an input message to a string representation."""
+
+        if message.get("content"):
+            content = message["content"]
+            if isinstance(content, str):
+                return content
+            else:
+                final_text: List[str] = []
+                for block in content:
+                    if block.text:
+                        final_text.append(str(block.text))
+                    else:
+                        final_text.append(str(block))
+
+                return "\n".join(final_text)
+
+        return str(message)
+
+    def message_str(self, message: Message) -> str:
+        """Convert an output message to a string representation."""
+        content = message.content
+
+        if content:
+            if isinstance(content, list):
+                final_text: List[str] = []
+                for block in content:
+                    if block.text:
+                        final_text.append(str(block.text))
+                    else:
+                        final_text.append(str(block))
+
+                return "\n".join(final_text)
+            else:
+                return str(content)
+
+        return str(message)
+
+
+class AnthropicMCPTypeConverter(ProviderToMCPConverter[MessageParam, Message]):
+    """
+    Convert between Anthropic and MCP types.
+    """
+
+    @classmethod
+    def from_mcp_message_result(cls, result: MCPMessageResult) -> Message:
+        # MCPMessageResult -> Message
+        if result.role != "assistant":
+            raise ValueError(
+                f"Expected role to be 'assistant' but got '{result.role}' instead."
+            )
+
+        return Message(
+            role="assistant",
+            type="message",
+            content=[mcp_content_to_anthropic_content(result.content)],
+            model=result.model,
+            stop_reason=mcp_stop_reason_to_anthropic_stop_reason(result.stopReason),
+            id=result.id or None,
+            usage=result.usage or None,
+            # TODO: should we push extras?
+        )
+
+    @classmethod
+    def to_mcp_message_result(cls, result: Message) -> MCPMessageResult:
+        # Message -> MCPMessageResult
+
+        contents = anthropic_content_to_mcp_content(result.content)
+        if len(contents) > 1:
+            raise NotImplementedError(
+                "Multiple content elements in a single message are not supported in MCP yet"
+            )
+        mcp_content = contents[0]
+
+        return MCPMessageResult(
+            role=result.role,
+            content=mcp_content,
+            model=result.model,
+            stopReason=anthropic_stop_reason_to_mcp_stop_reason(result.stop_reason),
+            # extras for Message fields
+            **result.model_dump(exclude={"role", "content", "model", "stop_reason"}),
+        )
+
+    @classmethod
+    def from_mcp_message_param(cls, param: MCPMessageParam) -> MessageParam:
+        # MCPMessageParam -> MessageParam
+        extras = param.model_dump(exclude={"role", "content"})
+        return MessageParam(
+            role=param.role,
+            content=[mcp_content_to_anthropic_content(param.content)],
+            **extras,
+        )
+
+    @classmethod
+    def to_mcp_message_param(cls, param: MessageParam) -> MCPMessageParam:
+        # Implement the conversion from ChatCompletionMessage to MCP message param
+
+        contents = anthropic_content_to_mcp_content(param.content)
+
+        # TODO: saqadri - the mcp_content can have multiple elements
+        # while sampling message content has a single content element
+        # Right now we error out if there are > 1 elements in mcp_content
+        # We need to handle this case properly going forward
+        if len(contents) > 1:
+            raise NotImplementedError(
+                "Multiple content elements in a single message are not supported"
+            )
+        mcp_content = contents[0]
+
+        return MCPMessageParam(
+            role=param.role,
+            content=mcp_content,
+            **typed_dict_extras(param, ["role", "content"]),
+        )
+
+
+def mcp_content_to_anthropic_content(
+    content: TextContent | ImageContent | EmbeddedResource,
+) -> ContentBlock:
+    if isinstance(content, TextContent):
+        return TextBlock(type=content.type, text=content.text)
+    elif isinstance(content, ImageContent):
+        # Best effort to convert an image to text (since there's no ImageBlock)
+        return TextBlock(type="text", text=f"{content.mimeType}:{content.data}")
+    elif isinstance(content, EmbeddedResource):
+        if isinstance(content.resource, TextResourceContents):
+            return TextBlock(type="text", text=content.resource.text)
+        else:  # BlobResourceContents
+            return TextBlock(
+                type="text", text=f"{content.resource.mimeType}:{content.resource.blob}"
+            )
+    else:
+        # Last effort to convert the content to a string
+        return TextBlock(type="text", text=str(content))
+
+
+def anthropic_content_to_mcp_content(
+    content: str
+    | Iterable[
+        TextBlockParam
+        | ImageBlockParam
+        | ToolUseBlockParam
+        | ToolResultBlockParam
+        | DocumentBlockParam
+        | ContentBlock
+    ],
+) -> List[TextContent | ImageContent | EmbeddedResource]:
+    mcp_content = []
+
+    if isinstance(content, str):
+        mcp_content.append(TextContent(type="text", text=content))
+    else:
+        for block in content:
+            if block.type == "text":
+                mcp_content.append(TextContent(type="text", text=block.text))
+            elif block.type == "image":
+                raise NotImplementedError("Image content conversion not implemented")
+            elif block.type == "tool_use":
+                # Best effort to convert a tool use to text (since there's no ToolUseContent)
+                mcp_content.append(
+                    TextContent(
+                        type="text",
+                        text=to_string(block),
+                    )
+                )
+            elif block.type == "tool_result":
+                # Best effort to convert a tool result to text (since there's no ToolResultContent)
+                mcp_content.append(
+                    TextContent(
+                        type="text",
+                        text=to_string(block),
+                    )
+                )
+            elif block.type == "document":
+                raise NotImplementedError("Document content conversion not implemented")
+            else:
+                # Last effort to convert the content to a string
+                mcp_content.append(TextContent(type="text", text=str(block)))
+
+    return mcp_content
+
+
+def mcp_stop_reason_to_anthropic_stop_reason(stop_reason: StopReason):
+    if not stop_reason:
+        return None
+    elif stop_reason == "endTurn":
+        return "end_turn"
+    elif stop_reason == "maxTokens":
+        return "max_tokens"
+    elif stop_reason == "stopSequence":
+        return "stop_sequence"
+    elif stop_reason == "toolUse":
+        return "tool_use"
+    else:
+        return stop_reason
+
+
+def anthropic_stop_reason_to_mcp_stop_reason(stop_reason: str) -> StopReason:
+    if not stop_reason:
+        return None
+    elif stop_reason == "end_turn":
+        return "endTurn"
+    elif stop_reason == "max_tokens":
+        return "maxTokens"
+    elif stop_reason == "stop_sequence":
+        return "stopSequence"
+    elif stop_reason == "tool_use":
+        return "toolUse"
+    else:
+        return stop_reason
+
+
+def to_string(obj: BaseModel | dict) -> str:
+    if isinstance(obj, BaseModel):
+        return obj.model_dump_json()
+    else:
+        return json.dumps(obj)
+
+
+def typed_dict_extras(d: dict, exclude: List[str]):
+    extras = {k: v for k, v in d.items() if k not in exclude}
+    return extras