fast-agent-mcp 0.1.8__py3-none-any.whl → 0.1.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/METADATA +26 -4
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/RECORD +43 -22
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/entry_points.txt +1 -0
- mcp_agent/agents/agent.py +5 -11
- mcp_agent/core/agent_app.py +89 -13
- mcp_agent/core/fastagent.py +13 -3
- mcp_agent/core/mcp_content.py +222 -0
- mcp_agent/core/prompt.py +132 -0
- mcp_agent/core/proxies.py +41 -36
- mcp_agent/logging/transport.py +30 -3
- mcp_agent/mcp/mcp_aggregator.py +11 -10
- mcp_agent/mcp/mime_utils.py +69 -0
- mcp_agent/mcp/prompt_message_multipart.py +64 -0
- mcp_agent/mcp/prompt_serialization.py +447 -0
- mcp_agent/mcp/prompts/__init__.py +0 -0
- mcp_agent/mcp/prompts/__main__.py +10 -0
- mcp_agent/mcp/prompts/prompt_server.py +508 -0
- mcp_agent/mcp/prompts/prompt_template.py +469 -0
- mcp_agent/mcp/resource_utils.py +203 -0
- mcp_agent/resources/examples/internal/agent.py +1 -1
- mcp_agent/resources/examples/internal/fastagent.config.yaml +2 -2
- mcp_agent/resources/examples/internal/sizer.py +0 -5
- mcp_agent/resources/examples/prompting/__init__.py +3 -0
- mcp_agent/resources/examples/prompting/agent.py +23 -0
- mcp_agent/resources/examples/prompting/fastagent.config.yaml +44 -0
- mcp_agent/resources/examples/prompting/image_server.py +56 -0
- mcp_agent/workflows/llm/anthropic_utils.py +101 -0
- mcp_agent/workflows/llm/augmented_llm.py +139 -66
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +127 -251
- mcp_agent/workflows/llm/augmented_llm_openai.py +149 -305
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +43 -0
- mcp_agent/workflows/llm/augmented_llm_playback.py +109 -0
- mcp_agent/workflows/llm/model_factory.py +20 -3
- mcp_agent/workflows/llm/openai_utils.py +65 -0
- mcp_agent/workflows/llm/providers/__init__.py +8 -0
- mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +348 -0
- mcp_agent/workflows/llm/providers/multipart_converter_openai.py +426 -0
- mcp_agent/workflows/llm/providers/openai_multipart.py +197 -0
- mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +258 -0
- mcp_agent/workflows/llm/providers/sampling_converter_openai.py +229 -0
- mcp_agent/workflows/llm/sampling_format_converter.py +39 -0
- mcp_agent/core/server_validation.py +0 -44
- mcp_agent/core/simulator_registry.py +0 -22
- mcp_agent/workflows/llm/enhanced_passthrough.py +0 -70
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/licenses/LICENSE +0 -0
--- a/mcp_agent/workflows/llm/augmented_llm_anthropic.py
+++ b/mcp_agent/workflows/llm/augmented_llm_anthropic.py
@@ -1,47 +1,42 @@
-import json
 import os
-from typing import Iterable, List, Type
+from typing import List, Type, TYPE_CHECKING
+
+from mcp_agent.workflows.llm.providers.multipart_converter_anthropic import (
+    AnthropicConverter,
+)
+from mcp_agent.workflows.llm.providers.sampling_converter_anthropic import (
+    AnthropicSamplingConverter,
+)
+
+if TYPE_CHECKING:
+    from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 
-from pydantic import BaseModel
 
 from anthropic import Anthropic, AuthenticationError
 from anthropic.types import (
-    ContentBlock,
-    DocumentBlockParam,
     Message,
     MessageParam,
-    ImageBlockParam,
     TextBlock,
     TextBlockParam,
     ToolParam,
-    ToolResultBlockParam,
     ToolUseBlockParam,
 )
 from mcp.types import (
     CallToolRequestParams,
     CallToolRequest,
-    EmbeddedResource,
-    ImageContent,
-    StopReason,
-    TextContent,
-    TextResourceContents,
 )
 from pydantic_core import from_json
 
 from mcp_agent.workflows.llm.augmented_llm import (
     AugmentedLLM,
     ModelT,
-    MCPMessageParam,
-    MCPMessageResult,
-    ProviderToMCPConverter,
     RequestParams,
 )
 from mcp_agent.core.exceptions import ProviderKeyError
-from mcp_agent.logging.logger import get_logger
-from mcp.types import PromptMessage
 from rich.text import Text
 
-
+from mcp_agent.logging.logger import get_logger
+
 DEFAULT_ANTHROPIC_MODEL = "claude-3-7-sonnet-latest"
 
 
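This hunk moves the Anthropic converters into the new `mcp_agent.workflows.llm.providers` package and imports `PromptMessageMultipart` only under `TYPE_CHECKING`. Below is a minimal sketch of that guard pattern; the helper function is illustrative and not part of the package:

```python
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Evaluated only by static type checkers, never at runtime, which
    # avoids circular imports between the LLM and prompt modules.
    from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart


def first_role(prompt: "PromptMessageMultipart") -> str:
    # The quoted annotation is resolved lazily, so this function runs even
    # though PromptMessageMultipart was never imported at runtime.
    return prompt.role
```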
@@ -59,7 +54,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         self.logger = get_logger(__name__)
 
         # Now call super().__init__
-        super().__init__(*args, type_converter=AnthropicMCPTypeConverter, **kwargs)
+        super().__init__(*args, type_converter=AnthropicSamplingConverter, **kwargs)
 
     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
         """Initialize Anthropic-specific default parameters"""
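The constructor now injects `AnthropicSamplingConverter` in place of the module-local `AnthropicMCPTypeConverter` deleted in the final hunk below. A self-contained sketch of the injection pattern, using stand-in names rather than the library's real signatures:

```python
class BaseLLM:
    # Stands in for AugmentedLLM: the base class accepts whichever
    # converter type the provider subclass hands it.
    def __init__(self, *args, type_converter=None, **kwargs):
        self.type_converter = type_converter


class FakeSamplingConverter:
    # Stands in for AnthropicSamplingConverter.
    pass


class FakeAnthropicLLM(BaseLLM):
    def __init__(self, *args, **kwargs):
        # Callers never pass the converter; the subclass pins it.
        super().__init__(*args, type_converter=FakeSamplingConverter, **kwargs)


assert FakeAnthropicLLM().type_converter is FakeSamplingConverter
```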
@@ -272,21 +267,10 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
                     self.show_tool_result(result)
 
                     # Add each result to our collection
-                    tool_results.append(
-                        ToolResultBlockParam(
-                            type="tool_result",
-                            tool_use_id=tool_use_id,
-                            content=result.content,
-                            is_error=result.isError,
-                        )
-                    )
+                    tool_results.append((tool_use_id, result))
 
-                # Add all tool results in a single message
                 messages.append(
-                    MessageParam(
-                        role="user",
-                        content=tool_results,
-                    )
+                    AnthropicConverter.create_tool_results_message(tool_results)
                 )
 
         # Only save the new conversation messages to history if use_history is true
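The tool loop now collects plain `(tool_use_id, result)` tuples and defers message construction to `AnthropicConverter.create_tool_results_message`. Judging from the deleted inline code, that helper plausibly folds the pairs into a single user-role message of `tool_result` blocks; a sketch under that assumption (the real implementation lives in `multipart_converter_anthropic.py` and may differ):

```python
from typing import Any, List, Tuple


def create_tool_results_message(tool_results: List[Tuple[str, Any]]) -> dict:
    """Sketch: fold (tool_use_id, result) pairs into one user message."""
    blocks = []
    for tool_use_id, result in tool_results:
        blocks.append(
            {
                "type": "tool_result",
                "tool_use_id": tool_use_id,
                "content": result.content,  # assumed CallToolResult-like object
                "is_error": result.isError,
            }
        )
    # Anthropic expects all tool results for a turn in one user-role
    # message, which is exactly what the deleted inline code built.
    return {"role": "user", "content": blocks}
```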
@@ -335,7 +319,15 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         Process a query using an LLM and available tools.
         The default implementation uses Claude as the LLM.
         Override this method to use a different LLM.
+
+        Special commands:
+        - "***SAVE_HISTORY <filename.md>" - Saves the conversation history to the specified file
+          in MCP prompt format with user/assistant delimiters.
         """
+        # Check if this is a special command to save history
+        if isinstance(message, str) and message.startswith("***SAVE_HISTORY "):
+            return await self._save_history_to_file(message)
+
         responses: List[Message] = await self.generate(
             message=message,
             request_params=request_params,
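The guard added above short-circuits the LLM call when the message is the `***SAVE_HISTORY` command; from the surrounding context this method appears to be `generate_str`. A hypothetical usage sketch, assuming `llm` is an already-configured `AnthropicAugmentedLLM`:

```python
async def dump_history(llm) -> None:
    # The command needs the trailing space and a filename; anything else
    # gets the "Invalid format" error from _save_history_to_file.
    reply = await llm.generate_str("***SAVE_HISTORY conversation.md")
    print(reply)  # "Done. Saved conversation history to conversation.md" on success
```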
@@ -360,6 +352,109 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         # Join all collected text
         return "\n".join(final_text)
 
+    async def generate_prompt(
+        self, prompt: "PromptMessageMultipart", request_params: RequestParams | None
+    ) -> str:
+        return await self.generate_str(
+            AnthropicConverter.convert_to_anthropic(prompt), request_params
+        )
+
+    async def _apply_prompt_template_provider_specific(
+        self, multipart_messages: List["PromptMessageMultipart"]
+    ) -> str:
+        """
+        Anthropic-specific implementation of apply_prompt_template that handles
+        multimodal content natively.
+
+        Args:
+            multipart_messages: List of PromptMessageMultipart objects parsed from the prompt template
+
+        Returns:
+            String representation of the assistant's response if generated,
+            or the last assistant message in the prompt
+        """
+        # Check the last message role
+        last_message = multipart_messages[-1]
+
+        # Add all previous messages to history (or all messages if last is from assistant)
+        messages_to_add = (
+            multipart_messages[:-1]
+            if last_message.role == "user"
+            else multipart_messages
+        )
+        converted = []
+        for msg in messages_to_add:
+            converted.append(AnthropicConverter.convert_to_anthropic(msg))
+        self.history.extend(converted, is_prompt=True)
+
+        if last_message.role == "user":
+            # For user messages: Generate response to the last one
+            self.logger.debug(
+                "Last message in prompt is from user, generating assistant response"
+            )
+            message_param = AnthropicConverter.convert_to_anthropic(last_message)
+            return await self.generate_str(message_param)
+        else:
+            # For assistant messages: Return the last message content as text
+            self.logger.debug(
+                "Last message in prompt is from assistant, returning it directly"
+            )
+            return str(last_message)
+
+    async def _save_history_to_file(self, command: str) -> str:
+        """
+        Save the conversation history to a file in MCP prompt format.
+
+        Args:
+            command: The command string, expected format: "***SAVE_HISTORY <filename.md>"
+
+        Returns:
+            Success or error message
+        """
+        try:
+            # Extract the filename from the command
+            parts = command.split(" ", 1)
+            if len(parts) != 2 or not parts[1].strip():
+                return "Error: Invalid format. Expected '***SAVE_HISTORY <filename.md>'"
+
+            filename = parts[1].strip()
+
+            # Get all messages from history
+            messages = self.history.get(include_history=True)
+
+            # Import required utilities
+            from mcp_agent.workflows.llm.anthropic_utils import (
+                anthropic_message_param_to_prompt_message_multipart,
+            )
+            from mcp_agent.mcp.prompt_serialization import (
+                multipart_messages_to_delimited_format,
+            )
+
+            # Convert message params to PromptMessageMultipart objects
+            multipart_messages = []
+            for msg in messages:
+                multipart_messages.append(
+                    anthropic_message_param_to_prompt_message_multipart(msg)
+                )
+
+            # Convert to delimited format
+            delimited_content = multipart_messages_to_delimited_format(
+                multipart_messages,
+                user_delimiter="---USER",
+                assistant_delimiter="---ASSISTANT",
+            )
+
+            # Write to file
+            with open(filename, "w", encoding="utf-8") as f:
+                f.write("\n\n".join(delimited_content))
+
+            self.logger.info(f"Saved conversation history to {filename}")
+            return f"Done. Saved conversation history to {filename}"
+
+        except Exception as e:
+            self.logger.error(f"Error saving history: {str(e)}")
+            return f"Error saving history: {str(e)}"
+
     async def generate_structured(
         self,
         message,
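Given the `---USER`/`---ASSISTANT` delimiters and the final `"\n\n".join(...)`, the saved file presumably alternates delimiter lines with message text. A simplified sketch of that serialization; the real `multipart_messages_to_delimited_format` also handles multipart (non-text) content:

```python
def to_delimited(messages, user_delimiter="---USER", assistant_delimiter="---ASSISTANT"):
    # (role, text) pairs stand in for PromptMessageMultipart objects.
    parts = []
    for role, text in messages:
        parts.append(user_delimiter if role == "user" else assistant_delimiter)
        parts.append(text)
    return parts


print("\n\n".join(to_delimited([("user", "Hi"), ("assistant", "Hello!")])))
# ---USER
#
# Hi
#
# ---ASSISTANT
#
# Hello!
```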
@@ -437,222 +532,3 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
             return str(content)
 
         return str(message)
-
-
-class AnthropicMCPTypeConverter(ProviderToMCPConverter[MessageParam, Message]):
-    """
-    Convert between Anthropic and MCP types.
-    """
-
-    @classmethod
-    def from_mcp_message_result(cls, result: MCPMessageResult) -> Message:
-        # MCPMessageResult -> Message
-        if result.role != "assistant":
-            raise ValueError(
-                f"Expected role to be 'assistant' but got '{result.role}' instead."
-            )
-
-        return Message(
-            role="assistant",
-            type="message",
-            content=[mcp_content_to_anthropic_content(result.content)],
-            model=result.model,
-            stop_reason=mcp_stop_reason_to_anthropic_stop_reason(result.stopReason),
-            id=result.id or None,
-            usage=result.usage or None,
-            # TODO: should we push extras?
-        )
-
-    @classmethod
-    def to_mcp_message_result(cls, result: Message) -> MCPMessageResult:
-        # Message -> MCPMessageResult
-
-        contents = anthropic_content_to_mcp_content(result.content)
-        if len(contents) > 1:
-            raise NotImplementedError(
-                "Multiple content elements in a single message are not supported in MCP yet"
-            )
-        mcp_content = contents[0]
-
-        return MCPMessageResult(
-            role=result.role,
-            content=mcp_content,
-            model=result.model,
-            stopReason=anthropic_stop_reason_to_mcp_stop_reason(result.stop_reason),
-            # extras for Message fields
-            **result.model_dump(exclude={"role", "content", "model", "stop_reason"}),
-        )
-
-    @classmethod
-    def from_mcp_message_param(cls, param: MCPMessageParam) -> MessageParam:
-        # MCPMessageParam -> MessageParam
-        extras = param.model_dump(exclude={"role", "content"})
-        return MessageParam(
-            role=param.role,
-            content=[mcp_content_to_anthropic_content(param.content)],
-            **extras,
-        )
-
-    @classmethod
-    def to_mcp_message_param(cls, param: MessageParam) -> MCPMessageParam:
-        # Implement the conversion from ChatCompletionMessage to MCP message param
-
-        contents = anthropic_content_to_mcp_content(param.content)
-
-        # TODO: saqadri - the mcp_content can have multiple elements
-        # while sampling message content has a single content element
-        # Right now we error out if there are > 1 elements in mcp_content
-        # We need to handle this case properly going forward
-        if len(contents) > 1:
-            raise NotImplementedError(
-                "Multiple content elements in a single message are not supported"
-            )
-        mcp_content = contents[0]
-
-        return MCPMessageParam(
-            role=param.role,
-            content=mcp_content,
-            **typed_dict_extras(param, ["role", "content"]),
-        )
-
-    @classmethod
-    def from_mcp_prompt_message(cls, message: PromptMessage) -> MessageParam:
-        """Convert an MCP PromptMessage to an Anthropic MessageParam."""
-
-        # Extract content text
-        content_text = (
-            message.content.text
-            if hasattr(message.content, "text")
-            else str(message.content)
-        )
-
-        # Extract extras for flexibility
-        extras = message.model_dump(exclude={"role", "content"})
-
-        # Handle based on role
-        if message.role == "user":
-            return {"role": "user", "content": content_text, **extras}
-        elif message.role == "assistant":
-            return {
-                "role": "assistant",
-                "content": [{"type": "text", "text": content_text}],
-                **extras,
-            }
-        else:
-            # Fall back to user for any unrecognized role, including "system"
-            _logger.warning(
-                f"Unsupported role '{message.role}' in PromptMessage. Falling back to 'user' role."
-            )
-            return {
-                "role": "user",
-                "content": f"[{message.role.upper()}] {content_text}",
-                **extras,
-            }
-
-
-def mcp_content_to_anthropic_content(
-    content: TextContent | ImageContent | EmbeddedResource,
-) -> ContentBlock:
-    if isinstance(content, TextContent):
-        return TextBlock(type=content.type, text=content.text)
-    elif isinstance(content, ImageContent):
-        # Best effort to convert an image to text (since there's no ImageBlock)
-        return TextBlock(type="text", text=f"{content.mimeType}:{content.data}")
-    elif isinstance(content, EmbeddedResource):
-        if isinstance(content.resource, TextResourceContents):
-            return TextBlock(type="text", text=content.resource.text)
-        else:  # BlobResourceContents
-            return TextBlock(
-                type="text", text=f"{content.resource.mimeType}:{content.resource.blob}"
-            )
-    else:
-        # Last effort to convert the content to a string
-        return TextBlock(type="text", text=str(content))
-
-
-def anthropic_content_to_mcp_content(
-    content: str
-    | Iterable[
-        TextBlockParam
-        | ImageBlockParam
-        | ToolUseBlockParam
-        | ToolResultBlockParam
-        | DocumentBlockParam
-        | ContentBlock
-    ],
-) -> List[TextContent | ImageContent | EmbeddedResource]:
-    mcp_content = []
-
-    if isinstance(content, str):
-        mcp_content.append(TextContent(type="text", text=content))
-    else:
-        for block in content:
-            if block.type == "text":
-                mcp_content.append(TextContent(type="text", text=block.text))
-            elif block.type == "image":
-                raise NotImplementedError("Image content conversion not implemented")
-            elif block.type == "tool_use":
-                # Best effort to convert a tool use to text (since there's no ToolUseContent)
-                mcp_content.append(
-                    TextContent(
-                        type="text",
-                        text=to_string(block),
-                    )
-                )
-            elif block.type == "tool_result":
-                # Best effort to convert a tool result to text (since there's no ToolResultContent)
-                mcp_content.append(
-                    TextContent(
-                        type="text",
-                        text=to_string(block),
-                    )
-                )
-            elif block.type == "document":
-                raise NotImplementedError("Document content conversion not implemented")
-            else:
-                # Last effort to convert the content to a string
-                mcp_content.append(TextContent(type="text", text=str(block)))
-
-    return mcp_content
-
-
-def mcp_stop_reason_to_anthropic_stop_reason(stop_reason: StopReason):
-    if not stop_reason:
-        return None
-    elif stop_reason == "endTurn":
-        return "end_turn"
-    elif stop_reason == "maxTokens":
-        return "max_tokens"
-    elif stop_reason == "stopSequence":
-        return "stop_sequence"
-    elif stop_reason == "toolUse":
-        return "tool_use"
-    else:
-        return stop_reason
-
-
-def anthropic_stop_reason_to_mcp_stop_reason(stop_reason: str) -> StopReason:
-    if not stop_reason:
-        return None
-    elif stop_reason == "end_turn":
-        return "endTurn"
-    elif stop_reason == "max_tokens":
-        return "maxTokens"
-    elif stop_reason == "stop_sequence":
-        return "stopSequence"
-    elif stop_reason == "tool_use":
-        return "toolUse"
-    else:
-        return stop_reason
-
-
-def to_string(obj: BaseModel | dict) -> str:
-    if isinstance(obj, BaseModel):
-        return obj.model_dump_json()
-    else:
-        return json.dumps(obj)
-
-
-def typed_dict_extras(d: dict, exclude: List[str]):
-    extras = {k: v for k, v in d.items() if k not in exclude}
-    return extras