fast-agent-mcp 0.1.12__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/METADATA +3 -4
- fast_agent_mcp-0.2.0.dist-info/RECORD +123 -0
- mcp_agent/__init__.py +75 -0
- mcp_agent/agents/agent.py +61 -415
- mcp_agent/agents/base_agent.py +522 -0
- mcp_agent/agents/workflow/__init__.py +1 -0
- mcp_agent/agents/workflow/chain_agent.py +173 -0
- mcp_agent/agents/workflow/evaluator_optimizer.py +362 -0
- mcp_agent/agents/workflow/orchestrator_agent.py +591 -0
- mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_models.py +11 -21
- mcp_agent/agents/workflow/parallel_agent.py +182 -0
- mcp_agent/agents/workflow/router_agent.py +307 -0
- mcp_agent/app.py +15 -19
- mcp_agent/cli/commands/bootstrap.py +19 -38
- mcp_agent/cli/commands/config.py +4 -4
- mcp_agent/cli/commands/setup.py +7 -14
- mcp_agent/cli/main.py +7 -10
- mcp_agent/cli/terminal.py +3 -3
- mcp_agent/config.py +25 -40
- mcp_agent/context.py +12 -21
- mcp_agent/context_dependent.py +3 -5
- mcp_agent/core/agent_types.py +10 -7
- mcp_agent/core/direct_agent_app.py +179 -0
- mcp_agent/core/direct_decorators.py +443 -0
- mcp_agent/core/direct_factory.py +476 -0
- mcp_agent/core/enhanced_prompt.py +23 -55
- mcp_agent/core/exceptions.py +8 -8
- mcp_agent/core/fastagent.py +145 -371
- mcp_agent/core/interactive_prompt.py +424 -0
- mcp_agent/core/mcp_content.py +17 -17
- mcp_agent/core/prompt.py +6 -9
- mcp_agent/core/request_params.py +6 -3
- mcp_agent/core/validation.py +92 -18
- mcp_agent/executor/decorator_registry.py +9 -17
- mcp_agent/executor/executor.py +8 -17
- mcp_agent/executor/task_registry.py +2 -4
- mcp_agent/executor/temporal.py +19 -41
- mcp_agent/executor/workflow.py +3 -5
- mcp_agent/executor/workflow_signal.py +15 -21
- mcp_agent/human_input/handler.py +4 -7
- mcp_agent/human_input/types.py +2 -3
- mcp_agent/llm/__init__.py +2 -0
- mcp_agent/llm/augmented_llm.py +450 -0
- mcp_agent/llm/augmented_llm_passthrough.py +162 -0
- mcp_agent/llm/augmented_llm_playback.py +83 -0
- mcp_agent/llm/memory.py +103 -0
- mcp_agent/{workflows/llm → llm}/model_factory.py +22 -16
- mcp_agent/{workflows/llm → llm}/prompt_utils.py +1 -3
- mcp_agent/llm/providers/__init__.py +8 -0
- mcp_agent/{workflows/llm → llm/providers}/anthropic_utils.py +8 -25
- mcp_agent/{workflows/llm → llm/providers}/augmented_llm_anthropic.py +56 -194
- mcp_agent/llm/providers/augmented_llm_deepseek.py +53 -0
- mcp_agent/{workflows/llm → llm/providers}/augmented_llm_openai.py +99 -190
- mcp_agent/{workflows/llm → llm}/providers/multipart_converter_anthropic.py +72 -71
- mcp_agent/{workflows/llm → llm}/providers/multipart_converter_openai.py +65 -71
- mcp_agent/{workflows/llm → llm}/providers/openai_multipart.py +16 -44
- mcp_agent/{workflows/llm → llm/providers}/openai_utils.py +4 -4
- mcp_agent/{workflows/llm → llm}/providers/sampling_converter_anthropic.py +9 -11
- mcp_agent/{workflows/llm → llm}/providers/sampling_converter_openai.py +8 -12
- mcp_agent/{workflows/llm → llm}/sampling_converter.py +3 -31
- mcp_agent/llm/sampling_format_converter.py +37 -0
- mcp_agent/logging/events.py +1 -5
- mcp_agent/logging/json_serializer.py +7 -6
- mcp_agent/logging/listeners.py +20 -23
- mcp_agent/logging/logger.py +17 -19
- mcp_agent/logging/rich_progress.py +10 -8
- mcp_agent/logging/tracing.py +4 -6
- mcp_agent/logging/transport.py +22 -22
- mcp_agent/mcp/gen_client.py +1 -3
- mcp_agent/mcp/interfaces.py +117 -110
- mcp_agent/mcp/logger_textio.py +97 -0
- mcp_agent/mcp/mcp_agent_client_session.py +7 -7
- mcp_agent/mcp/mcp_agent_server.py +8 -8
- mcp_agent/mcp/mcp_aggregator.py +102 -143
- mcp_agent/mcp/mcp_connection_manager.py +20 -27
- mcp_agent/mcp/prompt_message_multipart.py +68 -16
- mcp_agent/mcp/prompt_render.py +77 -0
- mcp_agent/mcp/prompt_serialization.py +30 -48
- mcp_agent/mcp/prompts/prompt_constants.py +18 -0
- mcp_agent/mcp/prompts/prompt_helpers.py +327 -0
- mcp_agent/mcp/prompts/prompt_load.py +109 -0
- mcp_agent/mcp/prompts/prompt_server.py +155 -195
- mcp_agent/mcp/prompts/prompt_template.py +35 -66
- mcp_agent/mcp/resource_utils.py +7 -14
- mcp_agent/mcp/sampling.py +17 -17
- mcp_agent/mcp_server/agent_server.py +13 -17
- mcp_agent/mcp_server_registry.py +13 -22
- mcp_agent/resources/examples/{workflows → in_dev}/agent_build.py +3 -2
- mcp_agent/resources/examples/in_dev/slides.py +110 -0
- mcp_agent/resources/examples/internal/agent.py +6 -3
- mcp_agent/resources/examples/internal/fastagent.config.yaml +8 -2
- mcp_agent/resources/examples/internal/job.py +2 -1
- mcp_agent/resources/examples/internal/prompt_category.py +1 -1
- mcp_agent/resources/examples/internal/prompt_sizing.py +3 -5
- mcp_agent/resources/examples/internal/sizer.py +2 -1
- mcp_agent/resources/examples/internal/social.py +2 -1
- mcp_agent/resources/examples/prompting/agent.py +2 -1
- mcp_agent/resources/examples/prompting/image_server.py +4 -8
- mcp_agent/resources/examples/prompting/work_with_image.py +19 -0
- mcp_agent/ui/console_display.py +16 -20
- fast_agent_mcp-0.1.12.dist-info/RECORD +0 -161
- mcp_agent/core/agent_app.py +0 -646
- mcp_agent/core/agent_utils.py +0 -71
- mcp_agent/core/decorators.py +0 -455
- mcp_agent/core/factory.py +0 -463
- mcp_agent/core/proxies.py +0 -269
- mcp_agent/core/types.py +0 -24
- mcp_agent/eval/__init__.py +0 -0
- mcp_agent/mcp/stdio.py +0 -111
- mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -188
- mcp_agent/resources/examples/data-analysis/analysis.py +0 -65
- mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -41
- mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -1471
- mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +0 -53
- mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -66
- mcp_agent/resources/examples/researcher/researcher-eval.py +0 -53
- mcp_agent/resources/examples/researcher/researcher-imp.py +0 -190
- mcp_agent/resources/examples/researcher/researcher.py +0 -38
- mcp_agent/resources/examples/workflows/chaining.py +0 -44
- mcp_agent/resources/examples/workflows/evaluator.py +0 -78
- mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -24
- mcp_agent/resources/examples/workflows/human_input.py +0 -25
- mcp_agent/resources/examples/workflows/orchestrator.py +0 -73
- mcp_agent/resources/examples/workflows/parallel.py +0 -78
- mcp_agent/resources/examples/workflows/router.py +0 -53
- mcp_agent/resources/examples/workflows/sse.py +0 -23
- mcp_agent/telemetry/__init__.py +0 -0
- mcp_agent/telemetry/usage_tracking.py +0 -18
- mcp_agent/workflows/__init__.py +0 -0
- mcp_agent/workflows/embedding/__init__.py +0 -0
- mcp_agent/workflows/embedding/embedding_base.py +0 -61
- mcp_agent/workflows/embedding/embedding_cohere.py +0 -49
- mcp_agent/workflows/embedding/embedding_openai.py +0 -46
- mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +0 -481
- mcp_agent/workflows/intent_classifier/__init__.py +0 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_base.py +0 -120
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +0 -134
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +0 -45
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +0 -45
- mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +0 -161
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +0 -60
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +0 -60
- mcp_agent/workflows/llm/__init__.py +0 -0
- mcp_agent/workflows/llm/augmented_llm.py +0 -753
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +0 -241
- mcp_agent/workflows/llm/augmented_llm_playback.py +0 -109
- mcp_agent/workflows/llm/providers/__init__.py +0 -8
- mcp_agent/workflows/llm/sampling_format_converter.py +0 -22
- mcp_agent/workflows/orchestrator/__init__.py +0 -0
- mcp_agent/workflows/orchestrator/orchestrator.py +0 -578
- mcp_agent/workflows/parallel/__init__.py +0 -0
- mcp_agent/workflows/parallel/fan_in.py +0 -350
- mcp_agent/workflows/parallel/fan_out.py +0 -187
- mcp_agent/workflows/parallel/parallel_llm.py +0 -166
- mcp_agent/workflows/router/__init__.py +0 -0
- mcp_agent/workflows/router/router_base.py +0 -368
- mcp_agent/workflows/router/router_embedding.py +0 -240
- mcp_agent/workflows/router/router_embedding_cohere.py +0 -59
- mcp_agent/workflows/router/router_embedding_openai.py +0 -59
- mcp_agent/workflows/router/router_llm.py +0 -320
- mcp_agent/workflows/swarm/__init__.py +0 -0
- mcp_agent/workflows/swarm/swarm.py +0 -320
- mcp_agent/workflows/swarm/swarm_anthropic.py +0 -42
- mcp_agent/workflows/swarm/swarm_openai.py +0 -41
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/licenses/LICENSE +0 -0
- /mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_prompts.py +0 -0
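
The bulk of this release is a package restructure: the mcp_agent/workflows tree is dissolved into mcp_agent/llm (with a providers subpackage) and mcp_agent/agents/workflow. As a rough sketch of what that means for downstream imports — paths taken from the rename entries above; whether every symbol keeps its old name is not shown in this listing — code on 0.1.12 would be updated along these lines:

    # Old 0.1.12 locations (modules now deleted or moved):
    # from mcp_agent.workflows.llm.model_factory import ModelFactory
    # from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM
    # from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM

    # New 0.2.0 locations, per the rename entries in the listing:
    from mcp_agent.llm.model_factory import ModelFactory
    from mcp_agent.llm.providers.augmented_llm_anthropic import AnthropicAugmentedLLM
    from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM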
mcp_agent/llm/augmented_llm_playback.py
ADDED
@@ -0,0 +1,83 @@
+from typing import Any, List
+
+from mcp_agent.core.prompt import Prompt
+from mcp_agent.llm.augmented_llm import RequestParams
+from mcp_agent.llm.augmented_llm_passthrough import PassthroughLLM
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+from mcp_agent.mcp.prompts.prompt_helpers import MessageContent
+
+
+class PlaybackLLM(PassthroughLLM):
+    """
+    A specialized LLM implementation that plays back assistant messages when loaded with prompts.
+
+    Unlike the PassthroughLLM which simply passes through messages without modification,
+    PlaybackLLM is designed to simulate a conversation by playing back prompt messages
+    in sequence when loaded with prompts through apply_prompt_template.
+
+    After apply_prompts has been called, each call to generate_str returns the next
+    "ASSISTANT" message in the loaded messages. If no messages are set or all messages have
+    been played back, it returns a message indicating that messages are exhausted.
+    """
+
+    def __init__(self, name: str = "Playback", **kwargs: dict[str, Any]) -> None:
+        super().__init__(name=name, **kwargs)
+        self._messages: List[PromptMessageMultipart] = []
+        self._current_index = -1
+        self._overage = -1
+
+    def _get_next_assistant_message(self) -> PromptMessageMultipart:
+        """
+        Get the next assistant message from the loaded messages.
+        Increments the current message index and skips user messages.
+        """
+        # Find next assistant message
+        while self._current_index < len(self._messages):
+            message = self._messages[self._current_index]
+            self._current_index += 1
+            if "assistant" != message.role:
+                continue
+
+            return message
+
+        self._overage += 1
+        return Prompt.assistant(
+            f"MESSAGES EXHAUSTED (list size {len(self._messages)}) ({self._overage} overage)"
+        )
+
+    async def generate(
+        self,
+        multipart_messages: List[PromptMessageMultipart],
+        request_params: RequestParams | None = None,
+    ) -> PromptMessageMultipart:
+        """
+        Handle playback of messages in two modes:
+        1. First call: store messages for playback and return "HISTORY LOADED"
+        2. Subsequent calls: return the next assistant message
+        """
+        # If this is the first call (initialization) or we're loading a prompt template
+        # with multiple messages (comes from apply_prompt)
+        if -1 == self._current_index:
+            if len(multipart_messages) > 1:
+                self._messages = multipart_messages
+            else:
+                self._messages.extend(multipart_messages)
+
+            # Reset the index to the beginning for proper playback
+            self._current_index = 0
+
+            await self.show_assistant_message(
+                message_text=f"HISTORY LOADED ({len(self._messages)} messages)",
+                title="ASSISTANT/PLAYBACK",
+            )
+
+            # In PlaybackLLM, we always return "HISTORY LOADED" on initialization,
+            # regardless of the prompt content. The next call will return messages.
+            return Prompt.assistant("HISTORY LOADED")
+
+        response = self._get_next_assistant_message()
+        await self.show_assistant_message(
+            message_text=MessageContent.get_first_text(response), title="ASSISTANT/PLAYBACK"
+        )
+
+        return response
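
To make the playback contract concrete, here is a minimal usage sketch. It assumes a PlaybackLLM can be driven directly outside an agent (construction details beyond the diff above are not shown); Prompt.user/Prompt.assistant and MessageContent.get_first_text are the helpers the new module itself imports:

    import asyncio

    from mcp_agent.core.prompt import Prompt
    from mcp_agent.llm.augmented_llm_playback import PlaybackLLM
    from mcp_agent.mcp.prompts.prompt_helpers import MessageContent


    async def main() -> None:
        llm = PlaybackLLM()  # assumes standalone construction works outside an agent

        # First call primes the queue and always answers "HISTORY LOADED".
        history = [
            Prompt.user("What is the capital of France?"),
            Prompt.assistant("Paris."),
            Prompt.user("And of Italy?"),
            Prompt.assistant("Rome."),
        ]
        first = await llm.generate(history)
        print(MessageContent.get_first_text(first))  # HISTORY LOADED

        # Later calls replay the stored assistant turns in order, skipping user turns.
        for _ in range(3):
            reply = await llm.generate([Prompt.user("next?")])
            print(MessageContent.get_first_text(reply))
        # Paris. / Rome. / MESSAGES EXHAUSTED (list size 4) (0 overage)


    asyncio.run(main())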
mcp_agent/llm/memory.py
ADDED
@@ -0,0 +1,103 @@
+from typing import Generic, List, Protocol, TypeVar
+
+# Define our own type variable for implementation use
+MessageParamT = TypeVar("MessageParamT")
+
+
+class Memory(Protocol, Generic[MessageParamT]):
+    """
+    Simple memory management for storing past interactions in-memory.
+    """
+
+    # TODO: saqadri - add checkpointing and other advanced memory capabilities
+
+    def __init__(self) -> None: ...
+
+    def extend(self, messages: List[MessageParamT], is_prompt: bool = False) -> None: ...
+
+    def set(self, messages: List[MessageParamT], is_prompt: bool = False) -> None: ...
+
+    def append(self, message: MessageParamT, is_prompt: bool = False) -> None: ...
+
+    def get(self, include_history: bool = True) -> List[MessageParamT]: ...
+
+    def clear(self, clear_prompts: bool = False) -> None: ...
+
+
+class SimpleMemory(Memory, Generic[MessageParamT]):
+    """
+    Simple memory management for storing past interactions in-memory.
+
+    Maintains both prompt messages (which are always included) and
+    generated conversation history (which is included based on use_history setting).
+    """
+
+    def __init__(self) -> None:
+        self.history: List[MessageParamT] = []
+        self.prompt_messages: List[MessageParamT] = []  # Always included
+
+    def extend(self, messages: List[MessageParamT], is_prompt: bool = False) -> None:
+        """
+        Add multiple messages to history.
+
+        Args:
+            messages: Messages to add
+            is_prompt: If True, add to prompt_messages instead of regular history
+        """
+        if is_prompt:
+            self.prompt_messages.extend(messages)
+        else:
+            self.history.extend(messages)
+
+    def set(self, messages: List[MessageParamT], is_prompt: bool = False) -> None:
+        """
+        Replace messages in history.
+
+        Args:
+            messages: Messages to set
+            is_prompt: If True, replace prompt_messages instead of regular history
+        """
+        if is_prompt:
+            self.prompt_messages = messages.copy()
+        else:
+            self.history = messages.copy()
+
+    def append(self, message: MessageParamT, is_prompt: bool = False) -> None:
+        """
+        Add a single message to history.
+
+        Args:
+            message: Message to add
+            is_prompt: If True, add to prompt_messages instead of regular history
+        """
+        if is_prompt:
+            self.prompt_messages.append(message)
+        else:
+            self.history.append(message)
+
+    def get(self, include_history: bool = True) -> List[MessageParamT]:
+        """
+        Get all messages in memory.
+
+        Args:
+            include_history: If True, include regular history messages
+                             If False, only return prompt messages
+
+        Returns:
+            Combined list of prompt messages and optionally history messages
+        """
+        if include_history:
+            return self.prompt_messages + self.history
+        else:
+            return self.prompt_messages.copy()
+
+    def clear(self, clear_prompts: bool = False) -> None:
+        """
+        Clear history and optionally prompt messages.
+
+        Args:
+            clear_prompts: If True, also clear prompt messages
+        """
+        self.history = []
+        if clear_prompts:
+            self.prompt_messages = []
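
The prompt/history split is easiest to see in isolation. A minimal sketch against the class exactly as added above, using plain strings as the message type purely for illustration:

    from mcp_agent.llm.memory import SimpleMemory

    memory: SimpleMemory[str] = SimpleMemory()

    # Prompt messages are pinned: returned first, and they survive clear().
    memory.extend(["system: you are a poet"], is_prompt=True)

    # Ordinary conversation turns land in history.
    memory.append("user: write a haiku")
    memory.append("assistant: old pond / frog leaps in")

    assert memory.get() == [
        "system: you are a poet",
        "user: write a haiku",
        "assistant: old pond / frog leaps in",
    ]
    assert memory.get(include_history=False) == ["system: you are a poet"]

    memory.clear()                    # drops history only
    assert memory.get() == ["system: you are a poet"]
    memory.clear(clear_prompts=True)  # now everything is gone
    assert memory.get() == []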
mcp_agent/{workflows/llm → llm}/model_factory.py
RENAMED
@@ -1,14 +1,18 @@
 from dataclasses import dataclass
 from enum import Enum, auto
-from typing import
+from typing import Callable, Dict, Optional, Type, Union

 from mcp_agent.agents.agent import Agent
 from mcp_agent.core.exceptions import ModelConfigError
 from mcp_agent.core.request_params import RequestParams
-from mcp_agent.
-from mcp_agent.
-from mcp_agent.
-from mcp_agent.
+from mcp_agent.llm.augmented_llm_passthrough import PassthroughLLM
+from mcp_agent.llm.augmented_llm_playback import PlaybackLLM
+from mcp_agent.llm.providers.augmented_llm_anthropic import AnthropicAugmentedLLM
+from mcp_agent.llm.providers.augmented_llm_deepseek import DeepSeekAugmentedLLM
+from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
+from mcp_agent.mcp.interfaces import AugmentedLLMProtocol
+
+# from mcp_agent.workflows.llm.augmented_llm_deepseek import DeekSeekAugmentedLLM


 # Type alias for LLM classes
@@ -17,6 +21,7 @@ LLMClass = Union[
     Type[OpenAIAugmentedLLM],
     Type[PassthroughLLM],
     Type[PlaybackLLM],
+    Type[DeepSeekAugmentedLLM],
 ]


@@ -26,6 +31,7 @@ class Provider(Enum):
     ANTHROPIC = auto()
     OPENAI = auto()
     FAST_AGENT = auto()
+    DEEPSEEK = auto()


 class ReasoningEffort(Enum):
@@ -53,6 +59,7 @@ class ModelFactory:
         "anthropic": Provider.ANTHROPIC,
         "openai": Provider.OPENAI,
         "fast-agent": Provider.FAST_AGENT,
+        "deepseek": Provider.DEEPSEEK,
     }

     # Mapping of effort strings to enum values
@@ -85,6 +92,8 @@ class ModelFactory:
         "claude-3-7-sonnet-latest": Provider.ANTHROPIC,
         "claude-3-opus-20240229": Provider.ANTHROPIC,
         "claude-3-opus-latest": Provider.ANTHROPIC,
+        "deepseek-chat": Provider.DEEPSEEK,
+        # "deepseek-reasoner": Provider.DEEPSEEK, reinstate on release
     }

     MODEL_ALIASES = {
@@ -97,6 +106,8 @@ class ModelFactory:
         "haiku35": "claude-3-5-haiku-latest",
         "opus": "claude-3-opus-latest",
         "opus3": "claude-3-opus-latest",
+        "deepseekv3": "deepseek-chat",
+        "deepseek": "deepseek-chat",
     }

     # Mapping of providers to their LLM classes
@@ -104,6 +115,7 @@ class ModelFactory:
         Provider.ANTHROPIC: AnthropicAugmentedLLM,
         Provider.OPENAI: OpenAIAugmentedLLM,
         Provider.FAST_AGENT: PassthroughLLM,
+        Provider.DEEPSEEK: DeepSeekAugmentedLLM,
     }

     # Mapping of special model names to their specific LLM classes
@@ -152,7 +164,7 @@ class ModelFactory:
     @classmethod
     def create_factory(
         cls, model_string: str, request_params: Optional[RequestParams] = None
-    ) -> Callable[...,
+    ) -> Callable[..., AugmentedLLMProtocol]:
         """
         Creates a factory function that follows the attach_llm protocol.

@@ -173,19 +185,13 @@
         # Create a factory function matching the attach_llm protocol
         def factory(agent: Agent, **kwargs) -> LLMClass:
             # Create merged params with parsed model name
-            factory_params = (
-
-            )
-            factory_params.model = (
-                config.model_name
-            )  # Use the parsed model name, not the alias
+            factory_params = request_params.model_copy() if request_params else RequestParams()
+            factory_params.model = config.model_name  # Use the parsed model name, not the alias

             # Merge with any provided default_request_params
             if "default_request_params" in kwargs and kwargs["default_request_params"]:
                 params_dict = factory_params.model_dump()
-                params_dict.update(
-                    kwargs["default_request_params"].model_dump(exclude_unset=True)
-                )
+                params_dict.update(kwargs["default_request_params"].model_dump(exclude_unset=True))
                 factory_params = RequestParams(**params_dict)
                 factory_params.model = (
                     config.model_name
@@ -208,7 +214,7 @@
             if key not in ["agent", "default_request_params", "name"]:
                 llm_args[key] = value

-            llm = llm_class(**llm_args)
+            llm: AugmentedLLMProtocol = llm_class(**llm_args)
             return llm

         return factory
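
The new DeepSeek wiring resolves in two steps: MODEL_ALIASES maps the short names to "deepseek-chat", and the provider map hands that model to DeepSeekAugmentedLLM. A sketch of the factory path — the agent argument is a placeholder for a real, already-constructed Agent:

    from mcp_agent.llm.model_factory import ModelFactory

    # "deepseek" and "deepseekv3" are aliases for "deepseek-chat", which maps to
    # Provider.DEEPSEEK and therefore to DeepSeekAugmentedLLM.
    factory = ModelFactory.create_factory("deepseek")

    # The returned callable follows the attach_llm protocol: given an agent it
    # builds and returns an AugmentedLLMProtocol implementation bound to it.
    llm = factory(agent=my_agent)  # my_agent: a previously constructed Agent (placeholder)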
mcp_agent/{workflows/llm → llm}/prompt_utils.py
RENAMED
@@ -97,9 +97,7 @@ def format_server_info(

     # Combine all components
     server_content = "\n".join(components)
-    return format_fastagent_tag(
-        "server", f"\n{server_content}\n", {"name": server_name}
-    )
+    return format_fastagent_tag("server", f"\n{server_content}\n", {"name": server_name})


 def format_agent_info(
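
For reference, the reflowed call above fixes the shape of format_fastagent_tag: a tag name, a body string, and an attribute dict. A sketch of how a server card might be assembled — only the call signature is taken from the diff; the inputs and the emitted tag syntax are assumptions:

    from mcp_agent.llm.prompt_utils import format_fastagent_tag  # module path from the rename listing

    # Hypothetical component content; real callers build this list elsewhere.
    components = ["<description>Fetches URLs and returns their content</description>"]
    server_content = "\n".join(components)

    tag = format_fastagent_tag("server", f"\n{server_content}\n", {"name": "fetch"})
    # Presumably yields an XML-style block along the lines of:
    # <fastagent:server name="fetch">
    # <description>Fetches URLs and returns their content</description>
    # </fastagent:server>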
mcp_agent/llm/providers/__init__.py
ADDED
@@ -0,0 +1,8 @@
+from mcp_agent.llm.providers.sampling_converter_anthropic import (
+    AnthropicSamplingConverter,
+)
+from mcp_agent.llm.providers.sampling_converter_openai import (
+    OpenAISamplingConverter,
+)
+
+__all__ = ["AnthropicSamplingConverter", "OpenAISamplingConverter"]
mcp_agent/{workflows/llm → llm/providers}/anthropic_utils.py
RENAMED
@@ -8,11 +8,10 @@ leveraging existing code for resource handling and delimited formats.
 from anthropic.types import (
     MessageParam,
 )
-
 from mcp.types import (
-    TextContent,
-    ImageContent,
     EmbeddedResource,
+    ImageContent,
+    TextContent,
     TextResourceContents,
 )

@@ -37,9 +36,7 @@ def anthropic_message_param_to_prompt_message_multipart(

     # Handle string content (user messages can be simple strings)
     if isinstance(content, str):
-        return PromptMessageMultipart(
-            role=role, content=[TextContent(type="text", text=content)]
-        )
+        return PromptMessageMultipart(role=role, content=[TextContent(type="text", text=content)])

     # Convert content blocks to MCP content types
     mcp_contents = []
@@ -52,27 +49,15 @@ def anthropic_message_param_to_prompt_message_multipart(
         # Check if this is a resource marker
         if (
             text
-            and (
-                text.startswith("[Resource:")
-                or text.startswith("[Binary Resource:")
-            )
+            and (text.startswith("[Resource:") or text.startswith("[Binary Resource:"))
             and "\n" in text
         ):
             header, content_text = text.split("\n", 1)
             if "MIME:" in header:
                 mime_match = header.split("MIME:", 1)[1].split("]")[0].strip()
-                if
-
-
-                if (
-                    "Resource:" in header
-                    and "Binary Resource:" not in header
-                ):
-                    uri = (
-                        header.split("Resource:", 1)[1]
-                        .split(",")[0]
-                        .strip()
-                    )
+                if mime_match != "text/plain":  # Only process non-plain text resources
+                    if "Resource:" in header and "Binary Resource:" not in header:
+                        uri = header.split("Resource:", 1)[1].split(",")[0].strip()
                 mcp_contents.append(
                     EmbeddedResource(
                         type="resource",
@@ -94,8 +79,6 @@ def anthropic_message_param_to_prompt_message_multipart(
         if isinstance(source, dict) and source.get("type") == "base64":
             media_type = source.get("media_type", "image/png")
             data = source.get("data", "")
-            mcp_contents.append(
-                ImageContent(type="image", data=data, mimeType=media_type)
-            )
+            mcp_contents.append(ImageContent(type="image", data=data, mimeType=media_type))

     return PromptMessageMultipart(role=role, content=mcp_contents)
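
A sketch of the converter's two visible paths, string content and base64 image blocks. The function name comes from the hunk headers above; its module path is assumed from the rename listing, and the exact accepted input shapes depend on code not shown in this diff:

    from mcp_agent.llm.providers.anthropic_utils import (  # module path assumed
        anthropic_message_param_to_prompt_message_multipart,
    )

    # String content is wrapped in a single TextContent block.
    text_param = {"role": "assistant", "content": "Hello from Claude"}
    multipart = anthropic_message_param_to_prompt_message_multipart(text_param)
    assert multipart.role == "assistant"
    assert multipart.content[0].text == "Hello from Claude"

    # Base64 image blocks become ImageContent entries carrying the source media type.
    image_param = {
        "role": "user",
        "content": [
            {
                "type": "image",
                "source": {"type": "base64", "media_type": "image/png", "data": "iVBORw0KGgo="},
            }
        ],
    }
    multipart = anthropic_message_param_to_prompt_message_multipart(image_param)
    assert multipart.content[0].mimeType == "image/png"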
|