fast-agent-mcp 0.2.6__py3-none-any.whl → 0.2.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.2.6.dist-info → fast_agent_mcp-0.2.7.dist-info}/METADATA +3 -4
- {fast_agent_mcp-0.2.6.dist-info → fast_agent_mcp-0.2.7.dist-info}/RECORD +15 -16
- mcp_agent/agents/base_agent.py +5 -5
- mcp_agent/agents/workflow/chain_agent.py +3 -11
- mcp_agent/agents/workflow/router_agent.py +22 -5
- mcp_agent/config.py +3 -3
- mcp_agent/core/fastagent.py +2 -0
- mcp_agent/llm/augmented_llm.py +5 -1
- mcp_agent/llm/providers/augmented_llm_anthropic.py +15 -49
- mcp_agent/llm/providers/augmented_llm_openai.py +19 -40
- mcp_agent/mcp/mcp_connection_manager.py +5 -3
- mcp_agent/mcp_server_registry.py +2 -1
- mcp_agent/mcp/mcp_activity.py +0 -18
- {fast_agent_mcp-0.2.6.dist-info → fast_agent_mcp-0.2.7.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.2.6.dist-info → fast_agent_mcp-0.2.7.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.2.6.dist-info → fast_agent_mcp-0.2.7.dist-info}/licenses/LICENSE +0 -0
{fast_agent_mcp-0.2.6.dist-info → fast_agent_mcp-0.2.7.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fast-agent-mcp
-Version: 0.2.6
+Version: 0.2.7
 Summary: Define, Prompt and Test MCP enabled Agents and Workflows
 Author-email: Shaun Smith <fastagent@llmindset.co.uk>, Sarmad Qadri <sarmad@lastmileai.dev>
 License: Apache License
@@ -260,8 +260,7 @@ The simple declarative syntax lets you concentrate on composing your Prompts and
 
 `fast-agent` is multi-modal, supporting Images and PDFs for both Anthropic and OpenAI endpoints via Prompts, Resources and MCP Tool Call results. The inclusion of passthrough and playback LLMs enable rapid development and test of Python glue-code for your applications.
 
-> [!TIP]
-> `fast-agent` is now MCP Native! Coming Soon - Full Documentation Site and Further MCP Examples.
+> [!TIP] > `fast-agent` is now MCP Native! Coming Soon - Full Documentation Site and Further MCP Examples.
 
 ### Agent Application Development
 
@@ -525,7 +524,7 @@ agent["greeter"].send("Good Evening!") # Dictionary access is supported
   servers=["filesystem"],      # list of MCP Servers for the agent
   model="o3-mini.high",        # specify a model for the agent
   use_history=True,            # agent maintains chat history
-  request_params=
+  request_params=RequestParams(temperature= 0.7)), # additional parameters for the LLM (or RequestParams())
   human_input=True,            # agent can request human input
 )
 ```
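The new README line sets per-agent LLM parameters via `request_params` (the stray second closing parenthesis appears in the package README itself). Below is a hedged sketch of the documented decorator pattern; the agent name, instruction, and message are illustrative, and `RequestParams` is imported from `mcp_agent.llm.augmented_llm`, the path this diff's own providers use.

```python
# A minimal sketch, assuming the imports shown elsewhere in this diff;
# the agent name, instruction, and message are illustrative only.
import asyncio

from mcp_agent.core.fastagent import FastAgent
from mcp_agent.llm.augmented_llm import RequestParams

fast = FastAgent("example")

@fast.agent(
    name="greeter",
    instruction="Reply cheerfully.",
    use_history=True,                               # agent maintains chat history
    request_params=RequestParams(temperature=0.7),  # per-agent LLM settings
)
async def main() -> None:
    async with fast.run() as agent:
        await agent["greeter"].send("Good Evening!")  # dictionary access, as in the README

if __name__ == "__main__":
    asyncio.run(main())
```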
{fast_agent_mcp-0.2.6.dist-info → fast_agent_mcp-0.2.7.dist-info}/RECORD
CHANGED
@@ -1,23 +1,23 @@
 mcp_agent/__init__.py,sha256=-AIoeL4c9UAp_P4U0z-uIWTTmQWdihOis5nbQ5L_eao,1664
 mcp_agent/app.py,sha256=jBmzYM_o50g8vhlTgkkf5TGiBWNbXWViYnd0WANbpzo,10276
-mcp_agent/config.py,sha256=
+mcp_agent/config.py,sha256=ymz8WHTM08ENeiFLy-7-oYd4rQAO_V-C4OOVcZSa41s,11715
 mcp_agent/console.py,sha256=Gjf2QLFumwG1Lav__c07X_kZxxEUSkzV-1_-YbAwcwo,813
 mcp_agent/context.py,sha256=pp_F1Q1jgAxGrRccSZJutn1JUxYfVue-St3S8tUyptM,7903
 mcp_agent/context_dependent.py,sha256=QXfhw3RaQCKfscEEBRGuZ3sdMWqkgShz2jJ1ivGGX1I,1455
 mcp_agent/event_progress.py,sha256=25iz0yyg-O4glMmtijcYpDdUmtUIKsCmR_8A52GgeC4,2716
-mcp_agent/mcp_server_registry.py,sha256=
+mcp_agent/mcp_server_registry.py,sha256=w0sq-5o_AVVGfwUBo0c_Ekbyjd3Tjg9bzi2r8UZry7o,9945
 mcp_agent/progress_display.py,sha256=GeJU9VUt6qKsFVymG688hCMVCsAygG9ifiiEb5IcbN4,361
 mcp_agent/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/agents/agent.py,sha256=Tn2YKw_ytx9b8jC-65WYQmrnD43kYiZsLa4sVHxn9d4,3854
-mcp_agent/agents/base_agent.py,sha256=
+mcp_agent/agents/base_agent.py,sha256=dzyy4tDHJfRC4Sp-JqXeDwykk5SH55k89dUXQROIdQ4,23488
 mcp_agent/agents/workflow/__init__.py,sha256=HloteEW6kalvgR0XewpiFAqaQlMPlPJYg5p3K33IUzI,25
-mcp_agent/agents/workflow/chain_agent.py,sha256=
+mcp_agent/agents/workflow/chain_agent.py,sha256=efftXdHc5F-XY8jnz5npHbKHhqnzHh28WbU5yQ4yUn0,6105
 mcp_agent/agents/workflow/evaluator_optimizer.py,sha256=VWdzVIy_qSiVsDJO22ta3RB3drkvBfXk9HxBYMpsC5U,13300
 mcp_agent/agents/workflow/orchestrator_agent.py,sha256=30hFQyAmtjQTX6Li_zWWIHCpdNpfZkDo57YXXW5xIsI,21561
 mcp_agent/agents/workflow/orchestrator_models.py,sha256=5P_aXADVT4Et8qT4e1cb9RelmHX5dCRrzu8j8T41Kdg,7230
 mcp_agent/agents/workflow/orchestrator_prompts.py,sha256=EXKEI174sshkZyPPEnWbwwNafzSPuA39MXL7iqG9cWc,9106
 mcp_agent/agents/workflow/parallel_agent.py,sha256=SgIXJx2X_MSlLOv6WXYRezwjDYjU9f95eKQzTm5Y_lk,7087
-mcp_agent/agents/workflow/router_agent.py,sha256=
+mcp_agent/agents/workflow/router_agent.py,sha256=c4MU55U6q1DRayP0sDoyxdlnKX-N0LPbRv-MFlwbwrY,11165
 mcp_agent/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/cli/__main__.py,sha256=AVZ7tQFhU_sDOGuUGJq8ujgKtcxsYJBJwHbVaaiRDlI,166
 mcp_agent/cli/main.py,sha256=PZdPJfsAJOm80vTu7j_XpMPhaDZOpqSe-ciU3YQsmA4,3149
@@ -33,7 +33,7 @@ mcp_agent/core/direct_factory.py,sha256=hYFCucZVAQ2wrfqIe9Qameoa-cCRaQ53R97EMHvU
 mcp_agent/core/enhanced_prompt.py,sha256=P9FAtc0rqIYQfUDkTNVXitFIZEtB3fdq_Nr0-st64Qg,17948
 mcp_agent/core/error_handling.py,sha256=xoyS2kLe0eG0bj2eSJCJ2odIhGUve2SbDR7jP-A-uRw,624
 mcp_agent/core/exceptions.py,sha256=ENAD_qGG67foxy6vDkIvc-lgopIUQy6O7zvNPpPXaQg,2289
-mcp_agent/core/fastagent.py,sha256=
+mcp_agent/core/fastagent.py,sha256=T2kyq32wBJCOj13Zy1G_XJjQZb1S4HVdx3OBzmEMHBg,18644
 mcp_agent/core/interactive_prompt.py,sha256=zU53h8mmaJBnddYy2j57tH7jreQ9PUz7vLEo2gdDrio,17704
 mcp_agent/core/mcp_content.py,sha256=2D7KHY9mG_vxoDwFLKvsPQV9VRIzHItM7V-jcEnACh8,8878
 mcp_agent/core/prompt.py,sha256=qnintOUGEoDPYLI9bu9G2OlgVMCe5ZPUZilgMzydXhc,7919
@@ -50,7 +50,7 @@ mcp_agent/human_input/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3h
 mcp_agent/human_input/handler.py,sha256=s712Z5ssTCwjL9-VKoIdP5CtgMh43YvepynYisiWTTA,3144
 mcp_agent/human_input/types.py,sha256=RtWBOVzy8vnYoQrc36jRLn8z8N3C4pDPMBN5vF6qM5Y,1476
 mcp_agent/llm/__init__.py,sha256=d8zgwG-bRFuwiMNMYkywg_qytk4P8lawyld_meuUmHI,68
-mcp_agent/llm/augmented_llm.py,sha256=
+mcp_agent/llm/augmented_llm.py,sha256=YIB3I_taoglo_vSmZLQ50cv1qCSctaQlWVwjI-7WTkk,18304
 mcp_agent/llm/augmented_llm_passthrough.py,sha256=U0LssNWNVuZRuD9I7Wuvpo7vdDW4xtoPLirnYCgBGTY,6128
 mcp_agent/llm/augmented_llm_playback.py,sha256=YVR2adzjMf9Q5WfYBytryWMRqJ87a3kNBnjxhApsMcU,3413
 mcp_agent/llm/memory.py,sha256=UakoBCJBf59JBtB6uyZM0OZjlxDW_VHtSfDs08ibVEc,3312
@@ -60,10 +60,10 @@ mcp_agent/llm/sampling_converter.py,sha256=C7wPBlmT0eD90XWabC22zkxsrVHKCrjwIwg6c
 mcp_agent/llm/sampling_format_converter.py,sha256=xGz4odHpOcP7--eFaJaFtUR8eR9jxZS7MnLH6J7n0EU,1263
 mcp_agent/llm/providers/__init__.py,sha256=heVxtmuqFJOnjjxHz4bWSqTAxXoN1E8twC_gQ_yJpHk,265
 mcp_agent/llm/providers/anthropic_utils.py,sha256=vYDN5G5jKMhD2CQg8veJYab7tvvzYkDMq8M1g_hUAQg,3275
-mcp_agent/llm/providers/augmented_llm_anthropic.py,sha256=
+mcp_agent/llm/providers/augmented_llm_anthropic.py,sha256=CNKpTEvWqjOteACUx_Vha0uFpPt32C17JrkSXg_allM,14445
 mcp_agent/llm/providers/augmented_llm_deepseek.py,sha256=SdYDqZZ9hM9sBvW1FSItNn_ENEKQXGNKwVHGnjqjyAA,1927
 mcp_agent/llm/providers/augmented_llm_generic.py,sha256=IIgwPYsVGwDdL2mMYsc5seY3pVFblMwmnxoI5dbxras,1524
-mcp_agent/llm/providers/augmented_llm_openai.py,sha256=
+mcp_agent/llm/providers/augmented_llm_openai.py,sha256=Wso9GVgsq8y3sqlOzTk_iQqrkCOL3LyuG07nA1PWDng,17913
 mcp_agent/llm/providers/multipart_converter_anthropic.py,sha256=t5lHYGfFUacJldnrVtMNW-8gEMoto8Y7hJkDrnyZR-Y,16650
 mcp_agent/llm/providers/multipart_converter_openai.py,sha256=zCj0LBgd9FDG8aL_GeTrPo2ssloYnmC_Uj3ENWVUJAg,16753
 mcp_agent/llm/providers/openai_multipart.py,sha256=qKBn7d3jSabnJmVgWweVzqh8q9mBqr09fsPmP92niAQ,6899
@@ -82,10 +82,9 @@ mcp_agent/mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/mcp/gen_client.py,sha256=fAVwFVCgSamw4PwoWOV4wrK9TABx1S_zZv8BctRyF2k,3030
 mcp_agent/mcp/interfaces.py,sha256=vma7bbWbY3zp1RM6hMYxVO4aV6Vfaygm-nLwzK2jFKI,6748
 mcp_agent/mcp/logger_textio.py,sha256=OpnqMam9Pu0oVzYQWFMhrX1dRg2f5Fqb3qqPA6QAATM,2778
-mcp_agent/mcp/mcp_activity.py,sha256=CajXCFWZ2cKEX9s4-HfNVAj471ePTVs4NOkvmIh65tE,592
 mcp_agent/mcp/mcp_agent_client_session.py,sha256=RMYNltc2pDIzxwEJSS5589RbvPO0KWV4Y3jSyAmhKf0,4181
 mcp_agent/mcp/mcp_aggregator.py,sha256=jaWbOvb3wioECohZ47CubyxfJ5QkfNSshu1hwhZksG4,40486
-mcp_agent/mcp/mcp_connection_manager.py,sha256=
+mcp_agent/mcp/mcp_connection_manager.py,sha256=AMIm2FBbIk7zHInb8X-kFSQFO5TKcoi9w8WU8nx8Ig0,13834
 mcp_agent/mcp/mime_utils.py,sha256=difepNR_gpb4MpMLkBRAoyhDk-AjXUHTiqKvT_VwS1o,1805
 mcp_agent/mcp/prompt_message_multipart.py,sha256=IpIndd75tAcCbJbfqjpAF0tOUUP1TQceDbWoxO5gvpo,3684
 mcp_agent/mcp/prompt_render.py,sha256=k3v4BZDThGE2gGiOYVQtA6x8WTEdOuXIEnRafANhN1U,2996
@@ -134,8 +133,8 @@ mcp_agent/resources/examples/workflows/orchestrator.py,sha256=rOGilFTliWWnZ3Jx5w
 mcp_agent/resources/examples/workflows/parallel.py,sha256=n0dFN26QvYd2wjgohcaUBflac2SzXYx-bCyxMSousJE,1884
 mcp_agent/resources/examples/workflows/router.py,sha256=E4x_-c3l4YW9w1i4ARcDtkdeqIdbWEGfsMzwLYpdbVc,1677
 mcp_agent/ui/console_display.py,sha256=TVGDtJ37hc6UG0ei9g7ZPZZfFNeS1MYozt-Mx8HsPCk,9752
-fast_agent_mcp-0.2.
-fast_agent_mcp-0.2.
-fast_agent_mcp-0.2.
-fast_agent_mcp-0.2.
-fast_agent_mcp-0.2.
+fast_agent_mcp-0.2.7.dist-info/METADATA,sha256=V7zvKqAqqHxiKazK3bv1CmwcWGKJEc7NhwGZ_NKG1V0,29849
+fast_agent_mcp-0.2.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+fast_agent_mcp-0.2.7.dist-info/entry_points.txt,sha256=qPM7vwtN1_KmP3dXehxgiCxUBHtqP7yfenZigztvY-w,226
+fast_agent_mcp-0.2.7.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
+fast_agent_mcp-0.2.7.dist-info/RECORD,,
mcp_agent/agents/base_agent.py
CHANGED
@@ -215,7 +215,7 @@ class BaseAgent(MCPAggregator, AgentProtocol):
 
         # Use the LLM to generate a response
         response = await self.generate([prompt], None)
-        return response.
+        return response.all_text()
 
     def _normalize_message_input(
         self, message: Union[str, PromptMessage, PromptMessageMultipart]
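`send()` now returns `response.all_text()` in place of a previously truncated expression. Together with `first_text()` (used later in this diff by the OpenAI structured path), these helpers read text out of a `PromptMessageMultipart`. A hedged sketch of the assumed behaviour:

```python
# Hedged sketch: assumed behaviour of the text helpers seen in this
# diff; all_text() is taken to combine every text block and
# first_text() to return only the first (inferred from the call
# sites, not verified against the implementation).
from mcp.types import TextContent
from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart

msg = PromptMessageMultipart(
    role="assistant",
    content=[
        TextContent(type="text", text="Hello"),
        TextContent(type="text", text="world"),
    ],
)
print(msg.first_text())  # assumed: "Hello"
print(msg.all_text())    # assumed: both blocks combined
```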
@@ -625,15 +625,15 @@ class BaseAgent(MCPAggregator, AgentProtocol):
     @property
     def agent_type(self) -> str:
         """
-        Return the type of this agent.
-
+        Return the type of this agent.
+
         This is used for display purposes in the interactive prompt and other UI elements.
-
+
         Returns:
             String representing the agent type
         """
         return self.config.agent_type
-
+
     @property
     def message_history(self) -> List[PromptMessageMultipart]:
         """
mcp_agent/agents/workflow/chain_agent.py
CHANGED
@@ -23,7 +23,8 @@ class ChainAgent(BaseAgent):
     A chain agent that processes requests through a series of specialized agents in sequence.
     Passes the output of each agent to the next agent in the chain.
     """
-
+
+    # TODO -- consider adding "repeat" mode
     @property
     def agent_type(self) -> str:
         """Return the type of this agent."""
@@ -70,20 +71,11 @@ class ChainAgent(BaseAgent):
         # # Get the original user message (last message in the list)
         user_message = multipart_messages[-1] if multipart_messages else None
 
-        # # If no user message, return an error
-        # if not user_message:
-        #     return PromptMessageMultipart(
-        #         role="assistant",
-        #         content=[TextContent(type="text", text="No input message provided.")],
-        #     )
-
-        # Initialize messages with the input
-
         if not self.cumulative:
             response: PromptMessageMultipart = await self.agents[0].generate(multipart_messages)
             # Process the rest of the agents in the chain
             for agent in self.agents[1:]:
-                next_message = Prompt.user(response.content
+                next_message = Prompt.user(*response.content)
                 response = await agent.generate([next_message])
 
             return response
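The fixed line splats the previous agent's content blocks into the next user message, so non-text content survives the handoff. In isolation the pattern looks like this (the response object is hand-built for illustration):

```python
# Sketch of the chain handoff: every content block of the previous
# assistant response becomes part of the next agent's user message.
from mcp.types import TextContent
from mcp_agent.core.prompt import Prompt
from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart

response = PromptMessageMultipart(
    role="assistant",
    content=[TextContent(type="text", text="step one output")],
)
next_message = Prompt.user(*response.content)  # splat, not a single positional arg
```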
mcp_agent/agents/workflow/router_agent.py
CHANGED
@@ -53,12 +53,29 @@ You are a highly accurate request router that directs incoming requests to the m
 Your task is to analyze the request and determine the most appropriate agent from the options above.
 
 <fastagent:instruction>
-Respond
+Respond with JSON following the schema below:
 {{
-    "
-    "
-    "
+    "type": "object",
+    "required": ["agent", "confidence", "reasoning"],
+    "properties": {{
+        "agent": {{
+            "type": "string",
+            "description": "The exact name of the selected agent"
+        }},
+        "confidence": {{
+            "type": "string",
+            "enum": ["high", "medium", "low"],
+            "description": "Your confidence level in this selection"
+        }},
+        "reasoning": {{
+            "type": "string",
+            "description": "Brief explanation for your selection"
+        }}
+    }}
 }}
+
+Supply only the JSON with no preamble. Use "reasoning" field to describe actions. NEVER EMIT CODE FENCES.
+
 </fastagent:instruction>
 """
 
@@ -87,7 +104,7 @@ class RouterAgent(BaseAgent):
     A simplified router that uses an LLM to determine the best agent for a request,
     then dispatches the request to that agent and returns the response.
     """
-
+
     @property
     def agent_type(self) -> str:
         """Return the type of this agent."""
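The expanded routing instruction embeds a JSON Schema (the doubled braces are template escapes). A conforming model reply, and a hedged parsing sketch, look like the following; `RoutingResponse` is a hypothetical model named here for illustration, with its fields and confidence enum taken from the schema above.

```python
# Hedged sketch: validating a routing reply against the schema above.
# RoutingResponse is hypothetical; its fields mirror the schema.
import json
from typing import Literal

from pydantic import BaseModel

class RoutingResponse(BaseModel):
    agent: str
    confidence: Literal["high", "medium", "low"]
    reasoning: str

raw = '{"agent": "greeter", "confidence": "high", "reasoning": "The request is a salutation."}'
choice = RoutingResponse.model_validate(json.loads(raw))
assert choice.agent == "greeter"
```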
mcp_agent/config.py
CHANGED
@@ -75,6 +75,9 @@ class MCPServerSettings(BaseModel):
     url: str | None = None
     """The URL for the server (e.g. for SSE transport)."""
 
+    headers: Dict[str, str] | None = None
+    """Headers dictionary for SSE connections"""
+
     auth: MCPServerAuthSettings | None = None
     """The authentication configuration for the server."""
 
@@ -84,9 +87,6 @@ class MCPServerSettings(BaseModel):
     env: Dict[str, str] | None = None
     """Environment variables to pass to the server process."""
 
-    env: Dict[str, str] | None = None
-    """Environment variables to pass to the server process."""
-
     sampling: MCPSamplingSettings | None = None
     """Sampling settings for this Client/Server pair"""
 
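With the duplicated `env` field removed and the new `headers` field in place, an SSE server entry can carry custom headers end to end (they are threaded through to `sse_client` later in this diff). A sketch constructing the settings model directly; the URL and token are placeholders:

```python
# Hedged sketch: an SSE server definition with auth headers, using
# only fields visible on MCPServerSettings in this diff. The URL and
# bearer token are placeholders.
from mcp_agent.config import MCPServerSettings

server = MCPServerSettings(
    transport="sse",
    url="https://example.com/sse",
    headers={"Authorization": "Bearer <token>"},  # new in 0.2.7
)
```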
mcp_agent/core/fastagent.py
CHANGED
@@ -235,6 +235,8 @@ class FastAgent:
             progress_display.stop()
 
         # Pre-flight validation
+        if 0 == len(self.agents):
+            raise AgentConfigError("No agents defined. Please define at least one agent.")
         validate_server_references(self.context, self.agents)
         validate_workflow_references(self.agents)
 
mcp_agent/llm/augmented_llm.py
CHANGED
@@ -32,6 +32,7 @@ from mcp_agent.llm.sampling_format_converter import (
     ProviderFormatConverter,
 )
 from mcp_agent.logging.logger import get_logger
+from mcp_agent.mcp.helpers.content_helpers import get_text
 from mcp_agent.mcp.interfaces import (
     AugmentedLLMProtocol,
     ModelT,
@@ -147,8 +148,11 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         """Apply the prompt and return the result as a Pydantic model, or None if coercion fails"""
         try:
             result: PromptMessageMultipart = await self.generate(prompt, request_params)
-
+            final_generation = get_text(result.content[-1]) or ""
+            await self.show_assistant_message(final_generation)
+            json_data = from_json(final_generation, allow_partial=True)
             validated_model = model.model_validate(json_data)
+
             return cast("ModelT", validated_model), Prompt.assistant(json_data)
         except Exception as e:
             logger = get_logger(__name__)
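The structured path now takes the last content block's text, displays it, and parses it with `pydantic_core.from_json` before Pydantic validation; `allow_partial=True` tolerates output that was cut off mid-generation. The step in isolation (`WeatherReport` is a hypothetical model standing in for the caller-supplied one):

```python
# Standalone sketch of the parse-and-validate step; WeatherReport is
# hypothetical and stands in for the caller-supplied model.
from pydantic import BaseModel
from pydantic_core import from_json

class WeatherReport(BaseModel):
    city: str
    temperature_c: float

final_generation = '{"city": "London", "temperature_c": 18.5}'
json_data = from_json(final_generation, allow_partial=True)  # tolerant of truncated JSON
report = WeatherReport.model_validate(json_data)
print(report.city)  # -> London
```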
mcp_agent/llm/providers/augmented_llm_anthropic.py
CHANGED
@@ -1,5 +1,7 @@
 import os
-from typing import TYPE_CHECKING, List
+from typing import TYPE_CHECKING, List
+
+from mcp.types import EmbeddedResource, ImageContent, TextContent
 
 from mcp_agent.core.prompt import Prompt
 from mcp_agent.llm.providers.multipart_converter_anthropic import (
@@ -28,13 +30,11 @@ from mcp.types import (
     CallToolRequest,
     CallToolRequestParams,
 )
-from pydantic_core import from_json
 from rich.text import Text
 
 from mcp_agent.core.exceptions import ProviderKeyError
 from mcp_agent.llm.augmented_llm import (
     AugmentedLLM,
-    ModelT,
     RequestParams,
 )
 from mcp_agent.logging.logger import get_logger
@@ -69,14 +69,15 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
             use_history=True,
         )
 
-    def _base_url(self) -> str:
+    def _base_url(self) -> str | None:
+        assert self.context.config
         return self.context.config.anthropic.base_url if self.context.config.anthropic else None
 
     async def generate_internal(
         self,
         message_param,
         request_params: RequestParams | None = None,
-    ):
+    ) -> list[TextContent | ImageContent | EmbeddedResource]:
         """
         Process a query using an LLM and available tools.
         Override this method to use a different LLM.
@@ -113,7 +114,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
             for tool in tool_list.tools
         ]
 
-        responses: List[
+        responses: List[TextContent | ImageContent | EmbeddedResource] = []
 
         model = self.default_request_params.model
 
@@ -175,7 +176,8 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
 
             response_as_message = self.convert_message_to_message_param(response)
             messages.append(response_as_message)
-
+            if response.content[0].type == "text":
+                responses.append(TextContent(type="text", text=response.content[0].text))
 
             if response.stop_reason == "end_turn":
                 message_text = ""
@@ -255,6 +257,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
 
                         # Add each result to our collection
                         tool_results.append((tool_use_id, result))
+                        responses.extend(result.content)
 
                 messages.append(AnthropicConverter.create_tool_results_message(tool_results))
 
@@ -295,41 +298,22 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
 
         return api_key
 
-    async def 
+    async def generate_messages(
         self,
         message_param,
         request_params: RequestParams | None = None,
-    ) -> 
+    ) -> PromptMessageMultipart:
         """
         Process a query using an LLM and available tools.
         The default implementation uses Claude as the LLM.
         Override this method to use a different LLM.
 
         """
-
-        responses: List[Message] = await self.generate_internal(
+        res = await self.generate_internal(
             message_param=message_param,
             request_params=request_params,
         )
-
-        final_text: List[str] = []
-
-        # Process all responses and collect all text content
-        for response in responses:
-            # Extract text content from each message
-            message_text = ""
-            for content in response.content:
-                if content.type == "text":
-                    # Extract text from text blocks
-                    message_text += content.text
-
-            # Only append non-empty text
-            if message_text:
-                final_text.append(message_text)
-
-        # TODO -- make tool detail inclusion behaviour configurable
-        # Join all collected text
-        return "\n".join(final_text)
+        return Prompt.assistant(*res)
 
     async def _apply_prompt_provider_specific(
         self,
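`generate_messages` now returns a `PromptMessageMultipart` assembled with `Prompt.assistant(*res)` instead of joining text into one string, so image and embedded-resource blocks collected during the turn are preserved. The assembly step in isolation (the blocks here are illustrative):

```python
# Sketch of the new return shape: collected content blocks are wrapped
# into one assistant message rather than flattened to "\n"-joined text.
from mcp.types import TextContent
from mcp_agent.core.prompt import Prompt

res = [
    TextContent(type="text", text="Here is the summary."),
    TextContent(type="text", text="Tool output kept as its own block."),
]
assistant_message = Prompt.assistant(*res)
print(assistant_message.role)  # -> assistant
```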
@@ -352,30 +336,12 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         if last_message.role == "user":
             self.logger.debug("Last message in prompt is from user, generating assistant response")
             message_param = AnthropicConverter.convert_to_anthropic(last_message)
-            return 
+            return await self.generate_messages(message_param, request_params)
         else:
             # For assistant messages: Return the last message content as text
             self.logger.debug("Last message in prompt is from assistant, returning it directly")
             return last_message
 
-    async def generate_structured(
-        self,
-        message: str,
-        response_model: Type[ModelT],
-        request_params: RequestParams | None = None,
-    ) -> ModelT:
-        # TODO -- simiar to the OAI version, we should create a tool call for the expected schema
-        response = await self.generate_str(
-            message=message,
-            request_params=request_params,
-        )
-        # Don't try to parse if we got no response
-        if not response:
-            self.logger.error("No response from generate_str")
-            return None
-
-        return response_model.model_validate(from_json(response, allow_partial=True))
-
     @classmethod
     def convert_message_to_message_param(cls, message: Message, **kwargs) -> MessageParam:
         """Convert a response object to an input parameter object to allow LLM calls to be chained."""
mcp_agent/llm/providers/augmented_llm_openai.py
CHANGED
@@ -5,6 +5,9 @@ from mcp.types import (
     CallToolRequest,
     CallToolRequestParams,
     CallToolResult,
+    EmbeddedResource,
+    ImageContent,
+    TextContent,
 )
 from openai import AuthenticationError, OpenAI
 
@@ -115,7 +118,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
         self,
         message,
         request_params: RequestParams | None = None,
-    ) -> List[
+    ) -> List[TextContent | ImageContent | EmbeddedResource]:
         """
         Process a query using an LLM and available tools.
         The default implementation uses OpenAI's ChatCompletion as the LLM.
@@ -164,7 +167,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
         if not available_tools:
             available_tools = None  # deepseek does not allow empty array
 
-        responses: List[
+        responses: List[TextContent | ImageContent | EmbeddedResource] = []
         model = self.default_request_params.model
 
         # we do NOT send stop sequences as this causes errors with mutlimodal processing
@@ -218,7 +221,9 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
 
             choice = response.choices[0]
             message = choice.message
-
+            # prep for image/audio gen models
+            if message.content:
+                responses.append(TextContent(type="text", text=message.content))
 
             converted_message = self.convert_message_to_message_param(message, name=self.name)
             messages.append(converted_message)
@@ -258,7 +263,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
                     self.show_oai_tool_result(str(result))
 
                     tool_results.append((tool_call.id, result))
-
+                    responses.extend(result.content)
             messages.extend(OpenAIConverter.convert_function_results_to_openai(tool_results))
 
             self.logger.debug(
@@ -310,39 +315,6 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
 
         return responses
 
-    async def generate_str(
-        self,
-        message,
-        request_params: RequestParams | None = None,
-    ) -> str:
-        """
-        Process a query using an LLM and available tools.
-        The default implementation uses OpenAI's ChatCompletion as the LLM.
-        Override this method to use a different LLM.
-
-        Special commands:
-        - "***SAVE_HISTORY <filename.md>" - Saves the conversation history to the specified file
-          in MCP prompt format with user/assistant delimiters.
-        """
-
-        responses = await self.generate_internal(
-            message=message,
-            request_params=request_params,
-        )
-
-        final_text: List[str] = []
-
-        for response in responses:
-            content = response.content
-            if not content:
-                continue
-
-            if isinstance(content, str):
-                final_text.append(content)
-                continue
-
-        return "\n".join(final_text)
-
     async def _apply_prompt_provider_specific(
         self,
         multipart_messages: List["PromptMessageMultipart"],
@@ -366,7 +338,13 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
             # For user messages: Generate response to the last one
             self.logger.debug("Last message in prompt is from user, generating assistant response")
             message_param = OpenAIConverter.convert_to_openai(last_message)
-
+            responses: List[
+                TextContent | ImageContent | EmbeddedResource
+            ] = await self.generate_internal(
+                message_param,
+                request_params,
+            )
+            return Prompt.assistant(*responses)
         else:
             # For assistant messages: Return the last message content as text
             self.logger.debug("Last message in prompt is from assistant, returning it directly")
@@ -411,7 +389,8 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
                 role="system", content=self.instruction
             )
             messages.insert(0, system_msg)
-
+        model_name = self.default_request_params.model
+        self.show_user_message(prompt[-1].first_text(), model_name, self.chat_turn())
         # Use the beta parse feature
         try:
             openai_client = OpenAI(api_key=self._api_key(), base_url=self._base_url())
@@ -429,8 +408,8 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
 
             if response and isinstance(response[0], BaseException):
                 raise response[0]
-
             parsed_result = response[0].choices[0].message
+            await self.show_assistant_message(parsed_result.content)
             logger.debug("Successfully used OpenAI beta parse feature for structured output")
             return parsed_result.parsed, Prompt.assistant(parsed_result.content)
 
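This path wraps OpenAI's beta structured-outputs API; the new line echoes the assistant message before returning the parsed model. A hedged sketch of the underlying SDK call, independent of fast-agent (the model name and schema are illustrative):

```python
# Hedged sketch of the OpenAI beta parse call this code path wraps;
# the model name and Step schema are illustrative only.
from openai import OpenAI
from pydantic import BaseModel

class Step(BaseModel):
    explanation: str
    output: str

client = OpenAI()  # reads OPENAI_API_KEY from the environment
completion = client.beta.chat.completions.parse(
    model="gpt-4o-2024-08-06",
    messages=[{"role": "user", "content": "Solve 2x + 3 = 7 step by step."}],
    response_format=Step,
)
parsed = completion.choices[0].message.parsed  # a Step instance, or None
```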
mcp_agent/mcp/mcp_connection_manager.py
CHANGED
@@ -3,6 +3,7 @@ Manages the lifecycle of multiple MCP server connections.
 """
 
 import asyncio
+import traceback
 from datetime import timedelta
 from typing import (
     TYPE_CHECKING,
@@ -179,7 +180,7 @@ async def _server_lifecycle_task(server_conn: ServerConnection) -> None:
             },
         )
         server_conn._error_occurred = True
-        server_conn._error_message = 
+        server_conn._error_message = traceback.format_exception(exc)
         # If there's an error, we should also set the event so that
         # 'get_server' won't hang
         server_conn._initialized_event.set()
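The previously truncated assignment now uses the Python 3.10+ single-argument form of `traceback.format_exception`, which returns a list of strings rather than one string; that list is what the `or "Unknown error"` fallback and the two-argument `ServerInitializationError` below carry along. For example:

```python
# format_exception(exc) is the 3.10+ single-argument form and returns
# a list of strings, one per traceback line.
import traceback

try:
    raise RuntimeError("server process exited early")
except RuntimeError as exc:
    error_message = traceback.format_exception(exc)

print("".join(error_message))  # the full formatted traceback
```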
@@ -270,7 +271,7 @@ class MCPConnectionManager(ContextDependent):
             logger.debug(f"{server_name}: Creating stdio client with custom error handler")
             return stdio_client(server_params, errlog=error_handler)
         elif config.transport == "sse":
-            return sse_client(config.url)
+            return sse_client(config.url, config.headers)
         else:
             raise ValueError(f"Unsupported transport: {config.transport}")
 
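Both this call site and the one in `mcp_server_registry.py` below now pass `config.headers` as the second argument to `sse_client`, so the headers added to `MCPServerSettings` reach the transport. A hedged sketch of the call in isolation, assuming the MCP SDK's `sse_client` accepts a headers mapping in that position (placeholder URL and token):

```python
# Hedged sketch: opening an SSE transport with custom headers, assuming
# sse_client(url, headers) as used at the call sites in this diff.
import asyncio

from mcp.client.sse import sse_client

async def open_transport() -> None:
    headers = {"Authorization": "Bearer <token>"}
    async with sse_client("https://example.com/sse", headers) as (read_stream, write_stream):
        ...  # hand the streams to a client session factory

asyncio.run(open_transport())
```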
@@ -328,7 +329,8 @@ class MCPConnectionManager(ContextDependent):
         if not server_conn.is_healthy():
             error_msg = server_conn._error_message or "Unknown error"
             raise ServerInitializationError(
-                f"MCP Server: '{server_name}': Failed to initialize
+                f"MCP Server: '{server_name}': Failed to initialize - see details. Check fastagent.config.yaml?",
+                error_msg,
             )
 
         return server_conn
mcp_agent/mcp_server_registry.py
CHANGED
@@ -152,7 +152,7 @@ class ServerRegistry:
                 raise ValueError(f"URL is required for SSE transport: {server_name}")
 
             # Use sse_client to get the read and write streams
-            async with sse_client(config.url) as (read_stream, write_stream):
+            async with sse_client(config.url, config.headers) as (read_stream, write_stream):
                 session = client_session_factory(
                     read_stream,
                     write_stream,
@@ -260,6 +260,7 @@ class ServerRegistry:
         Returns:
             MCPServerSettings: The server configuration.
         """
+
         server_config = self.registry.get(server_name)
         if server_config is None:
             logger.warning(f"Server '{server_name}' not found in registry.")
mcp_agent/mcp/mcp_activity.py
DELETED
@@ -1,18 +0,0 @@
-# import functools
-# from temporalio import activity
-# from typing import Dict, Any, List, Callable, Awaitable
-# from .gen_client import gen_client
-
-
-# def mcp_activity(server_name: str, mcp_call: Callable):
-#     def decorator(func):
-#         @activity.defn
-#         @functools.wraps(func)
-#         async def wrapper(*activity_args, **activity_kwargs):
-#             params = await func(*activity_args, **activity_kwargs)
-#             async with gen_client(server_name) as client:
-#                 return await mcp_call(client, params)
-
-#         return wrapper
-
-#     return decorator
{fast_agent_mcp-0.2.6.dist-info → fast_agent_mcp-0.2.7.dist-info}/WHEEL
File without changes

{fast_agent_mcp-0.2.6.dist-info → fast_agent_mcp-0.2.7.dist-info}/entry_points.txt
File without changes

{fast_agent_mcp-0.2.6.dist-info → fast_agent_mcp-0.2.7.dist-info}/licenses/LICENSE
File without changes