fast-agent-mcp 0.2.6__py3-none-any.whl → 0.2.8__py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
- {fast_agent_mcp-0.2.6.dist-info → fast_agent_mcp-0.2.8.dist-info}/METADATA +3 -4
- {fast_agent_mcp-0.2.6.dist-info → fast_agent_mcp-0.2.8.dist-info}/RECORD +20 -20
- mcp_agent/agents/base_agent.py +5 -5
- mcp_agent/agents/workflow/chain_agent.py +3 -11
- mcp_agent/agents/workflow/router_agent.py +22 -5
- mcp_agent/config.py +17 -3
- mcp_agent/core/enhanced_prompt.py +28 -9
- mcp_agent/core/fastagent.py +2 -0
- mcp_agent/core/interactive_prompt.py +130 -26
- mcp_agent/llm/augmented_llm.py +5 -1
- mcp_agent/llm/model_factory.py +5 -0
- mcp_agent/llm/providers/augmented_llm_anthropic.py +15 -49
- mcp_agent/llm/providers/augmented_llm_openai.py +19 -40
- mcp_agent/llm/providers/augmented_llm_openrouter.py +78 -0
- mcp_agent/mcp/logger_textio.py +15 -4
- mcp_agent/mcp/mcp_connection_manager.py +5 -3
- mcp_agent/mcp_server_registry.py +2 -1
- mcp_agent/mcp/mcp_activity.py +0 -18
- {fast_agent_mcp-0.2.6.dist-info → fast_agent_mcp-0.2.8.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.2.6.dist-info → fast_agent_mcp-0.2.8.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.2.6.dist-info → fast_agent_mcp-0.2.8.dist-info}/licenses/LICENSE +0 -0
{fast_agent_mcp-0.2.6.dist-info → fast_agent_mcp-0.2.8.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fast-agent-mcp
-Version: 0.2.6
+Version: 0.2.8
 Summary: Define, Prompt and Test MCP enabled Agents and Workflows
 Author-email: Shaun Smith <fastagent@llmindset.co.uk>, Sarmad Qadri <sarmad@lastmileai.dev>
 License: Apache License
@@ -260,8 +260,7 @@ The simple declarative syntax lets you concentrate on composing your Prompts and
 
 `fast-agent` is multi-modal, supporting Images and PDFs for both Anthropic and OpenAI endpoints via Prompts, Resources and MCP Tool Call results. The inclusion of passthrough and playback LLMs enable rapid development and test of Python glue-code for your applications.
 
-> [!TIP]
-> `fast-agent` is now MCP Native! Coming Soon - Full Documentation Site and Further MCP Examples.
+> [!TIP] > `fast-agent` is now MCP Native! Coming Soon - Full Documentation Site and Further MCP Examples.
 
 ### Agent Application Development
 
@@ -525,7 +524,7 @@ agent["greeter"].send("Good Evening!") # Dictionary access is supported
       servers=["filesystem"], # list of MCP Servers for the agent
       model="o3-mini.high", # specify a model for the agent
       use_history=True, # agent maintains chat history
-      request_params=
+      request_params=RequestParams(temperature= 0.7)), # additional parameters for the LLM (or RequestParams())
      human_input=True, # agent can request human input
 )
 ```
{fast_agent_mcp-0.2.6.dist-info → fast_agent_mcp-0.2.8.dist-info}/RECORD
CHANGED
@@ -1,23 +1,23 @@
 mcp_agent/__init__.py,sha256=-AIoeL4c9UAp_P4U0z-uIWTTmQWdihOis5nbQ5L_eao,1664
 mcp_agent/app.py,sha256=jBmzYM_o50g8vhlTgkkf5TGiBWNbXWViYnd0WANbpzo,10276
-mcp_agent/config.py,sha256=
+mcp_agent/config.py,sha256=0GVtAMSiK1oPklHlH-3rbhjPfBx18JfEAn-W-HG5x6k,12167
 mcp_agent/console.py,sha256=Gjf2QLFumwG1Lav__c07X_kZxxEUSkzV-1_-YbAwcwo,813
 mcp_agent/context.py,sha256=pp_F1Q1jgAxGrRccSZJutn1JUxYfVue-St3S8tUyptM,7903
 mcp_agent/context_dependent.py,sha256=QXfhw3RaQCKfscEEBRGuZ3sdMWqkgShz2jJ1ivGGX1I,1455
 mcp_agent/event_progress.py,sha256=25iz0yyg-O4glMmtijcYpDdUmtUIKsCmR_8A52GgeC4,2716
-mcp_agent/mcp_server_registry.py,sha256=
+mcp_agent/mcp_server_registry.py,sha256=w0sq-5o_AVVGfwUBo0c_Ekbyjd3Tjg9bzi2r8UZry7o,9945
 mcp_agent/progress_display.py,sha256=GeJU9VUt6qKsFVymG688hCMVCsAygG9ifiiEb5IcbN4,361
 mcp_agent/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/agents/agent.py,sha256=Tn2YKw_ytx9b8jC-65WYQmrnD43kYiZsLa4sVHxn9d4,3854
-mcp_agent/agents/base_agent.py,sha256=
+mcp_agent/agents/base_agent.py,sha256=dzyy4tDHJfRC4Sp-JqXeDwykk5SH55k89dUXQROIdQ4,23488
 mcp_agent/agents/workflow/__init__.py,sha256=HloteEW6kalvgR0XewpiFAqaQlMPlPJYg5p3K33IUzI,25
-mcp_agent/agents/workflow/chain_agent.py,sha256=
+mcp_agent/agents/workflow/chain_agent.py,sha256=efftXdHc5F-XY8jnz5npHbKHhqnzHh28WbU5yQ4yUn0,6105
 mcp_agent/agents/workflow/evaluator_optimizer.py,sha256=VWdzVIy_qSiVsDJO22ta3RB3drkvBfXk9HxBYMpsC5U,13300
 mcp_agent/agents/workflow/orchestrator_agent.py,sha256=30hFQyAmtjQTX6Li_zWWIHCpdNpfZkDo57YXXW5xIsI,21561
 mcp_agent/agents/workflow/orchestrator_models.py,sha256=5P_aXADVT4Et8qT4e1cb9RelmHX5dCRrzu8j8T41Kdg,7230
 mcp_agent/agents/workflow/orchestrator_prompts.py,sha256=EXKEI174sshkZyPPEnWbwwNafzSPuA39MXL7iqG9cWc,9106
 mcp_agent/agents/workflow/parallel_agent.py,sha256=SgIXJx2X_MSlLOv6WXYRezwjDYjU9f95eKQzTm5Y_lk,7087
-mcp_agent/agents/workflow/router_agent.py,sha256=
+mcp_agent/agents/workflow/router_agent.py,sha256=c4MU55U6q1DRayP0sDoyxdlnKX-N0LPbRv-MFlwbwrY,11165
 mcp_agent/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/cli/__main__.py,sha256=AVZ7tQFhU_sDOGuUGJq8ujgKtcxsYJBJwHbVaaiRDlI,166
 mcp_agent/cli/main.py,sha256=PZdPJfsAJOm80vTu7j_XpMPhaDZOpqSe-ciU3YQsmA4,3149
@@ -30,11 +30,11 @@ mcp_agent/core/agent_app.py,sha256=5nQJNo8DocIRWiX4pVKAHUZF8s6HWpc-hJnfzl_1v1c,9
 mcp_agent/core/agent_types.py,sha256=LuWslu9YI6JRnAWwh_A1ZejK72-e839wH7tf2MHxSIU,1389
 mcp_agent/core/direct_decorators.py,sha256=Q6t3VpRPLCRzqJycPZIkKbbEJMVocxdScp5o2xn4gLU,14460
 mcp_agent/core/direct_factory.py,sha256=hYFCucZVAQ2wrfqIe9Qameoa-cCRaQ53R97EMHvUZAM,17572
-mcp_agent/core/enhanced_prompt.py,sha256=
+mcp_agent/core/enhanced_prompt.py,sha256=A0FJ_-dr1RLq3uzmFSxiOBxw5synW2BhA4QntQyYVwg,18792
 mcp_agent/core/error_handling.py,sha256=xoyS2kLe0eG0bj2eSJCJ2odIhGUve2SbDR7jP-A-uRw,624
 mcp_agent/core/exceptions.py,sha256=ENAD_qGG67foxy6vDkIvc-lgopIUQy6O7zvNPpPXaQg,2289
-mcp_agent/core/fastagent.py,sha256=
-mcp_agent/core/interactive_prompt.py,sha256=
+mcp_agent/core/fastagent.py,sha256=T2kyq32wBJCOj13Zy1G_XJjQZb1S4HVdx3OBzmEMHBg,18644
+mcp_agent/core/interactive_prompt.py,sha256=y56K2ZIvj5hZZwtEDHezJCOlduRwAcj2fc4GqhKq9ZY,23357
 mcp_agent/core/mcp_content.py,sha256=2D7KHY9mG_vxoDwFLKvsPQV9VRIzHItM7V-jcEnACh8,8878
 mcp_agent/core/prompt.py,sha256=qnintOUGEoDPYLI9bu9G2OlgVMCe5ZPUZilgMzydXhc,7919
 mcp_agent/core/request_params.py,sha256=bEjWo86fqxdiWm2U5nPDd1uCUpcIQO9oiCinhB8lQN0,1185
@@ -50,20 +50,21 @@ mcp_agent/human_input/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3h
 mcp_agent/human_input/handler.py,sha256=s712Z5ssTCwjL9-VKoIdP5CtgMh43YvepynYisiWTTA,3144
 mcp_agent/human_input/types.py,sha256=RtWBOVzy8vnYoQrc36jRLn8z8N3C4pDPMBN5vF6qM5Y,1476
 mcp_agent/llm/__init__.py,sha256=d8zgwG-bRFuwiMNMYkywg_qytk4P8lawyld_meuUmHI,68
-mcp_agent/llm/augmented_llm.py,sha256=
+mcp_agent/llm/augmented_llm.py,sha256=YIB3I_taoglo_vSmZLQ50cv1qCSctaQlWVwjI-7WTkk,18304
 mcp_agent/llm/augmented_llm_passthrough.py,sha256=U0LssNWNVuZRuD9I7Wuvpo7vdDW4xtoPLirnYCgBGTY,6128
 mcp_agent/llm/augmented_llm_playback.py,sha256=YVR2adzjMf9Q5WfYBytryWMRqJ87a3kNBnjxhApsMcU,3413
 mcp_agent/llm/memory.py,sha256=UakoBCJBf59JBtB6uyZM0OZjlxDW_VHtSfDs08ibVEc,3312
-mcp_agent/llm/model_factory.py,sha256=
+mcp_agent/llm/model_factory.py,sha256=fj14NMYYg7yBxq7TsVuLIEYrK6rzPW1_p9O0Yegoq00,7844
 mcp_agent/llm/prompt_utils.py,sha256=yWQHykoK13QRF7evHUKxVF0SpVLN-Bsft0Yixzvn0g0,4825
 mcp_agent/llm/sampling_converter.py,sha256=C7wPBlmT0eD90XWabC22zkxsrVHKCrjwIwg6cG628cI,2926
 mcp_agent/llm/sampling_format_converter.py,sha256=xGz4odHpOcP7--eFaJaFtUR8eR9jxZS7MnLH6J7n0EU,1263
 mcp_agent/llm/providers/__init__.py,sha256=heVxtmuqFJOnjjxHz4bWSqTAxXoN1E8twC_gQ_yJpHk,265
 mcp_agent/llm/providers/anthropic_utils.py,sha256=vYDN5G5jKMhD2CQg8veJYab7tvvzYkDMq8M1g_hUAQg,3275
-mcp_agent/llm/providers/augmented_llm_anthropic.py,sha256=
+mcp_agent/llm/providers/augmented_llm_anthropic.py,sha256=CNKpTEvWqjOteACUx_Vha0uFpPt32C17JrkSXg_allM,14445
 mcp_agent/llm/providers/augmented_llm_deepseek.py,sha256=SdYDqZZ9hM9sBvW1FSItNn_ENEKQXGNKwVHGnjqjyAA,1927
 mcp_agent/llm/providers/augmented_llm_generic.py,sha256=IIgwPYsVGwDdL2mMYsc5seY3pVFblMwmnxoI5dbxras,1524
-mcp_agent/llm/providers/augmented_llm_openai.py,sha256=
+mcp_agent/llm/providers/augmented_llm_openai.py,sha256=Wso9GVgsq8y3sqlOzTk_iQqrkCOL3LyuG07nA1PWDng,17913
+mcp_agent/llm/providers/augmented_llm_openrouter.py,sha256=AajWXFIgGEDjeEx8AWCTs3mZGTPaihdsrjEUiNAJkIM,3501
 mcp_agent/llm/providers/multipart_converter_anthropic.py,sha256=t5lHYGfFUacJldnrVtMNW-8gEMoto8Y7hJkDrnyZR-Y,16650
 mcp_agent/llm/providers/multipart_converter_openai.py,sha256=zCj0LBgd9FDG8aL_GeTrPo2ssloYnmC_Uj3ENWVUJAg,16753
 mcp_agent/llm/providers/openai_multipart.py,sha256=qKBn7d3jSabnJmVgWweVzqh8q9mBqr09fsPmP92niAQ,6899
@@ -81,11 +82,10 @@ mcp_agent/logging/transport.py,sha256=m8YsLLu5T8eof_ndpLQs4gHOzqqEL98xsVwBwDsBfx
 mcp_agent/mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/mcp/gen_client.py,sha256=fAVwFVCgSamw4PwoWOV4wrK9TABx1S_zZv8BctRyF2k,3030
 mcp_agent/mcp/interfaces.py,sha256=vma7bbWbY3zp1RM6hMYxVO4aV6Vfaygm-nLwzK2jFKI,6748
-mcp_agent/mcp/logger_textio.py,sha256=
-mcp_agent/mcp/mcp_activity.py,sha256=CajXCFWZ2cKEX9s4-HfNVAj471ePTVs4NOkvmIh65tE,592
+mcp_agent/mcp/logger_textio.py,sha256=vljC1BtNTCxBAda9ExqNB-FwVNUZIuJT3h1nWmCjMws,3172
 mcp_agent/mcp/mcp_agent_client_session.py,sha256=RMYNltc2pDIzxwEJSS5589RbvPO0KWV4Y3jSyAmhKf0,4181
 mcp_agent/mcp/mcp_aggregator.py,sha256=jaWbOvb3wioECohZ47CubyxfJ5QkfNSshu1hwhZksG4,40486
-mcp_agent/mcp/mcp_connection_manager.py,sha256=
+mcp_agent/mcp/mcp_connection_manager.py,sha256=AMIm2FBbIk7zHInb8X-kFSQFO5TKcoi9w8WU8nx8Ig0,13834
 mcp_agent/mcp/mime_utils.py,sha256=difepNR_gpb4MpMLkBRAoyhDk-AjXUHTiqKvT_VwS1o,1805
 mcp_agent/mcp/prompt_message_multipart.py,sha256=IpIndd75tAcCbJbfqjpAF0tOUUP1TQceDbWoxO5gvpo,3684
 mcp_agent/mcp/prompt_render.py,sha256=k3v4BZDThGE2gGiOYVQtA6x8WTEdOuXIEnRafANhN1U,2996
@@ -134,8 +134,8 @@ mcp_agent/resources/examples/workflows/orchestrator.py,sha256=rOGilFTliWWnZ3Jx5w
 mcp_agent/resources/examples/workflows/parallel.py,sha256=n0dFN26QvYd2wjgohcaUBflac2SzXYx-bCyxMSousJE,1884
 mcp_agent/resources/examples/workflows/router.py,sha256=E4x_-c3l4YW9w1i4ARcDtkdeqIdbWEGfsMzwLYpdbVc,1677
 mcp_agent/ui/console_display.py,sha256=TVGDtJ37hc6UG0ei9g7ZPZZfFNeS1MYozt-Mx8HsPCk,9752
-fast_agent_mcp-0.2.
-fast_agent_mcp-0.2.
-fast_agent_mcp-0.2.
-fast_agent_mcp-0.2.
-fast_agent_mcp-0.2.
+fast_agent_mcp-0.2.8.dist-info/METADATA,sha256=k7K-lDRpHTGxQ1hH17zLSapAx0DivjdSJaAqSNtuWeI,29849
+fast_agent_mcp-0.2.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+fast_agent_mcp-0.2.8.dist-info/entry_points.txt,sha256=qPM7vwtN1_KmP3dXehxgiCxUBHtqP7yfenZigztvY-w,226
+fast_agent_mcp-0.2.8.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
+fast_agent_mcp-0.2.8.dist-info/RECORD,,
mcp_agent/agents/base_agent.py
CHANGED
@@ -215,7 +215,7 @@ class BaseAgent(MCPAggregator, AgentProtocol):
 
         # Use the LLM to generate a response
         response = await self.generate([prompt], None)
-        return response.
+        return response.all_text()
 
     def _normalize_message_input(
         self, message: Union[str, PromptMessage, PromptMessageMultipart]
@@ -625,15 +625,15 @@ class BaseAgent(MCPAggregator, AgentProtocol):
     @property
     def agent_type(self) -> str:
         """
-        Return the type of this agent.
-
+        Return the type of this agent.
+
         This is used for display purposes in the interactive prompt and other UI elements.
-
+
         Returns:
             String representing the agent type
         """
         return self.config.agent_type
-
+
     @property
     def message_history(self) -> List[PromptMessageMultipart]:
         """
mcp_agent/agents/workflow/chain_agent.py
CHANGED
@@ -23,7 +23,8 @@ class ChainAgent(BaseAgent):
     A chain agent that processes requests through a series of specialized agents in sequence.
     Passes the output of each agent to the next agent in the chain.
     """
-
+
+    # TODO -- consider adding "repeat" mode
     @property
     def agent_type(self) -> str:
         """Return the type of this agent."""
@@ -70,20 +71,11 @@ class ChainAgent(BaseAgent):
         # # Get the original user message (last message in the list)
         user_message = multipart_messages[-1] if multipart_messages else None
 
-        # # If no user message, return an error
-        # if not user_message:
-        #     return PromptMessageMultipart(
-        #         role="assistant",
-        #         content=[TextContent(type="text", text="No input message provided.")],
-        #     )
-
-        # Initialize messages with the input
-
         if not self.cumulative:
             response: PromptMessageMultipart = await self.agents[0].generate(multipart_messages)
             # Process the rest of the agents in the chain
             for agent in self.agents[1:]:
-                next_message = Prompt.user(response.content
+                next_message = Prompt.user(*response.content)
                 response = await agent.generate([next_message])
 
             return response
mcp_agent/agents/workflow/router_agent.py
CHANGED
@@ -53,12 +53,29 @@ You are a highly accurate request router that directs incoming requests to the m
 Your task is to analyze the request and determine the most appropriate agent from the options above.
 
 <fastagent:instruction>
-Respond
+Respond with JSON following the schema below:
 {{
-    "
-    "
-    "
+    "type": "object",
+    "required": ["agent", "confidence", "reasoning"],
+    "properties": {{
+        "agent": {{
+            "type": "string",
+            "description": "The exact name of the selected agent"
+        }},
+        "confidence": {{
+            "type": "string",
+            "enum": ["high", "medium", "low"],
+            "description": "Your confidence level in this selection"
+        }},
+        "reasoning": {{
+            "type": "string",
+            "description": "Brief explanation for your selection"
+        }}
+    }}
 }}
+
+Supply only the JSON with no preamble. Use "reasoning" field to describe actions. NEVER EMIT CODE FENCES.
+
 </fastagent:instruction>
 """
 
@@ -87,7 +104,7 @@ class RouterAgent(BaseAgent):
     A simplified router that uses an LLM to determine the best agent for a request,
     then dispatches the request to that agent and returns the response.
     """
-
+
     @property
     def agent_type(self) -> str:
         """Return the type of this agent."""
mcp_agent/config.py
CHANGED
@@ -75,6 +75,9 @@ class MCPServerSettings(BaseModel):
     url: str | None = None
     """The URL for the server (e.g. for SSE transport)."""
 
+    headers: Dict[str, str] | None = None
+    """Headers dictionary for SSE connections"""
+
     auth: MCPServerAuthSettings | None = None
     """The authentication configuration for the server."""
 
@@ -84,9 +87,6 @@ class MCPServerSettings(BaseModel):
     env: Dict[str, str] | None = None
     """Environment variables to pass to the server process."""
 
-    env: Dict[str, str] | None = None
-    """Environment variables to pass to the server process."""
-
     sampling: MCPSamplingSettings | None = None
     """Sampling settings for this Client/Server pair"""
 
@@ -146,6 +146,17 @@ class GenericSettings(BaseModel):
     base_url: str | None = None
 
     model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)
+
+
+class OpenRouterSettings(BaseModel):
+    """
+    Settings for using OpenRouter models via its OpenAI-compatible API.
+    """
+    api_key: str | None = None
+
+    base_url: str | None = None  # Optional override, defaults handled in provider
+
+    model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)
 
 
 class TemporalSettings(BaseModel):
@@ -262,6 +273,9 @@ class Settings(BaseSettings):
     deepseek: DeepSeekSettings | None = None
     """Settings for using DeepSeek models in the fast-agent application"""
 
+    openrouter: OpenRouterSettings | None = None
+    """Settings for using OpenRouter models in the fast-agent application"""
+
     generic: GenericSettings | None = None
     """Settings for using Generic models in the fast-agent application"""
 
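The config changes above add two user-facing options: an optional headers mapping on MCPServerSettings (forwarded to SSE connections, see the connection manager and server registry diffs below) and a new OpenRouterSettings block on Settings. A minimal sketch of populating these models directly; normally they are loaded from fastagent.config.yaml, and every value below is a placeholder:

from mcp_agent.config import MCPServerSettings, OpenRouterSettings

# SSE server definition that sends an auth header with every connection (new in 0.2.8).
sse_server = MCPServerSettings(
    transport="sse",
    url="https://example.com/sse",                # placeholder URL
    headers={"Authorization": "Bearer <token>"},  # placeholder token
)

# OpenRouter credentials; the OPENROUTER_API_KEY environment variable works as a fallback.
openrouter = OpenRouterSettings(api_key="<openrouter-api-key>")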
mcp_agent/core/enhanced_prompt.py
CHANGED
@@ -285,10 +285,17 @@ async def get_enhanced_input(
             elif cmd == "agents":
                 return "LIST_AGENTS"
             elif cmd == "prompts":
-
+                # Return a dictionary with select_prompt action instead of a string
+                # This way it will match what the command handler expects
+                return {"select_prompt": True, "prompt_name": None}
             elif cmd == "prompt" and len(cmd_parts) > 1:
-                # Direct prompt selection with name
-
+                # Direct prompt selection with name or number
+                prompt_arg = cmd_parts[1].strip()
+                # Check if it's a number (use as index) or a name (use directly)
+                if prompt_arg.isdigit():
+                    return {"select_prompt": True, "prompt_index": int(prompt_arg)}
+                else:
+                    return f"SELECT_PROMPT:{prompt_arg}"
             elif cmd == "exit":
                 return "EXIT"
             elif cmd.lower() == "stop":
@@ -420,13 +427,27 @@ async def get_argument_input(
         prompt_session.app.exit()
 
 
-async def handle_special_commands(command
-    """
+async def handle_special_commands(command, agent_app=None):
+    """
+    Handle special input commands.
+
+    Args:
+        command: The command to handle, can be string or dictionary
+        agent_app: Optional agent app reference
+
+    Returns:
+        True if command was handled, False if not, or a dict with action info
+    """
     # Quick guard for empty or None commands
     if not command:
         return False
+
+    # If command is already a dictionary, it has been pre-processed
+    # Just return it directly (like when /prompts converts to select_prompt dict)
+    if isinstance(command, dict):
+        return command
 
-    # Check for special commands
+    # Check for special string commands
     if command == "HELP":
         rich_print("\n[bold]Available Commands:[/bold]")
         rich_print("  /help - Show this help")
@@ -450,7 +471,7 @@ async def handle_special_commands(command: str, agent_app=None):
         print("\033c", end="")
         return True
 
-    elif command.upper() == "EXIT":
+    elif isinstance(command, str) and command.upper() == "EXIT":
         raise PromptExitError("User requested to exit fast-agent session")
 
     elif command == "LIST_AGENTS":
@@ -462,8 +483,6 @@ async def handle_special_commands(command: str, agent_app=None):
         rich_print("[yellow]No agents available[/yellow]")
         return True
 
-    # Removed LIST_PROMPTS handling as it's now covered by SELECT_PROMPT
-
    elif command == "SELECT_PROMPT" or (
        isinstance(command, str) and command.startswith("SELECT_PROMPT:")
    ):
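The net effect of these changes is that the slash-command parser can hand back either a plain string or a pre-processed dictionary, and handle_special_commands now passes dictionaries straight through. A short summary of the shapes involved, taken directly from the diff above:

# "/prompts"        -> {"select_prompt": True, "prompt_name": None}   # open the prompt selection UI
# "/prompt 3"       -> {"select_prompt": True, "prompt_index": 3}     # 1-based index into the listed prompts
# "/prompt my_name" -> "SELECT_PROMPT:my_name"                        # select by name
# handle_special_commands() returns dictionaries unchanged, so the interactive loop
# can branch on "prompt_index" versus "prompt_name".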
mcp_agent/core/fastagent.py
CHANGED
@@ -235,6 +235,8 @@ class FastAgent:
             progress_display.stop()
 
         # Pre-flight validation
+        if 0 == len(self.agents):
+            raise AgentConfigError("No agents defined. Please define at least one agent.")
         validate_server_references(self.context, self.agents)
         validate_workflow_references(self.agents)
 
mcp_agent/core/interactive_prompt.py
CHANGED
@@ -26,6 +26,7 @@ from mcp_agent.core.enhanced_prompt import (
     get_selection_input,
     handle_special_commands,
 )
+from mcp_agent.mcp.mcp_aggregator import SEP  # Import SEP once at the top
 from mcp_agent.progress_display import progress_display
 
 
@@ -96,7 +97,7 @@ class InteractivePrompt:
 
                 # Handle special commands - pass "True" to enable agent switching
                 command_result = await handle_special_commands(user_input, True)
-
+
                 # Check if we should switch agents
                 if isinstance(command_result, dict):
                     if "switch_agent" in command_result:
@@ -107,6 +108,7 @@ class InteractivePrompt:
                         else:
                             rich_print(f"[red]Agent '{new_agent}' not found[/red]")
                         continue
+                    # Keep the existing list_prompts handler for backward compatibility
                     elif "list_prompts" in command_result and list_prompts_func:
                         # Use the list_prompts_func directly
                         await self._list_prompts(list_prompts_func, agent)
@@ -114,7 +116,29 @@ class InteractivePrompt:
                     elif "select_prompt" in command_result and (list_prompts_func and apply_prompt_func):
                         # Handle prompt selection, using both list_prompts and apply_prompt
                         prompt_name = command_result.get("prompt_name")
-
+                        prompt_index = command_result.get("prompt_index")
+
+                        # If a specific index was provided (from /prompt <number>)
+                        if prompt_index is not None:
+                            # First get a list of all prompts to look up the index
+                            all_prompts = await self._get_all_prompts(list_prompts_func, agent)
+                            if not all_prompts:
+                                rich_print("[yellow]No prompts available[/yellow]")
+                                continue
+
+                            # Check if the index is valid
+                            if 1 <= prompt_index <= len(all_prompts):
+                                # Get the prompt at the specified index (1-based to 0-based)
+                                selected_prompt = all_prompts[prompt_index - 1]
+                                # Use the already created namespaced_name to ensure consistency
+                                await self._select_prompt(list_prompts_func, apply_prompt_func, agent, selected_prompt["namespaced_name"])
+                            else:
+                                rich_print(f"[red]Invalid prompt number: {prompt_index}. Valid range is 1-{len(all_prompts)}[/red]")
+                                # Show the prompt list for convenience
+                                await self._list_prompts(list_prompts_func, agent)
+                        else:
+                            # Use the name-based selection
+                            await self._select_prompt(list_prompts_func, apply_prompt_func, agent, prompt_name)
                         continue
 
                 # Skip further processing if command was handled
@@ -131,42 +155,119 @@ class InteractivePrompt:
 
         return result
 
-    async def
+    async def _get_all_prompts(self, list_prompts_func, agent_name):
         """
-
-
+        Get a list of all available prompts.
+
         Args:
             list_prompts_func: Function to get available prompts
             agent_name: Name of the agent
+
+        Returns:
+            List of prompt info dictionaries, sorted by server and name
         """
-        from rich import print as rich_print
-
         try:
-            #
-
-
-
+            # Pass None instead of agent_name to get prompts from all servers
+            # the agent_name parameter should never be used as a server name
+            prompt_servers = await list_prompts_func(None)
+            all_prompts = []
 
             # Process the returned prompt servers
             if prompt_servers:
-
+                # First collect all prompts
                 for server_name, prompts_info in prompt_servers.items():
                     if prompts_info and hasattr(prompts_info, "prompts") and prompts_info.prompts:
-                        rich_print(f"\n[bold cyan]{server_name}:[/bold cyan]")
                         for prompt in prompts_info.prompts:
-
-
+                            # Use the SEP constant for proper namespacing
+                            all_prompts.append({
+                                "server": server_name,
+                                "name": prompt.name,
+                                "namespaced_name": f"{server_name}{SEP}{prompt.name}",
+                                "description": getattr(prompt, "description", "No description"),
+                                "arg_count": len(getattr(prompt, "arguments", [])),
+                                "arguments": getattr(prompt, "arguments", [])
+                            })
                     elif isinstance(prompts_info, list) and prompts_info:
-                        rich_print(f"\n[bold cyan]{server_name}:[/bold cyan]")
                         for prompt in prompts_info:
                             if isinstance(prompt, dict) and "name" in prompt:
-
+                                all_prompts.append({
+                                    "server": server_name,
+                                    "name": prompt["name"],
+                                    "namespaced_name": f"{server_name}{SEP}{prompt['name']}",
+                                    "description": prompt.get("description", "No description"),
+                                    "arg_count": len(prompt.get("arguments", [])),
+                                    "arguments": prompt.get("arguments", [])
+                                })
                             else:
-
-
+                                all_prompts.append({
+                                    "server": server_name,
+                                    "name": str(prompt),
+                                    "namespaced_name": f"{server_name}{SEP}{str(prompt)}",
+                                    "description": "No description",
+                                    "arg_count": 0,
+                                    "arguments": []
+                                })
+
+            # Sort prompts by server and name for consistent ordering
+            all_prompts.sort(key=lambda p: (p["server"], p["name"]))
+
+            return all_prompts
+
+        except Exception as e:
+            import traceback
+
+            from rich import print as rich_print
+            rich_print(f"[red]Error getting prompts: {e}[/red]")
+            rich_print(f"[dim]{traceback.format_exc()}[/dim]")
+            return []
+
+    async def _list_prompts(self, list_prompts_func, agent_name) -> None:
+        """
+        List available prompts for an agent.
+
+        Args:
+            list_prompts_func: Function to get available prompts
+            agent_name: Name of the agent
+        """
+        from rich import print as rich_print
+        from rich.console import Console
+        from rich.table import Table
 
-
-
+        console = Console()
+
+        try:
+            # Directly call the list_prompts function for this agent
+            rich_print(f"\n[bold]Fetching prompts for agent [cyan]{agent_name}[/cyan]...[/bold]")
+
+            # Get all prompts using the helper function - pass None as server name
+            # to get prompts from all available servers
+            all_prompts = await self._get_all_prompts(list_prompts_func, None)
+
+            if all_prompts:
+                # Create a table for better display
+                table = Table(title="Available MCP Prompts")
+                table.add_column("#", justify="right", style="cyan")
+                table.add_column("Server", style="green")
+                table.add_column("Prompt Name", style="bright_blue")
+                table.add_column("Description")
+                table.add_column("Args", justify="center")
+
+                # Add prompts to table
+                for i, prompt in enumerate(all_prompts):
+                    table.add_row(
+                        str(i + 1),
+                        prompt["server"],
+                        prompt["name"],
+                        prompt["description"],
+                        str(prompt["arg_count"])
+                    )
+
+                console.print(table)
+
+                # Add usage instructions
+                rich_print("\n[bold]Usage:[/bold]")
+                rich_print(" • Use [cyan]/prompt <number>[/cyan] to select a prompt by number")
+                rich_print(" • Or use [cyan]/prompts[/cyan] to open the prompt selection UI")
             else:
                 rich_print("[yellow]No prompts available[/yellow]")
         except Exception as e:
@@ -192,7 +293,9 @@ class InteractivePrompt:
         try:
             # Get all available prompts directly from the list_prompts function
             rich_print(f"\n[bold]Fetching prompts for agent [cyan]{agent_name}[/cyan]...[/bold]")
-
+            # IMPORTANT: list_prompts_func gets MCP server prompts, not agent prompts
+            # So we pass None to get prompts from all servers, not using agent_name as server name
+            prompt_servers = await list_prompts_func(None)
 
             if not prompt_servers:
                 rich_print("[yellow]No prompts available for this agent[/yellow]")
@@ -242,8 +345,8 @@ class InteractivePrompt:
                         else:
                             optional_args.append(name)
 
-                    # Create namespaced version
-                    namespaced_name = f"{server_name}
+                    # Create namespaced version using the consistent separator
+                    namespaced_name = f"{server_name}{SEP}{prompt_name}"
 
                    # Add to collection
                    all_prompts.append(
@@ -410,12 +513,13 @@ class InteractivePrompt:
                 arg_values[arg_name] = arg_value
 
             # Apply the prompt
+            namespaced_name = selected_prompt["namespaced_name"]
             rich_print(
-                f"\n[bold]Applying prompt [cyan]{
+                f"\n[bold]Applying prompt [cyan]{namespaced_name}[/cyan]...[/bold]"
             )
 
             # Call apply_prompt function with the prompt name and arguments
-            await apply_prompt_func(
+            await apply_prompt_func(namespaced_name, arg_values, agent_name)
 
         except Exception as e:
             import traceback
mcp_agent/llm/augmented_llm.py
CHANGED
@@ -32,6 +32,7 @@ from mcp_agent.llm.sampling_format_converter import (
     ProviderFormatConverter,
 )
 from mcp_agent.logging.logger import get_logger
+from mcp_agent.mcp.helpers.content_helpers import get_text
 from mcp_agent.mcp.interfaces import (
     AugmentedLLMProtocol,
     ModelT,
@@ -147,8 +148,11 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         """Apply the prompt and return the result as a Pydantic model, or None if coercion fails"""
         try:
             result: PromptMessageMultipart = await self.generate(prompt, request_params)
-
+            final_generation = get_text(result.content[-1]) or ""
+            await self.show_assistant_message(final_generation)
+            json_data = from_json(final_generation, allow_partial=True)
             validated_model = model.model_validate(json_data)
+
             return cast("ModelT", validated_model), Prompt.assistant(json_data)
         except Exception as e:
             logger = get_logger(__name__)
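The structured-output path now pulls the final text block with get_text, echoes it with show_assistant_message, and parses it tolerantly with pydantic_core.from_json before validating against the caller's model. A standalone sketch of that parsing pattern, using a hypothetical Pydantic model that mirrors the router schema shown earlier:

from pydantic import BaseModel
from pydantic_core import from_json


class RoutingDecision(BaseModel):  # hypothetical model, for illustration only
    agent: str
    confidence: str
    reasoning: str


# allow_partial=True tolerates a truncated generation (the closing brace is missing here)
raw = '{"agent": "sizer", "confidence": "high", "reasoning": "asks for a size"'
data = from_json(raw, allow_partial=True)
decision = RoutingDecision.model_validate(data)
print(decision.agent, decision.confidence)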
mcp_agent/llm/model_factory.py
CHANGED
@@ -11,6 +11,7 @@ from mcp_agent.llm.providers.augmented_llm_anthropic import AnthropicAugmentedLL
 from mcp_agent.llm.providers.augmented_llm_deepseek import DeepSeekAugmentedLLM
 from mcp_agent.llm.providers.augmented_llm_generic import GenericAugmentedLLM
 from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
+from mcp_agent.llm.providers.augmented_llm_openrouter import OpenRouterAugmentedLLM
 from mcp_agent.mcp.interfaces import AugmentedLLMProtocol
 
 # from mcp_agent.workflows.llm.augmented_llm_deepseek import DeekSeekAugmentedLLM
@@ -23,6 +24,7 @@ LLMClass = Union[
     Type[PassthroughLLM],
     Type[PlaybackLLM],
     Type[DeepSeekAugmentedLLM],
+    Type[OpenRouterAugmentedLLM],
 ]
 
 
@@ -34,6 +36,7 @@ class Provider(Enum):
     FAST_AGENT = auto()
     DEEPSEEK = auto()
     GENERIC = auto()
+    OPENROUTER = auto()
 
 
 class ReasoningEffort(Enum):
@@ -63,6 +66,7 @@ class ModelFactory:
         "fast-agent": Provider.FAST_AGENT,
         "deepseek": Provider.DEEPSEEK,
         "generic": Provider.GENERIC,
+        "openrouter": Provider.OPENROUTER,
     }
 
     # Mapping of effort strings to enum values
@@ -120,6 +124,7 @@ class ModelFactory:
         Provider.FAST_AGENT: PassthroughLLM,
         Provider.DEEPSEEK: DeepSeekAugmentedLLM,
         Provider.GENERIC: GenericAugmentedLLM,
+        Provider.OPENROUTER: OpenRouterAugmentedLLM,
     }
 
     # Mapping of special model names to their specific LLM classes
mcp_agent/llm/providers/augmented_llm_anthropic.py
CHANGED
@@ -1,5 +1,7 @@
 import os
-from typing import TYPE_CHECKING, List
+from typing import TYPE_CHECKING, List
+
+from mcp.types import EmbeddedResource, ImageContent, TextContent
 
 from mcp_agent.core.prompt import Prompt
 from mcp_agent.llm.providers.multipart_converter_anthropic import (
@@ -28,13 +30,11 @@ from mcp.types import (
     CallToolRequest,
     CallToolRequestParams,
 )
-from pydantic_core import from_json
 from rich.text import Text
 
 from mcp_agent.core.exceptions import ProviderKeyError
 from mcp_agent.llm.augmented_llm import (
     AugmentedLLM,
-    ModelT,
     RequestParams,
 )
 from mcp_agent.logging.logger import get_logger
@@ -69,14 +69,15 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
             use_history=True,
         )
 
-    def _base_url(self) -> str:
+    def _base_url(self) -> str | None:
+        assert self.context.config
         return self.context.config.anthropic.base_url if self.context.config.anthropic else None
 
     async def generate_internal(
         self,
         message_param,
         request_params: RequestParams | None = None,
-    ):
+    ) -> list[TextContent | ImageContent | EmbeddedResource]:
         """
         Process a query using an LLM and available tools.
         Override this method to use a different LLM.
@@ -113,7 +114,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
             for tool in tool_list.tools
         ]
 
-        responses: List[
+        responses: List[TextContent | ImageContent | EmbeddedResource] = []
 
         model = self.default_request_params.model
 
@@ -175,7 +176,8 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
 
             response_as_message = self.convert_message_to_message_param(response)
             messages.append(response_as_message)
-
+            if response.content[0].type == "text":
+                responses.append(TextContent(type="text", text=response.content[0].text))
 
             if response.stop_reason == "end_turn":
                 message_text = ""
@@ -255,6 +257,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
 
                     # Add each result to our collection
                     tool_results.append((tool_use_id, result))
+                    responses.extend(result.content)
 
                 messages.append(AnthropicConverter.create_tool_results_message(tool_results))
 
@@ -295,41 +298,22 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
 
         return api_key
 
-    async def
+    async def generate_messages(
         self,
         message_param,
         request_params: RequestParams | None = None,
-    ) ->
+    ) -> PromptMessageMultipart:
         """
         Process a query using an LLM and available tools.
         The default implementation uses Claude as the LLM.
         Override this method to use a different LLM.
 
         """
-
-        responses: List[Message] = await self.generate_internal(
+        res = await self.generate_internal(
             message_param=message_param,
             request_params=request_params,
         )
-
-        final_text: List[str] = []
-
-        # Process all responses and collect all text content
-        for response in responses:
-            # Extract text content from each message
-            message_text = ""
-            for content in response.content:
-                if content.type == "text":
-                    # Extract text from text blocks
-                    message_text += content.text
-
-            # Only append non-empty text
-            if message_text:
-                final_text.append(message_text)
-
-        # TODO -- make tool detail inclusion behaviour configurable
-        # Join all collected text
-        return "\n".join(final_text)
+        return Prompt.assistant(*res)
 
     async def _apply_prompt_provider_specific(
         self,
@@ -352,30 +336,12 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         if last_message.role == "user":
             self.logger.debug("Last message in prompt is from user, generating assistant response")
             message_param = AnthropicConverter.convert_to_anthropic(last_message)
-            return
+            return await self.generate_messages(message_param, request_params)
         else:
             # For assistant messages: Return the last message content as text
             self.logger.debug("Last message in prompt is from assistant, returning it directly")
             return last_message
 
-    async def generate_structured(
-        self,
-        message: str,
-        response_model: Type[ModelT],
-        request_params: RequestParams | None = None,
-    ) -> ModelT:
-        # TODO -- simiar to the OAI version, we should create a tool call for the expected schema
-        response = await self.generate_str(
-            message=message,
-            request_params=request_params,
-        )
-        # Don't try to parse if we got no response
-        if not response:
-            self.logger.error("No response from generate_str")
-            return None
-
-        return response_model.model_validate(from_json(response, allow_partial=True))
-
     @classmethod
     def convert_message_to_message_param(cls, message: Message, **kwargs) -> MessageParam:
         """Convert a response object to an input parameter object to allow LLM calls to be chained."""
mcp_agent/llm/providers/augmented_llm_openai.py
CHANGED
@@ -5,6 +5,9 @@ from mcp.types import (
     CallToolRequest,
     CallToolRequestParams,
     CallToolResult,
+    EmbeddedResource,
+    ImageContent,
+    TextContent,
 )
 from openai import AuthenticationError, OpenAI
 
@@ -115,7 +118,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
         self,
         message,
         request_params: RequestParams | None = None,
-    ) -> List[
+    ) -> List[TextContent | ImageContent | EmbeddedResource]:
         """
         Process a query using an LLM and available tools.
         The default implementation uses OpenAI's ChatCompletion as the LLM.
@@ -164,7 +167,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
         if not available_tools:
             available_tools = None  # deepseek does not allow empty array
 
-        responses: List[
+        responses: List[TextContent | ImageContent | EmbeddedResource] = []
         model = self.default_request_params.model
 
         # we do NOT send stop sequences as this causes errors with mutlimodal processing
@@ -218,7 +221,9 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
 
             choice = response.choices[0]
             message = choice.message
-
+            # prep for image/audio gen models
+            if message.content:
+                responses.append(TextContent(type="text", text=message.content))
 
             converted_message = self.convert_message_to_message_param(message, name=self.name)
             messages.append(converted_message)
@@ -258,7 +263,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
                     self.show_oai_tool_result(str(result))
 
                     tool_results.append((tool_call.id, result))
-
+                    responses.extend(result.content)
                 messages.extend(OpenAIConverter.convert_function_results_to_openai(tool_results))
 
                 self.logger.debug(
@@ -310,39 +315,6 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
 
         return responses
 
-    async def generate_str(
-        self,
-        message,
-        request_params: RequestParams | None = None,
-    ) -> str:
-        """
-        Process a query using an LLM and available tools.
-        The default implementation uses OpenAI's ChatCompletion as the LLM.
-        Override this method to use a different LLM.
-
-        Special commands:
-        - "***SAVE_HISTORY <filename.md>" - Saves the conversation history to the specified file
-          in MCP prompt format with user/assistant delimiters.
-        """
-
-        responses = await self.generate_internal(
-            message=message,
-            request_params=request_params,
-        )
-
-        final_text: List[str] = []
-
-        for response in responses:
-            content = response.content
-            if not content:
-                continue
-
-            if isinstance(content, str):
-                final_text.append(content)
-                continue
-
-        return "\n".join(final_text)
-
     async def _apply_prompt_provider_specific(
         self,
         multipart_messages: List["PromptMessageMultipart"],
@@ -366,7 +338,13 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
             # For user messages: Generate response to the last one
             self.logger.debug("Last message in prompt is from user, generating assistant response")
             message_param = OpenAIConverter.convert_to_openai(last_message)
-
+            responses: List[
+                TextContent | ImageContent | EmbeddedResource
+            ] = await self.generate_internal(
+                message_param,
+                request_params,
+            )
+            return Prompt.assistant(*responses)
         else:
             # For assistant messages: Return the last message content as text
             self.logger.debug("Last message in prompt is from assistant, returning it directly")
@@ -411,7 +389,8 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
                 role="system", content=self.instruction
             )
             messages.insert(0, system_msg)
-
+        model_name = self.default_request_params.model
+        self.show_user_message(prompt[-1].first_text(), model_name, self.chat_turn())
         # Use the beta parse feature
         try:
             openai_client = OpenAI(api_key=self._api_key(), base_url=self._base_url())
@@ -429,8 +408,8 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
 
         if response and isinstance(response[0], BaseException):
             raise response[0]
-
         parsed_result = response[0].choices[0].message
+        await self.show_assistant_message(parsed_result.content)
         logger.debug("Successfully used OpenAI beta parse feature for structured output")
         return parsed_result.parsed, Prompt.assistant(parsed_result.content)
 
mcp_agent/llm/providers/augmented_llm_openrouter.py
ADDED
@@ -0,0 +1,78 @@
+import os
+
+from mcp_agent.core.exceptions import ProviderKeyError
+from mcp_agent.core.request_params import RequestParams
+from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
+
+OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1"
+# No single default model for OpenRouter, users must specify full path
+DEFAULT_OPENROUTER_MODEL = None
+
+
+class OpenRouterAugmentedLLM(OpenAIAugmentedLLM):
+    """Augmented LLM provider for OpenRouter, using an OpenAI-compatible API."""
+    def __init__(self, *args, **kwargs) -> None:
+        kwargs["provider_name"] = "OpenRouter"  # Set provider name
+        super().__init__(*args, **kwargs)
+
+    def _initialize_default_params(self, kwargs: dict) -> RequestParams:
+        """Initialize OpenRouter-specific default parameters."""
+        # OpenRouter model names include the provider, e.g., "google/gemini-flash-1.5"
+        # The model should be passed in the 'model' kwarg during factory creation.
+        chosen_model = kwargs.get("model", DEFAULT_OPENROUTER_MODEL)
+        if not chosen_model:
+            # Unlike Deepseek, OpenRouter *requires* a model path in the identifier.
+            # The factory should extract this before calling the constructor.
+            # We rely on the model being passed correctly via kwargs.
+            # If it's still None here, it indicates an issue upstream (factory or user input).
+            # However, the base class _get_model handles the error if model is None.
+            pass
+
+
+        return RequestParams(
+            model=chosen_model,  # Will be validated by base class
+            systemPrompt=self.instruction,
+            parallel_tool_calls=True,  # Default based on OpenAI provider
+            max_iterations=10,  # Default based on OpenAI provider
+            use_history=True,  # Default based on OpenAI provider
+        )
+
+    def _api_key(self) -> str:
+        """Retrieve the OpenRouter API key from config or environment variables."""
+        config = self.context.config
+        api_key = None
+
+        # Check config file first
+        if config and hasattr(config, 'openrouter') and config.openrouter:
+            api_key = getattr(config.openrouter, 'api_key', None)
+            if api_key == "<your-openrouter-api-key-here>" or not api_key:
+                api_key = None
+
+        # Fallback to environment variable
+        if api_key is None:
+            api_key = os.getenv("OPENROUTER_API_KEY")
+
+        if not api_key:
+            raise ProviderKeyError(
+                "OpenRouter API key not configured",
+                "The OpenRouter API key is required but not set.\n"
+                "Add it to your configuration file under openrouter.api_key\n"
+                "Or set the OPENROUTER_API_KEY environment variable.",
+            )
+        return api_key
+
+    def _base_url(self) -> str:
+        """Retrieve the OpenRouter base URL from config or use the default."""
+        base_url = OPENROUTER_BASE_URL  # Default
+        config = self.context.config
+
+        # Check config file for override
+        if config and hasattr(config, 'openrouter') and config.openrouter:
+            config_base_url = getattr(config.openrouter, 'base_url', None)
+            if config_base_url:
+                base_url = config_base_url
+
+        return base_url
+
+    # Other methods like _get_model, _send_request etc., are inherited from OpenAIAugmentedLLM
+    # We may override them later if OpenRouter deviates significantly or offers unique features.
mcp_agent/mcp/logger_textio.py
CHANGED
@@ -3,7 +3,7 @@ Utilities for MCP stdio client integration with our logging system.
 """
 
 import io
-import
+import os
 from typing import TextIO
 
 from mcp_agent.logging.logger import get_logger
@@ -78,10 +78,21 @@ class LoggerTextIO(TextIO):
 
     def fileno(self) -> int:
         """
-        Return a file descriptor for
-
+        Return a file descriptor for /dev/null.
+        This prevents output from showing on the terminal
+        while still allowing our write() method to capture it for logging.
         """
-
+        if not hasattr(self, '_devnull_fd'):
+            self._devnull_fd = os.open(os.devnull, os.O_WRONLY)
+        return self._devnull_fd
+
+    def __del__(self):
+        """Clean up the devnull file descriptor."""
+        if hasattr(self, '_devnull_fd'):
+            try:
+                os.close(self._devnull_fd)
+            except (OSError, AttributeError):
+                pass
 
 
 def get_stderr_handler(server_name: str) -> TextIO:
mcp_agent/mcp/mcp_connection_manager.py
CHANGED
@@ -3,6 +3,7 @@ Manages the lifecycle of multiple MCP server connections.
 """
 
 import asyncio
+import traceback
 from datetime import timedelta
 from typing import (
     TYPE_CHECKING,
@@ -179,7 +180,7 @@ async def _server_lifecycle_task(server_conn: ServerConnection) -> None:
             },
         )
         server_conn._error_occurred = True
-        server_conn._error_message =
+        server_conn._error_message = traceback.format_exception(exc)
         # If there's an error, we should also set the event so that
         # 'get_server' won't hang
         server_conn._initialized_event.set()
@@ -270,7 +271,7 @@ class MCPConnectionManager(ContextDependent):
             logger.debug(f"{server_name}: Creating stdio client with custom error handler")
             return stdio_client(server_params, errlog=error_handler)
         elif config.transport == "sse":
-            return sse_client(config.url)
+            return sse_client(config.url, config.headers)
         else:
             raise ValueError(f"Unsupported transport: {config.transport}")
 
@@ -328,7 +329,8 @@ class MCPConnectionManager(ContextDependent):
         if not server_conn.is_healthy():
             error_msg = server_conn._error_message or "Unknown error"
             raise ServerInitializationError(
-                f"MCP Server: '{server_name}': Failed to initialize
+                f"MCP Server: '{server_name}': Failed to initialize - see details. Check fastagent.config.yaml?",
+                error_msg,
             )
 
         return server_conn
mcp_agent/mcp_server_registry.py
CHANGED
@@ -152,7 +152,7 @@ class ServerRegistry:
             raise ValueError(f"URL is required for SSE transport: {server_name}")
 
         # Use sse_client to get the read and write streams
-        async with sse_client(config.url) as (read_stream, write_stream):
+        async with sse_client(config.url, config.headers) as (read_stream, write_stream):
             session = client_session_factory(
                 read_stream,
                 write_stream,
@@ -260,6 +260,7 @@ class ServerRegistry:
         Returns:
             MCPServerSettings: The server configuration.
         """
+
         server_config = self.registry.get(server_name)
         if server_config is None:
             logger.warning(f"Server '{server_name}' not found in registry.")
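Both SSE call sites now forward config.headers to the client, so per-server headers such as bearer tokens reach the transport. A minimal sketch of the underlying call, using the MCP SDK's sse_client the same way the registry does above; the URL and token are placeholders:

from mcp.client.sse import sse_client


async def open_sse_connection() -> None:
    url = "https://example.com/sse"
    headers = {"Authorization": "Bearer <token>"}
    # headers is passed straight through to the SSE transport
    async with sse_client(url, headers) as (read_stream, write_stream):
        ...  # hand the streams to a client session, as ServerRegistry does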
mcp_agent/mcp/mcp_activity.py
DELETED
@@ -1,18 +0,0 @@
-# import functools
-# from temporalio import activity
-# from typing import Dict, Any, List, Callable, Awaitable
-# from .gen_client import gen_client
-
-
-# def mcp_activity(server_name: str, mcp_call: Callable):
-#     def decorator(func):
-#         @activity.defn
-#         @functools.wraps(func)
-#         async def wrapper(*activity_args, **activity_kwargs):
-#             params = await func(*activity_args, **activity_kwargs)
-#             async with gen_client(server_name) as client:
-#                 return await mcp_call(client, params)
-
-#         return wrapper
-
-#     return decorator
{fast_agent_mcp-0.2.6.dist-info → fast_agent_mcp-0.2.8.dist-info}/WHEEL
File without changes
{fast_agent_mcp-0.2.6.dist-info → fast_agent_mcp-0.2.8.dist-info}/entry_points.txt
File without changes
{fast_agent_mcp-0.2.6.dist-info → fast_agent_mcp-0.2.8.dist-info}/licenses/LICENSE
File without changes