fast-agent-mcp 0.2.5__py3-none-any.whl → 0.2.7__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
fast_agent_mcp-0.2.5.dist-info/METADATA → fast_agent_mcp-0.2.7.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: fast-agent-mcp
- Version: 0.2.5
+ Version: 0.2.7
  Summary: Define, Prompt and Test MCP enabled Agents and Workflows
  Author-email: Shaun Smith <fastagent@llmindset.co.uk>, Sarmad Qadri <sarmad@lastmileai.dev>
  License: Apache License
@@ -260,8 +260,7 @@ The simple declarative syntax lets you concentrate on composing your Prompts and

  `fast-agent` is multi-modal, supporting Images and PDFs for both Anthropic and OpenAI endpoints via Prompts, Resources and MCP Tool Call results. The inclusion of passthrough and playback LLMs enable rapid development and test of Python glue-code for your applications.

- > [!TIP]
- > `fast-agent` is now MCP Native! Coming Soon - Full Documentation Site and Further MCP Examples.
+ > [!TIP] > `fast-agent` is now MCP Native! Coming Soon - Full Documentation Site and Further MCP Examples.

  ### Agent Application Development

@@ -525,7 +524,7 @@ agent["greeter"].send("Good Evening!") # Dictionary access is supported
  servers=["filesystem"], # list of MCP Servers for the agent
  model="o3-mini.high", # specify a model for the agent
  use_history=True, # agent maintains chat history
- request_params={"temperature": 0.7}, # additional parameters for the LLM (or RequestParams())
+ request_params=RequestParams(temperature=0.7), # additional parameters for the LLM (or RequestParams())
  human_input=True, # agent can request human input
  )
  ```
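Note: the `+` line above switches the README example from a plain dict to a typed `RequestParams` object. A minimal sketch of the new style, assuming `fast-agent-mcp` 0.2.7 is installed and that `RequestParams` accepts `temperature` as the excerpt shows (the import path follows `mcp_agent/core/request_params.py` in the RECORD below):

```python
from mcp_agent.core.request_params import RequestParams

# 0.2.5 README style: request_params={"temperature": 0.7}
# 0.2.7 README style: a typed object rather than a raw dict
params = RequestParams(temperature=0.7)
print(params.temperature)  # 0.7
```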
fast_agent_mcp-0.2.5.dist-info/RECORD → fast_agent_mcp-0.2.7.dist-info/RECORD CHANGED
@@ -1,40 +1,40 @@
  mcp_agent/__init__.py,sha256=-AIoeL4c9UAp_P4U0z-uIWTTmQWdihOis5nbQ5L_eao,1664
  mcp_agent/app.py,sha256=jBmzYM_o50g8vhlTgkkf5TGiBWNbXWViYnd0WANbpzo,10276
- mcp_agent/config.py,sha256=V6TZlKOUelv5N75fypWKFVvkY5YsgpoHrdiSsKxOiM0,11725
+ mcp_agent/config.py,sha256=ymz8WHTM08ENeiFLy-7-oYd4rQAO_V-C4OOVcZSa41s,11715
  mcp_agent/console.py,sha256=Gjf2QLFumwG1Lav__c07X_kZxxEUSkzV-1_-YbAwcwo,813
  mcp_agent/context.py,sha256=pp_F1Q1jgAxGrRccSZJutn1JUxYfVue-St3S8tUyptM,7903
  mcp_agent/context_dependent.py,sha256=QXfhw3RaQCKfscEEBRGuZ3sdMWqkgShz2jJ1ivGGX1I,1455
  mcp_agent/event_progress.py,sha256=25iz0yyg-O4glMmtijcYpDdUmtUIKsCmR_8A52GgeC4,2716
- mcp_agent/mcp_server_registry.py,sha256=r24xX4BYXj4BbWbU37uwuW9e1mFOYgpb258OMb21SaY,9928
+ mcp_agent/mcp_server_registry.py,sha256=w0sq-5o_AVVGfwUBo0c_Ekbyjd3Tjg9bzi2r8UZry7o,9945
  mcp_agent/progress_display.py,sha256=GeJU9VUt6qKsFVymG688hCMVCsAygG9ifiiEb5IcbN4,361
  mcp_agent/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- mcp_agent/agents/agent.py,sha256=NKz8HTCdjIBDSJwl6EHU2NDnZcAtYiaYH3YnbVGrc3Q,3882
- mcp_agent/agents/base_agent.py,sha256=mhLgsS_pST1LeBZmnBo9ldEthcKV9LZdBSdt5PkafCk,23184
+ mcp_agent/agents/agent.py,sha256=Tn2YKw_ytx9b8jC-65WYQmrnD43kYiZsLa4sVHxn9d4,3854
+ mcp_agent/agents/base_agent.py,sha256=dzyy4tDHJfRC4Sp-JqXeDwykk5SH55k89dUXQROIdQ4,23488
  mcp_agent/agents/workflow/__init__.py,sha256=HloteEW6kalvgR0XewpiFAqaQlMPlPJYg5p3K33IUzI,25
- mcp_agent/agents/workflow/chain_agent.py,sha256=ff5ksaJiAm007MMl8QO4pBTTIgQLcf9GLZpRtYkfBJQ,6201
- mcp_agent/agents/workflow/evaluator_optimizer.py,sha256=ArM2CySsTY0gSPndox1DdjecRdNtWIj-Qm9ApUWkygw,13103
- mcp_agent/agents/workflow/orchestrator_agent.py,sha256=nf7_Rq3XOiLfLsZ7MHVKAMkhaWJIz5NkWe2xYFH3yqc,21409
+ mcp_agent/agents/workflow/chain_agent.py,sha256=efftXdHc5F-XY8jnz5npHbKHhqnzHh28WbU5yQ4yUn0,6105
+ mcp_agent/agents/workflow/evaluator_optimizer.py,sha256=VWdzVIy_qSiVsDJO22ta3RB3drkvBfXk9HxBYMpsC5U,13300
+ mcp_agent/agents/workflow/orchestrator_agent.py,sha256=30hFQyAmtjQTX6Li_zWWIHCpdNpfZkDo57YXXW5xIsI,21561
  mcp_agent/agents/workflow/orchestrator_models.py,sha256=5P_aXADVT4Et8qT4e1cb9RelmHX5dCRrzu8j8T41Kdg,7230
  mcp_agent/agents/workflow/orchestrator_prompts.py,sha256=EXKEI174sshkZyPPEnWbwwNafzSPuA39MXL7iqG9cWc,9106
- mcp_agent/agents/workflow/parallel_agent.py,sha256=denkFKrvZJZ4c3Cja9cx-EOUhd-mDO30UIU48WGMnEM,6901
- mcp_agent/agents/workflow/router_agent.py,sha256=iOIPhMP9-w-lho50obx1rvGphI_-7Hdr9E9ohlKmtJk,10484
+ mcp_agent/agents/workflow/parallel_agent.py,sha256=SgIXJx2X_MSlLOv6WXYRezwjDYjU9f95eKQzTm5Y_lk,7087
+ mcp_agent/agents/workflow/router_agent.py,sha256=c4MU55U6q1DRayP0sDoyxdlnKX-N0LPbRv-MFlwbwrY,11165
  mcp_agent/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mcp_agent/cli/__main__.py,sha256=AVZ7tQFhU_sDOGuUGJq8ujgKtcxsYJBJwHbVaaiRDlI,166
  mcp_agent/cli/main.py,sha256=PZdPJfsAJOm80vTu7j_XpMPhaDZOpqSe-ciU3YQsmA4,3149
  mcp_agent/cli/terminal.py,sha256=GRwD-RGW7saIz2IOWZn5vD6JjiArscELBThm1GTFkuI,1065
  mcp_agent/cli/commands/bootstrap.py,sha256=Pv3LQUQLK_5-8nbOQ6iibJI7awgD04P9xh6-VpU15pw,11571
  mcp_agent/cli/commands/config.py,sha256=jU2gl4d5YESrdUboh3u6mxf7CxVT-_DT_sK8Vuh3ajw,231
- mcp_agent/cli/commands/setup.py,sha256=iXsKrf31Szv4Umbk9JfR5as9HcivFJchhE1KKzHxyIo,6345
+ mcp_agent/cli/commands/setup.py,sha256=CsmfIvKFfOhU1bOkm1cTqNseQdn3qdlfXN4BALwQ3Ik,6345
  mcp_agent/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- mcp_agent/core/agent_app.py,sha256=9c7V-gZKudl-6u2BB__aLEvC4iemJeWpyoI8fGpXsXk,10145
- mcp_agent/core/agent_types.py,sha256=Qyhvzy2CcD7wMaxavuMUOQnD_rg5LZ1RT3DwXVYaM1Q,1345
- mcp_agent/core/direct_decorators.py,sha256=_kS0C9UbwRQ54z58vfUapFXXyJrm1XRVyLMuB7bL0b8,14471
- mcp_agent/core/direct_factory.py,sha256=oMozbr6kK37vWtAIvzCKtlX1i7KTez3hLdQfeXCWUjM,17580
+ mcp_agent/core/agent_app.py,sha256=5nQJNo8DocIRWiX4pVKAHUZF8s6HWpc-hJnfzl_1v1c,9697
+ mcp_agent/core/agent_types.py,sha256=LuWslu9YI6JRnAWwh_A1ZejK72-e839wH7tf2MHxSIU,1389
+ mcp_agent/core/direct_decorators.py,sha256=Q6t3VpRPLCRzqJycPZIkKbbEJMVocxdScp5o2xn4gLU,14460
+ mcp_agent/core/direct_factory.py,sha256=hYFCucZVAQ2wrfqIe9Qameoa-cCRaQ53R97EMHvUZAM,17572
  mcp_agent/core/enhanced_prompt.py,sha256=P9FAtc0rqIYQfUDkTNVXitFIZEtB3fdq_Nr0-st64Qg,17948
  mcp_agent/core/error_handling.py,sha256=xoyS2kLe0eG0bj2eSJCJ2odIhGUve2SbDR7jP-A-uRw,624
  mcp_agent/core/exceptions.py,sha256=ENAD_qGG67foxy6vDkIvc-lgopIUQy6O7zvNPpPXaQg,2289
- mcp_agent/core/fastagent.py,sha256=HTy1OCAhpHIM-4cd37-dxvb97eZUELN-ICAEFgqmJMk,18503
- mcp_agent/core/interactive_prompt.py,sha256=04yoeOX2JLatr2tuOFfnb84GMwFUIBnBC7y1M_gqOM8,17692
+ mcp_agent/core/fastagent.py,sha256=T2kyq32wBJCOj13Zy1G_XJjQZb1S4HVdx3OBzmEMHBg,18644
+ mcp_agent/core/interactive_prompt.py,sha256=zU53h8mmaJBnddYy2j57tH7jreQ9PUz7vLEo2gdDrio,17704
  mcp_agent/core/mcp_content.py,sha256=2D7KHY9mG_vxoDwFLKvsPQV9VRIzHItM7V-jcEnACh8,8878
  mcp_agent/core/prompt.py,sha256=qnintOUGEoDPYLI9bu9G2OlgVMCe5ZPUZilgMzydXhc,7919
  mcp_agent/core/request_params.py,sha256=bEjWo86fqxdiWm2U5nPDd1uCUpcIQO9oiCinhB8lQN0,1185
@@ -50,7 +50,7 @@ mcp_agent/human_input/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3h
  mcp_agent/human_input/handler.py,sha256=s712Z5ssTCwjL9-VKoIdP5CtgMh43YvepynYisiWTTA,3144
  mcp_agent/human_input/types.py,sha256=RtWBOVzy8vnYoQrc36jRLn8z8N3C4pDPMBN5vF6qM5Y,1476
  mcp_agent/llm/__init__.py,sha256=d8zgwG-bRFuwiMNMYkywg_qytk4P8lawyld_meuUmHI,68
- mcp_agent/llm/augmented_llm.py,sha256=K7sfUaJrFqupEbq7xi7hDjyAC4UMXQBYQxza5TFJ6VA,18117
+ mcp_agent/llm/augmented_llm.py,sha256=YIB3I_taoglo_vSmZLQ50cv1qCSctaQlWVwjI-7WTkk,18304
  mcp_agent/llm/augmented_llm_passthrough.py,sha256=U0LssNWNVuZRuD9I7Wuvpo7vdDW4xtoPLirnYCgBGTY,6128
  mcp_agent/llm/augmented_llm_playback.py,sha256=YVR2adzjMf9Q5WfYBytryWMRqJ87a3kNBnjxhApsMcU,3413
  mcp_agent/llm/memory.py,sha256=UakoBCJBf59JBtB6uyZM0OZjlxDW_VHtSfDs08ibVEc,3312
@@ -60,10 +60,10 @@ mcp_agent/llm/sampling_converter.py,sha256=C7wPBlmT0eD90XWabC22zkxsrVHKCrjwIwg6c
  mcp_agent/llm/sampling_format_converter.py,sha256=xGz4odHpOcP7--eFaJaFtUR8eR9jxZS7MnLH6J7n0EU,1263
  mcp_agent/llm/providers/__init__.py,sha256=heVxtmuqFJOnjjxHz4bWSqTAxXoN1E8twC_gQ_yJpHk,265
  mcp_agent/llm/providers/anthropic_utils.py,sha256=vYDN5G5jKMhD2CQg8veJYab7tvvzYkDMq8M1g_hUAQg,3275
- mcp_agent/llm/providers/augmented_llm_anthropic.py,sha256=9JXyweks5Joes4ERtmi2wX8i7ZsXydKM7IkMq7s7dIU,15429
+ mcp_agent/llm/providers/augmented_llm_anthropic.py,sha256=CNKpTEvWqjOteACUx_Vha0uFpPt32C17JrkSXg_allM,14445
  mcp_agent/llm/providers/augmented_llm_deepseek.py,sha256=SdYDqZZ9hM9sBvW1FSItNn_ENEKQXGNKwVHGnjqjyAA,1927
  mcp_agent/llm/providers/augmented_llm_generic.py,sha256=IIgwPYsVGwDdL2mMYsc5seY3pVFblMwmnxoI5dbxras,1524
- mcp_agent/llm/providers/augmented_llm_openai.py,sha256=6ZUEOXW-cDENAizMPUKJhhklJyQf73IcyVqT9-3To80,18215
+ mcp_agent/llm/providers/augmented_llm_openai.py,sha256=Wso9GVgsq8y3sqlOzTk_iQqrkCOL3LyuG07nA1PWDng,17913
  mcp_agent/llm/providers/multipart_converter_anthropic.py,sha256=t5lHYGfFUacJldnrVtMNW-8gEMoto8Y7hJkDrnyZR-Y,16650
  mcp_agent/llm/providers/multipart_converter_openai.py,sha256=zCj0LBgd9FDG8aL_GeTrPo2ssloYnmC_Uj3ENWVUJAg,16753
  mcp_agent/llm/providers/openai_multipart.py,sha256=qKBn7d3jSabnJmVgWweVzqh8q9mBqr09fsPmP92niAQ,6899
@@ -80,12 +80,11 @@ mcp_agent/logging/tracing.py,sha256=d5lSXakzzi5PtQpUkVkOnYaGX8NduGPq__S7vx-Ln8U,
  mcp_agent/logging/transport.py,sha256=m8YsLLu5T8eof_ndpLQs4gHOzqqEL98xsVwBwDsBfxI,17335
  mcp_agent/mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mcp_agent/mcp/gen_client.py,sha256=fAVwFVCgSamw4PwoWOV4wrK9TABx1S_zZv8BctRyF2k,3030
- mcp_agent/mcp/interfaces.py,sha256=8fsQj8r7sMrIyrJHHbUIEP86SVLyXPpEI36KZq1abc0,6644
+ mcp_agent/mcp/interfaces.py,sha256=vma7bbWbY3zp1RM6hMYxVO4aV6Vfaygm-nLwzK2jFKI,6748
  mcp_agent/mcp/logger_textio.py,sha256=OpnqMam9Pu0oVzYQWFMhrX1dRg2f5Fqb3qqPA6QAATM,2778
- mcp_agent/mcp/mcp_activity.py,sha256=CajXCFWZ2cKEX9s4-HfNVAj471ePTVs4NOkvmIh65tE,592
  mcp_agent/mcp/mcp_agent_client_session.py,sha256=RMYNltc2pDIzxwEJSS5589RbvPO0KWV4Y3jSyAmhKf0,4181
  mcp_agent/mcp/mcp_aggregator.py,sha256=jaWbOvb3wioECohZ47CubyxfJ5QkfNSshu1hwhZksG4,40486
- mcp_agent/mcp/mcp_connection_manager.py,sha256=desQBreHbIcjY7AidcDO6pFomHOx9oOZPOWIcHAx1K0,13761
+ mcp_agent/mcp/mcp_connection_manager.py,sha256=AMIm2FBbIk7zHInb8X-kFSQFO5TKcoi9w8WU8nx8Ig0,13834
  mcp_agent/mcp/mime_utils.py,sha256=difepNR_gpb4MpMLkBRAoyhDk-AjXUHTiqKvT_VwS1o,1805
  mcp_agent/mcp/prompt_message_multipart.py,sha256=IpIndd75tAcCbJbfqjpAF0tOUUP1TQceDbWoxO5gvpo,3684
  mcp_agent/mcp/prompt_render.py,sha256=k3v4BZDThGE2gGiOYVQtA6x8WTEdOuXIEnRafANhN1U,2996
@@ -102,7 +101,7 @@ mcp_agent/mcp/prompts/prompt_load.py,sha256=VkcY6dD1jRCu-OB5AtSO8YwVATjEoYCkyAIG
  mcp_agent/mcp/prompts/prompt_server.py,sha256=tXtQd4EnH86MmdAvHlXm4oOS1dWLSCW5PvoA7uU1TvA,16493
  mcp_agent/mcp/prompts/prompt_template.py,sha256=EejiqGkau8OizORNyKTUwUjrPof5V-hH1H_MBQoQfXw,15732
  mcp_agent/mcp_server/__init__.py,sha256=zBU51ITHIEPScd9nRafnhEddsWqXRPAAvHhkrbRI2_4,155
- mcp_agent/mcp_server/agent_server.py,sha256=5jEHJcoKHKRhMAwn3HArqiqtiiLcFZvantABoEHSw8k,6470
+ mcp_agent/mcp_server/agent_server.py,sha256=LVZNML2_ysK7nVVLDou8pQuQWiEsMFZLryn_KihmkUQ,6431
  mcp_agent/resources/examples/data-analysis/analysis-campaign.py,sha256=QdNdo0-7LR4Uzw61hEU_jVKmWyk6A9YpGo81kMwVobM,7267
  mcp_agent/resources/examples/data-analysis/analysis.py,sha256=M9z8Q4YC5OGuqSa5uefYmmfmctqMn-WqCSfg5LI407o,2609
  mcp_agent/resources/examples/data-analysis/fastagent.config.yaml,sha256=ini94PHyJCfgpjcjHKMMbGuHs6LIj46F1NwY0ll5HVk,1609
@@ -134,8 +133,8 @@ mcp_agent/resources/examples/workflows/orchestrator.py,sha256=rOGilFTliWWnZ3Jx5w
  mcp_agent/resources/examples/workflows/parallel.py,sha256=n0dFN26QvYd2wjgohcaUBflac2SzXYx-bCyxMSousJE,1884
  mcp_agent/resources/examples/workflows/router.py,sha256=E4x_-c3l4YW9w1i4ARcDtkdeqIdbWEGfsMzwLYpdbVc,1677
  mcp_agent/ui/console_display.py,sha256=TVGDtJ37hc6UG0ei9g7ZPZZfFNeS1MYozt-Mx8HsPCk,9752
- fast_agent_mcp-0.2.5.dist-info/METADATA,sha256=oL4nXJVnNJOv5M5KccLqjAY7jZsygvnIY2-1ZdXb1SI,29839
- fast_agent_mcp-0.2.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- fast_agent_mcp-0.2.5.dist-info/entry_points.txt,sha256=qPM7vwtN1_KmP3dXehxgiCxUBHtqP7yfenZigztvY-w,226
- fast_agent_mcp-0.2.5.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
- fast_agent_mcp-0.2.5.dist-info/RECORD,,
+ fast_agent_mcp-0.2.7.dist-info/METADATA,sha256=V7zvKqAqqHxiKazK3bv1CmwcWGKJEc7NhwGZ_NKG1V0,29849
+ fast_agent_mcp-0.2.7.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ fast_agent_mcp-0.2.7.dist-info/entry_points.txt,sha256=qPM7vwtN1_KmP3dXehxgiCxUBHtqP7yfenZigztvY-w,226
+ fast_agent_mcp-0.2.7.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
+ fast_agent_mcp-0.2.7.dist-info/RECORD,,
mcp_agent/agents/agent.py CHANGED
@@ -66,7 +66,7 @@ class Agent(BaseAgent):
  agent_name_str = str(self.name)

  # Create agent_types dictionary with just this agent
- agent_types = {agent_name_str: getattr(self.config, "agent_type", "Agent")}
+ agent_types = {agent_name_str: self.agent_type}

  # Create the interactive prompt
  prompt = InteractivePrompt(agent_types=agent_types)
mcp_agent/agents/base_agent.py CHANGED
@@ -215,7 +215,7 @@ class BaseAgent(MCPAggregator, AgentProtocol):

  # Use the LLM to generate a response
  response = await self.generate([prompt], None)
- return response.first_text()
+ return response.all_text()

  def _normalize_message_input(
  self, message: Union[str, PromptMessage, PromptMessageMultipart]
@@ -622,6 +622,18 @@ class BaseAgent(MCPAggregator, AgentProtocol):
  response = await self.generate(prompts, request_params)
  return response.first_text()

+ @property
+ def agent_type(self) -> str:
+ """
+ Return the type of this agent.
+
+ This is used for display purposes in the interactive prompt and other UI elements.
+
+ Returns:
+ String representing the agent type
+ """
+ return self.config.agent_type
+
  @property
  def message_history(self) -> List[PromptMessageMultipart]:
  """
mcp_agent/agents/workflow/chain_agent.py CHANGED
@@ -9,8 +9,9 @@ from typing import Any, List, Optional, Tuple, Type

  from mcp.types import TextContent

- from mcp_agent.agents.agent import Agent, AgentConfig
+ from mcp_agent.agents.agent import Agent
  from mcp_agent.agents.base_agent import BaseAgent
+ from mcp_agent.core.agent_types import AgentConfig, AgentType
  from mcp_agent.core.prompt import Prompt
  from mcp_agent.core.request_params import RequestParams
  from mcp_agent.mcp.interfaces import ModelT
@@ -23,6 +24,12 @@ class ChainAgent(BaseAgent):
  Passes the output of each agent to the next agent in the chain.
  """

+ # TODO -- consider adding "repeat" mode
+ @property
+ def agent_type(self) -> str:
+ """Return the type of this agent."""
+ return AgentType.CHAIN.value
+
  def __init__(
  self,
  config: AgentConfig,
@@ -64,20 +71,11 @@
  # # Get the original user message (last message in the list)
  user_message = multipart_messages[-1] if multipart_messages else None

- # # If no user message, return an error
- # if not user_message:
- # return PromptMessageMultipart(
- # role="assistant",
- # content=[TextContent(type="text", text="No input message provided.")],
- # )
-
- # Initialize messages with the input
-
  if not self.cumulative:
  response: PromptMessageMultipart = await self.agents[0].generate(multipart_messages)
  # Process the rest of the agents in the chain
  for agent in self.agents[1:]:
- next_message = Prompt.user(response.content[0].text)
+ next_message = Prompt.user(*response.content)
  response = await agent.generate([next_message])

  return response
mcp_agent/agents/workflow/evaluator_optimizer.py CHANGED
@@ -14,6 +14,7 @@ from pydantic import BaseModel, Field

  from mcp_agent.agents.agent import Agent
  from mcp_agent.agents.base_agent import BaseAgent
+ from mcp_agent.core.agent_types import AgentType
  from mcp_agent.core.exceptions import AgentConfigError
  from mcp_agent.core.prompt import Prompt
  from mcp_agent.core.request_params import RequestParams
@@ -63,6 +64,11 @@ class EvaluatorOptimizerAgent(BaseAgent):
  for refinement, continuing until a quality threshold is reached or a maximum
  number of refinement cycles is completed.
  """
+
+ @property
+ def agent_type(self) -> str:
+ """Return the type of this agent."""
+ return AgentType.EVALUATOR_OPTIMIZER.value

  def __init__(
  self,
mcp_agent/agents/workflow/orchestrator_agent.py CHANGED
@@ -27,7 +27,7 @@ from mcp_agent.agents.workflow.orchestrator_prompts import (
  SYNTHESIZE_PLAN_PROMPT_TEMPLATE,
  TASK_PROMPT_TEMPLATE,
  )
- from mcp_agent.core.agent_types import AgentConfig
+ from mcp_agent.core.agent_types import AgentConfig, AgentType
  from mcp_agent.core.exceptions import AgentConfigError
  from mcp_agent.core.prompt import Prompt
  from mcp_agent.core.request_params import RequestParams
@@ -46,6 +46,11 @@ class OrchestratorAgent(BaseAgent):
  to specialized worker agents, synthesizing their results into a cohesive output.
  Supports both full planning and iterative planning modes.
  """
+
+ @property
+ def agent_type(self) -> str:
+ """Return the type of this agent."""
+ return AgentType.ORCHESTRATOR.value

  def __init__(
  self,
mcp_agent/agents/workflow/parallel_agent.py CHANGED
@@ -3,8 +3,9 @@ from typing import Any, List, Optional, Tuple

  from mcp.types import TextContent

- from mcp_agent.agents.agent import Agent, AgentConfig
+ from mcp_agent.agents.agent import Agent
  from mcp_agent.agents.base_agent import BaseAgent
+ from mcp_agent.core.agent_types import AgentConfig, AgentType
  from mcp_agent.core.request_params import RequestParams
  from mcp_agent.mcp.interfaces import ModelT
  from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
@@ -17,6 +18,11 @@ class ParallelAgent(BaseAgent):
  This workflow performs both the fan-out and fan-in operations using LLMs.
  From the user's perspective, an input is specified and the output is returned.
  """
+
+ @property
+ def agent_type(self) -> str:
+ """Return the type of this agent."""
+ return AgentType.PARALLEL.value

  def __init__(
  self,
mcp_agent/agents/workflow/router_agent.py CHANGED
@@ -12,7 +12,7 @@ from pydantic import BaseModel

  from mcp_agent.agents.agent import Agent
  from mcp_agent.agents.base_agent import BaseAgent
- from mcp_agent.core.agent_types import AgentConfig
+ from mcp_agent.core.agent_types import AgentConfig, AgentType
  from mcp_agent.core.exceptions import AgentConfigError
  from mcp_agent.core.prompt import Prompt
  from mcp_agent.core.request_params import RequestParams
@@ -53,12 +53,29 @@ You are a highly accurate request router that directs incoming requests to the m
  Your task is to analyze the request and determine the most appropriate agent from the options above.

  <fastagent:instruction>
- Respond in JSON format. NEVER include Code Fences:
+ Respond with JSON following the schema below:
  {{
- "agent": "<agent name>",
- "confidence": "<high, medium or low>",
- "reasoning": "<brief explanation>"
+ "type": "object",
+ "required": ["agent", "confidence", "reasoning"],
+ "properties": {{
+ "agent": {{
+ "type": "string",
+ "description": "The exact name of the selected agent"
+ }},
+ "confidence": {{
+ "type": "string",
+ "enum": ["high", "medium", "low"],
+ "description": "Your confidence level in this selection"
+ }},
+ "reasoning": {{
+ "type": "string",
+ "description": "Brief explanation for your selection"
+ }}
+ }}
  }}
+
+ Supply only the JSON with no preamble. Use "reasoning" field to describe actions. NEVER EMIT CODE FENCES.
+
  </fastagent:instruction>
  """

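Note: the 0.2.7 prompt sends a JSON Schema instead of a filled-in example object. For reference, a response that satisfies that schema (the agent name is hypothetical), with the schema's constraints checked explicitly:

```python
import json

decision = {
    "agent": "code_expert",                       # exact name of the selected agent
    "confidence": "high",                         # one of: high, medium, low
    "reasoning": "Request concerns source code.",
}
assert set(decision) == {"agent", "confidence", "reasoning"}
assert decision["confidence"] in ("high", "medium", "low")
print(json.dumps(decision))  # plain JSON: no preamble, no code fences
```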
@@ -88,6 +105,11 @@ class RouterAgent(BaseAgent):
  then dispatches the request to that agent and returns the response.
  """

+ @property
+ def agent_type(self) -> str:
+ """Return the type of this agent."""
+ return AgentType.ROUTER.value
+
  def __init__(
  self,
  config: AgentConfig,
mcp_agent/cli/commands/setup.py CHANGED
@@ -73,7 +73,7 @@ mcp:

  GITIGNORE_TEMPLATE = """
  # FastAgent secrets file
- fastagent-secrets.yaml
+ fastagent.secrets.yaml

  # Python
  __pycache__/
@@ -215,7 +215,7 @@ def init(
  if "fastagent.secrets.yaml" in created:
  console.print("\n[yellow]Important:[/yellow] Remember to:")
  console.print(
- "1. Add your API keys to fastagent-secrets.yaml or set OPENAI_API_KEY and ANTHROPIC_API_KEY environment variables"
+ "1. Add your API keys to fastagent.secrets.yaml or set OPENAI_API_KEY and ANTHROPIC_API_KEY environment variables"
  )
  console.print(
  "2. Keep fastagent.secrets.yaml secure and never commit it to version control"
mcp_agent/config.py CHANGED
@@ -75,6 +75,9 @@ class MCPServerSettings(BaseModel):
  url: str | None = None
  """The URL for the server (e.g. for SSE transport)."""

+ headers: Dict[str, str] | None = None
+ """Headers dictionary for SSE connections"""
+
  auth: MCPServerAuthSettings | None = None
  """The authentication configuration for the server."""

@@ -84,9 +87,6 @@
  env: Dict[str, str] | None = None
  """Environment variables to pass to the server process."""

- env: Dict[str, str] | None = None
- """Environment variables to pass to the server process."""
-
  sampling: MCPSamplingSettings | None = None
  """Sampling settings for this Client/Server pair"""

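Note: together these two hunks add a `headers` field for SSE connections and drop an `env` field that was declared twice; the connection-manager and server-registry hunks further below pass the new field straight to `sse_client`. A minimal sketch of the new setting, assuming `fast-agent-mcp` 0.2.7 is installed — the URL and token are placeholders:

```python
from mcp_agent.config import MCPServerSettings

# Hypothetical SSE server entry carrying auth headers (new in 0.2.7).
settings = MCPServerSettings(
    transport="sse",
    url="https://example.com/mcp/sse",
    headers={"Authorization": "Bearer YOUR_TOKEN"},
)
print(settings.headers)
```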
mcp_agent/core/agent_app.py CHANGED
@@ -69,7 +69,7 @@ class AgentApp:
  if message:
  return await self._agent(agent_name).send(message)

- return await self.interactive(agent_name=agent_name, default_prompt=default_prompt)
+ return await self.interactive(agent=agent_name, default_prompt=default_prompt)

  async def send(
  self,
@@ -220,9 +220,9 @@ class AgentApp:
  """
  Deprecated - use interactive() instead.
  """
- return await self.interactive(agent_name=agent_name, default_prompt=default_prompt)
+ return await self.interactive(agent=agent_name, default_prompt=default_prompt)

- async def interactive(self, agent_name: str | None = None, default_prompt: str = "") -> str:
+ async def interactive(self, agent: str | None = None, default_prompt: str = "") -> str:
  """
  Interactive prompt for sending messages with advanced features.

@@ -235,11 +235,11 @@
  """

  # Get the default agent name if none specified
- if agent_name:
+ if agent:
  # Validate that this agent exists
- if agent_name not in self._agents:
- raise ValueError(f"Agent '{agent_name}' not found")
- target_name = agent_name
+ if agent not in self._agents:
+ raise ValueError(f"Agent '{agent}' not found")
+ target_name = agent
  else:
  # Use the first agent's name as default
  target_name = next(iter(self._agents.keys()))
@@ -248,18 +248,7 @@
  # The agent's prompt method doesn't fully support switching between agents

  # Create agent_types dictionary mapping agent names to their types
- agent_types = {}
- for name, agent in self._agents.items():
- # Determine agent type if possible
- agent_type = "Agent" # Default type
-
- # Try to get the type from the agent directly
- if hasattr(agent, "agent_type"):
- agent_type = agent.agent_type
- elif hasattr(agent, "config") and hasattr(agent.config, "agent_type"):
- agent_type = agent.config.agent_type
-
- agent_types[name] = agent_type
+ agent_types = {name: agent.agent_type for name, agent in self._agents.items()}

  # Create the interactive prompt
  prompt = InteractivePrompt(agent_types=agent_types)
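Note: across the `agent_app.py` hunks the `interactive()` keyword changes from `agent_name` to `agent`, and the type probing collapses onto the new `agent_type` property. A hypothetical call site under the 0.2.7 signature:

```python
async def chat(app) -> str:
    # 0.2.5: await app.interactive(agent_name="greeter")
    # 0.2.7: the keyword is now `agent`
    return await app.interactive(agent="greeter", default_prompt="Good Evening!")
```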
mcp_agent/core/agent_types.py CHANGED
@@ -33,6 +33,7 @@ class AgentConfig:
  use_history: bool = True
  default_request_params: RequestParams | None = None
  human_input: bool = False
+ agent_type: str = AgentType.BASIC.value

  def __post_init__(self) -> None:
  """Ensure default_request_params exists with proper history setting"""
mcp_agent/core/direct_decorators.py CHANGED
@@ -269,6 +269,7 @@ def router(
  *,
  agents: List[str],
  instruction: Optional[str] = None,
+ servers: List[str] = [],
  model: Optional[str] = None,
  use_history: bool = False,
  request_params: RequestParams | None = None,
@@ -301,7 +302,7 @@
  AgentType.ROUTER,
  name=name,
  instruction=instruction or default_instruction,
- servers=[], # Routers don't connect to servers directly
+ servers=servers,
  model=model,
  use_history=use_history,
  request_params=request_params,
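Note: routers may now attach MCP servers rather than always receiving `servers=[]`. A hypothetical declaration under 0.2.7 — the agent and server names are invented, and the `FastAgent`/decorator usage follows the package README:

```python
from mcp_agent.core.fastagent import FastAgent

fast = FastAgent("router demo")

@fast.router(
    name="dispatcher",
    agents=["coder", "writer"],  # worker agents to route between
    servers=["filesystem"],      # new in 0.2.7; previously forced to []
)
async def main() -> None:
    async with fast.run() as app:
        await app.interactive(agent="dispatcher")
```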
mcp_agent/core/direct_factory.py CHANGED
@@ -236,7 +236,7 @@ async def create_agents_by_type(
  config=config,
  context=app_instance.context,
  agents=router_agents,
- routing_instruction=agent_data.get("routing_instruction"),
+ routing_instruction=agent_data.get("instruction"),
  )
  await router.initialize()

mcp_agent/core/fastagent.py CHANGED
@@ -235,6 +235,8 @@ class FastAgent:
  progress_display.stop()

  # Pre-flight validation
+ if 0 == len(self.agents):
+ raise AgentConfigError("No agents defined. Please define at least one agent.")
  validate_server_references(self.context, self.agents)
  validate_workflow_references(self.agents)

mcp_agent/core/interactive_prompt.py CHANGED
@@ -77,7 +77,7 @@ class InteractivePrompt:
  if agent not in available_agents:
  raise ValueError(f"No agent named '{agent}'")

- # Create agent_types dictionary if not provided
+ # Ensure we track available agents in a set for fast lookup
  available_agents_set = set(available_agents)

  result = ""
mcp_agent/llm/augmented_llm.py CHANGED
@@ -32,6 +32,7 @@ from mcp_agent.llm.sampling_format_converter import (
  ProviderFormatConverter,
  )
  from mcp_agent.logging.logger import get_logger
+ from mcp_agent.mcp.helpers.content_helpers import get_text
  from mcp_agent.mcp.interfaces import (
  AugmentedLLMProtocol,
  ModelT,
@@ -147,8 +148,11 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
  """Apply the prompt and return the result as a Pydantic model, or None if coercion fails"""
  try:
  result: PromptMessageMultipart = await self.generate(prompt, request_params)
- json_data = from_json(result.first_text(), allow_partial=True)
+ final_generation = get_text(result.content[-1]) or ""
+ await self.show_assistant_message(final_generation)
+ json_data = from_json(final_generation, allow_partial=True)
  validated_model = model.model_validate(json_data)
+
  return cast("ModelT", validated_model), Prompt.assistant(json_data)
  except Exception as e:
  logger = get_logger(__name__)
mcp_agent/llm/providers/augmented_llm_anthropic.py CHANGED
@@ -1,5 +1,7 @@
  import os
- from typing import TYPE_CHECKING, List, Type
+ from typing import TYPE_CHECKING, List
+
+ from mcp.types import EmbeddedResource, ImageContent, TextContent

  from mcp_agent.core.prompt import Prompt
  from mcp_agent.llm.providers.multipart_converter_anthropic import (
@@ -28,13 +30,11 @@ from mcp.types import (
  CallToolRequest,
  CallToolRequestParams,
  )
- from pydantic_core import from_json
  from rich.text import Text

  from mcp_agent.core.exceptions import ProviderKeyError
  from mcp_agent.llm.augmented_llm import (
  AugmentedLLM,
- ModelT,
  RequestParams,
  )
  from mcp_agent.logging.logger import get_logger
@@ -69,14 +69,15 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
  use_history=True,
  )

- def _base_url(self) -> str:
+ def _base_url(self) -> str | None:
+ assert self.context.config
  return self.context.config.anthropic.base_url if self.context.config.anthropic else None

  async def generate_internal(
  self,
  message_param,
  request_params: RequestParams | None = None,
- ):
+ ) -> list[TextContent | ImageContent | EmbeddedResource]:
  """
  Process a query using an LLM and available tools.
  Override this method to use a different LLM.
@@ -113,7 +114,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
  for tool in tool_list.tools
  ]

- responses: List[Message] = []
+ responses: List[TextContent | ImageContent | EmbeddedResource] = []

  model = self.default_request_params.model

@@ -175,7 +176,8 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):

  response_as_message = self.convert_message_to_message_param(response)
  messages.append(response_as_message)
- responses.append(response)
+ if response.content[0].type == "text":
+ responses.append(TextContent(type="text", text=response.content[0].text))

  if response.stop_reason == "end_turn":
  message_text = ""
@@ -255,6 +257,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):

  # Add each result to our collection
  tool_results.append((tool_use_id, result))
+ responses.extend(result.content)

  messages.append(AnthropicConverter.create_tool_results_message(tool_results))

@@ -295,41 +298,22 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):

  return api_key

- async def generate_str(
+ async def generate_messages(
  self,
  message_param,
  request_params: RequestParams | None = None,
- ) -> str:
+ ) -> PromptMessageMultipart:
  """
  Process a query using an LLM and available tools.
  The default implementation uses Claude as the LLM.
  Override this method to use a different LLM.

  """
-
- responses: List[Message] = await self.generate_internal(
+ res = await self.generate_internal(
  message_param=message_param,
  request_params=request_params,
  )
-
- final_text: List[str] = []
-
- # Process all responses and collect all text content
- for response in responses:
- # Extract text content from each message
- message_text = ""
- for content in response.content:
- if content.type == "text":
- # Extract text from text blocks
- message_text += content.text
-
- # Only append non-empty text
- if message_text:
- final_text.append(message_text)
-
- # TODO -- make tool detail inclusion behaviour configurable
- # Join all collected text
- return "\n".join(final_text)
+ return Prompt.assistant(*res)

  async def _apply_prompt_provider_specific(
  self,
@@ -352,30 +336,12 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
  if last_message.role == "user":
  self.logger.debug("Last message in prompt is from user, generating assistant response")
  message_param = AnthropicConverter.convert_to_anthropic(last_message)
- return Prompt.assistant(await self.generate_str(message_param, request_params))
+ return await self.generate_messages(message_param, request_params)
  else:
  # For assistant messages: Return the last message content as text
  self.logger.debug("Last message in prompt is from assistant, returning it directly")
  return last_message

- async def generate_structured(
- self,
- message: str,
- response_model: Type[ModelT],
- request_params: RequestParams | None = None,
- ) -> ModelT:
- # TODO -- simiar to the OAI version, we should create a tool call for the expected schema
- response = await self.generate_str(
- message=message,
- request_params=request_params,
- )
- # Don't try to parse if we got no response
- if not response:
- self.logger.error("No response from generate_str")
- return None
-
- return response_model.model_validate(from_json(response, allow_partial=True))
-
  @classmethod
  def convert_message_to_message_param(cls, message: Message, **kwargs) -> MessageParam:
  """Convert a response object to an input parameter object to allow LLM calls to be chained."""
mcp_agent/llm/providers/augmented_llm_openai.py CHANGED
@@ -5,6 +5,9 @@ from mcp.types import (
  CallToolRequest,
  CallToolRequestParams,
  CallToolResult,
+ EmbeddedResource,
+ ImageContent,
+ TextContent,
  )
  from openai import AuthenticationError, OpenAI

@@ -115,7 +118,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
  self,
  message,
  request_params: RequestParams | None = None,
- ) -> List[ChatCompletionMessage]:
+ ) -> List[TextContent | ImageContent | EmbeddedResource]:
  """
  Process a query using an LLM and available tools.
  The default implementation uses OpenAI's ChatCompletion as the LLM.
@@ -164,7 +167,7 @@
  if not available_tools:
  available_tools = None # deepseek does not allow empty array

- responses: List[ChatCompletionMessage] = []
+ responses: List[TextContent | ImageContent | EmbeddedResource] = []
  model = self.default_request_params.model

  # we do NOT send stop sequences as this causes errors with mutlimodal processing
@@ -218,7 +221,9 @@

  choice = response.choices[0]
  message = choice.message
- responses.append(message)
+ # prep for image/audio gen models
+ if message.content:
+ responses.append(TextContent(type="text", text=message.content))

  converted_message = self.convert_message_to_message_param(message, name=self.name)
  messages.append(converted_message)
@@ -258,7 +263,7 @@
  self.show_oai_tool_result(str(result))

  tool_results.append((tool_call.id, result))
-
+ responses.extend(result.content)
  messages.extend(OpenAIConverter.convert_function_results_to_openai(tool_results))

  self.logger.debug(
@@ -310,39 +315,6 @@

  return responses

- async def generate_str(
- self,
- message,
- request_params: RequestParams | None = None,
- ) -> str:
- """
- Process a query using an LLM and available tools.
- The default implementation uses OpenAI's ChatCompletion as the LLM.
- Override this method to use a different LLM.
-
- Special commands:
- - "***SAVE_HISTORY <filename.md>" - Saves the conversation history to the specified file
- in MCP prompt format with user/assistant delimiters.
- """
-
- responses = await self.generate_internal(
- message=message,
- request_params=request_params,
- )
-
- final_text: List[str] = []
-
- for response in responses:
- content = response.content
- if not content:
- continue
-
- if isinstance(content, str):
- final_text.append(content)
- continue
-
- return "\n".join(final_text)
-
  async def _apply_prompt_provider_specific(
  self,
  multipart_messages: List["PromptMessageMultipart"],
@@ -366,7 +338,13 @@
  # For user messages: Generate response to the last one
  self.logger.debug("Last message in prompt is from user, generating assistant response")
  message_param = OpenAIConverter.convert_to_openai(last_message)
- return Prompt.assistant(await self.generate_str(message_param, request_params))
+ responses: List[
+ TextContent | ImageContent | EmbeddedResource
+ ] = await self.generate_internal(
+ message_param,
+ request_params,
+ )
+ return Prompt.assistant(*responses)
  else:
  # For assistant messages: Return the last message content as text
  self.logger.debug("Last message in prompt is from assistant, returning it directly")
@@ -411,7 +389,8 @@
  role="system", content=self.instruction
  )
  messages.insert(0, system_msg)
-
+ model_name = self.default_request_params.model
+ self.show_user_message(prompt[-1].first_text(), model_name, self.chat_turn())
  # Use the beta parse feature
  try:
  openai_client = OpenAI(api_key=self._api_key(), base_url=self._base_url())
@@ -429,8 +408,8 @@

  if response and isinstance(response[0], BaseException):
  raise response[0]
-
  parsed_result = response[0].choices[0].message
+ await self.show_assistant_message(parsed_result.content)
  logger.debug("Successfully used OpenAI beta parse feature for structured output")
  return parsed_result.parsed, Prompt.assistant(parsed_result.content)

mcp_agent/mcp/interfaces.py CHANGED
@@ -136,6 +136,11 @@ class AgentProtocol(AugmentedLLMProtocol, Protocol):

  name: str

+ @property
+ def agent_type(self) -> str:
+ """Return the type of this agent"""
+ ...
+
  async def __call__(self, message: Union[str, PromptMessage, PromptMessageMultipart]) -> str:
  """Make the agent callable for sending messages directly."""
  ...
mcp_agent/mcp/mcp_connection_manager.py CHANGED
@@ -3,6 +3,7 @@ Manages the lifecycle of multiple MCP server connections.
  """

  import asyncio
+ import traceback
  from datetime import timedelta
  from typing import (
  TYPE_CHECKING,
@@ -179,7 +180,7 @@ async def _server_lifecycle_task(server_conn: ServerConnection) -> None:
  },
  )
  server_conn._error_occurred = True
- server_conn._error_message = str(exc)
+ server_conn._error_message = traceback.format_exception(exc)
  # If there's an error, we should also set the event so that
  # 'get_server' won't hang
  server_conn._initialized_event.set()
@@ -270,7 +271,7 @@ class MCPConnectionManager(ContextDependent):
  logger.debug(f"{server_name}: Creating stdio client with custom error handler")
  return stdio_client(server_params, errlog=error_handler)
  elif config.transport == "sse":
- return sse_client(config.url)
+ return sse_client(config.url, config.headers)
  else:
  raise ValueError(f"Unsupported transport: {config.transport}")

@@ -328,7 +329,8 @@
  if not server_conn.is_healthy():
  error_msg = server_conn._error_message or "Unknown error"
  raise ServerInitializationError(
- f"MCP Server: '{server_name}': Failed to initialize with error: '{error_msg}'. Check fastagent.config.yaml"
+ f"MCP Server: '{server_name}': Failed to initialize - see details. Check fastagent.config.yaml?",
+ error_msg,
  )

  return server_conn
mcp_agent/mcp_server/agent_server.py CHANGED
@@ -38,7 +38,7 @@ class AgentMCPServer:

  # Basic send message tool
  @self.mcp_server.tool(
- name=f"{agent_name}.send",
+ name=f"{agent_name}_send",
  description=f"Send a message to the {agent_name} agent",
  )
  async def send_message(message: str, ctx: MCPContext) -> str:
@@ -58,23 +58,23 @@
  return await execute_send()

  # Register a history prompt for this agent
- @self.mcp_server.prompt(name=f"{agent_name}.history", description=f"Conversation history for the {agent_name} agent")
+ @self.mcp_server.prompt(
+ name=f"{agent_name}_history",
+ description=f"Conversation history for the {agent_name} agent",
+ )
  async def get_history_prompt() -> list:
  """Return the conversation history as MCP messages."""
  # Get the conversation history from the agent's LLM
  if not hasattr(agent, "_llm") or agent._llm is None:
  return []
-
+
  # Convert the multipart message history to standard PromptMessages
  multipart_history = agent._llm.message_history
  prompt_messages = mcp_agent.core.prompt.Prompt.from_multipart(multipart_history)
-
+
  # In FastMCP, we need to return the raw list of messages
  # that matches the structure that FastMCP expects (list of dicts with role/content)
- return [
- {"role": msg.role, "content": msg.content}
- for msg in prompt_messages
- ]
+ return [{"role": msg.role, "content": msg.content} for msg in prompt_messages]

  def run(self, transport: str = "sse", host: str = "0.0.0.0", port: int = 8000) -> None:
  """Run the MCP server."""
mcp_agent/mcp_server_registry.py CHANGED
@@ -152,7 +152,7 @@ class ServerRegistry:
  raise ValueError(f"URL is required for SSE transport: {server_name}")

  # Use sse_client to get the read and write streams
- async with sse_client(config.url) as (read_stream, write_stream):
+ async with sse_client(config.url, config.headers) as (read_stream, write_stream):
  session = client_session_factory(
  read_stream,
  write_stream,
@@ -260,6 +260,7 @@ class ServerRegistry:
  Returns:
  MCPServerSettings: The server configuration.
  """
+
  server_config = self.registry.get(server_name)
  if server_config is None:
  logger.warning(f"Server '{server_name}' not found in registry.")
mcp_agent/mcp/mcp_activity.py DELETED
@@ -1,18 +0,0 @@
- # import functools
- # from temporalio import activity
- # from typing import Dict, Any, List, Callable, Awaitable
- # from .gen_client import gen_client
-
-
- # def mcp_activity(server_name: str, mcp_call: Callable):
- # def decorator(func):
- # @activity.defn
- # @functools.wraps(func)
- # async def wrapper(*activity_args, **activity_kwargs):
- # params = await func(*activity_args, **activity_kwargs)
- # async with gen_client(server_name) as client:
- # return await mcp_call(client, params)
-
- # return wrapper
-
- # return decorator