fast-agent-mcp 0.2.18__py3-none-any.whl → 0.2.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
fast_agent_mcp-0.2.19.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fast-agent-mcp
-Version: 0.2.18
+Version: 0.2.19
 Summary: Define, Prompt and Test MCP enabled Agents and Workflows
 Author-email: Shaun Smith <fastagent@llmindset.co.uk>, Sarmad Qadri <sarmad@lastmileai.dev>
 License: Apache License
@@ -214,16 +214,16 @@ Requires-Dist: aiohttp>=3.11.13
 Requires-Dist: anthropic>=0.49.0
 Requires-Dist: fastapi>=0.115.6
 Requires-Dist: mcp==1.6.0
-Requires-Dist: numpy>=2.2.1
 Requires-Dist: openai>=1.63.2
 Requires-Dist: opentelemetry-distro>=0.50b0
 Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.29.0
+Requires-Dist: opentelemetry-instrumentation-anthropic>=0.39.3
+Requires-Dist: opentelemetry-instrumentation-openai>=0.39.3
 Requires-Dist: prompt-toolkit>=3.0.50
 Requires-Dist: pydantic-settings>=2.7.0
 Requires-Dist: pydantic>=2.10.4
 Requires-Dist: pyyaml>=6.0.2
 Requires-Dist: rich>=13.9.4
-Requires-Dist: scikit-learn>=1.6.0
 Requires-Dist: typer>=0.15.1
 Provides-Extra: dev
 Requires-Dist: anthropic>=0.42.0; extra == 'dev'
@@ -251,7 +251,7 @@ Description-Content-Type: text/markdown
 ## Overview
 
 > [!TIP]
-> Documentation site is in production here : https://fast-agent.ai. Feel free to feed back what's helpful and what's not. llms.txt link is here: https://fast-agent.ai/llms.txt
+> Documentation site is in production here : https://fast-agent.ai. Feel free to feed back what's helpful and what's not. There is also an LLMs.txt [here](https://fast-agent.ai/llms.txt)
 
 **`fast-agent`** enables you to create and interact with sophisticated Agents and Workflows in minutes. It is the first framework with complete, end-to-end tested MCP Feature support including Sampling. Both Anthropic (Haiku, Sonnet, Opus) and OpenAI models (gpt-4o/gpt-4.1 family, o1/o3 family) are supported.
 
@@ -259,8 +259,8 @@ The simple declarative syntax lets you concentrate on composing your Prompts and
 
 `fast-agent` is multi-modal, supporting Images and PDFs for both Anthropic and OpenAI endpoints via Prompts, Resources and MCP Tool Call results. The inclusion of passthrough and playback LLMs enable rapid development and test of Python glue-code for your applications.
 
-> [!TIP]
-> `fast-agent` is now MCP Native! Coming Soon - Full Documentation Site and Further MCP Examples.
+> [!IMPORTANT]
+> `fast-agent` The fast-agent documentation repo is here: https://github.com/evalstate/fast-agent-docs. Please feel free to submit PRs for documentation, experience reports or other content you think others may find helpful. All help and feedback warmly received.
 
 ### Agent Application Development
 
@@ -450,10 +450,10 @@ If the Generator has `use_history` off, the previous iteration is returned when
 
 ```python
 @fast.evaluator_optimizer(
-    name="researcher"
-    generator="web_searcher"
-    evaluator="quality_assurance"
-    min_rating="EXCELLENT"
+    name="researcher",
+    generator="web_searcher",
+    evaluator="quality_assurance",
+    min_rating="EXCELLENT",
     max_refinements=3
 )
 
@@ -471,8 +471,8 @@ Routers use an LLM to assess a message, and route it to the most appropriate Age
 
 ```python
 @fast.router(
-    name="route"
-    agents["agent1","agent2","agent3"]
+    name="route",
+    agents=["agent1","agent2","agent3"]
 )
 ```
 
@@ -484,7 +484,7 @@ Given a complex task, the Orchestrator uses an LLM to generate a plan to divide
 
 ```python
 @fast.orchestrator(
-    name="orchestrate"
+    name="orchestrate",
     agents=["task1","task2","task3"]
 )
 ```
@@ -524,7 +524,7 @@ agent["greeter"].send("Good Evening!") # Dictionary access is supported
     servers=["filesystem"], # list of MCP Servers for the agent
     model="o3-mini.high", # specify a model for the agent
    use_history=True, # agent maintains chat history
-    request_params=RequestParams(temperature= 0.7)), # additional parameters for the LLM (or RequestParams())
+    request_params=RequestParams(temperature= 0.7), # additional parameters for the LLM (or RequestParams())
    human_input=True, # agent can request human input
 )
 ```
@@ -536,7 +536,7 @@ agent["greeter"].send("Good Evening!") # Dictionary access is supported
    name="chain", # name of the chain
    sequence=["agent1", "agent2", ...], # list of agents in execution order
    instruction="instruction", # instruction to describe the chain for other workflows
-    cumulative=False # whether to accumulate messages through the chain
+    cumulative=False, # whether to accumulate messages through the chain
    continue_with_final=True, # open chat with agent at end of chain after prompting
 )
 ```
fast_agent_mcp-0.2.19.dist-info/RECORD CHANGED
@@ -1,23 +1,23 @@
 mcp_agent/__init__.py,sha256=-AIoeL4c9UAp_P4U0z-uIWTTmQWdihOis5nbQ5L_eao,1664
 mcp_agent/app.py,sha256=WRsiUdwy_9IAnaGRDwuLm7pzgQpt2wgsg10vBOpfcwM,5539
-mcp_agent/config.py,sha256=eEknK8I7DCrh1iGqzva0TemHMzjlbfhFhgwULqaKjDs,12218
+mcp_agent/config.py,sha256=4MB8QBwGb6MPPRc85p-xdCzRloGwpWsPfEgxsoS_4N0,12159
 mcp_agent/console.py,sha256=Gjf2QLFumwG1Lav__c07X_kZxxEUSkzV-1_-YbAwcwo,813
-mcp_agent/context.py,sha256=GEZQ64UCvLj6bRXGNm35Hxd5jxrI_P52vhC3opnWwsg,7246
+mcp_agent/context.py,sha256=fHyDjeZpHYRBOCVY58hVcFQxybOXSJmuui2o51kLbuA,7307
 mcp_agent/context_dependent.py,sha256=QXfhw3RaQCKfscEEBRGuZ3sdMWqkgShz2jJ1ivGGX1I,1455
 mcp_agent/event_progress.py,sha256=3dqk5Pn1tAG_m_wn4IPNwLWLyzm7CyKIidqHN-4l-JY,2736
 mcp_agent/mcp_server_registry.py,sha256=pSD3euU-Oc2LAVenqkLU7UmutAzk6A9liYVLjCj4J70,10068
 mcp_agent/progress_display.py,sha256=GeJU9VUt6qKsFVymG688hCMVCsAygG9ifiiEb5IcbN4,361
 mcp_agent/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/agents/agent.py,sha256=GgaUHoilgqzh9PQYr5k2WiPj4pagwicf9-ZLFsHkNNo,3848
-mcp_agent/agents/base_agent.py,sha256=c_jhC6tt6LdvS1IY4AVdMNj3n_OtluzHQ1-_NEOseEQ,25039
+mcp_agent/agents/base_agent.py,sha256=fjDr01-hZ9sB3ghI4DlXYVePP0s5f9pmtLH-N3X8bRg,25294
 mcp_agent/agents/workflow/__init__.py,sha256=HloteEW6kalvgR0XewpiFAqaQlMPlPJYg5p3K33IUzI,25
 mcp_agent/agents/workflow/chain_agent.py,sha256=eIlImirrSXkqBJmPuAJgOKis81Cl6lZEGM0-6IyaUV8,6105
 mcp_agent/agents/workflow/evaluator_optimizer.py,sha256=ysUMGM2NzeCIutgr_vXH6kUPpZMw0cX4J_Wl1r8eT84,13296
 mcp_agent/agents/workflow/orchestrator_agent.py,sha256=byZe4bx7D_7BSZZ3hN8BNUWVFPYeqeUwDUCLTRC8mlI,21583
 mcp_agent/agents/workflow/orchestrator_models.py,sha256=5P_aXADVT4Et8qT4e1cb9RelmHX5dCRrzu8j8T41Kdg,7230
 mcp_agent/agents/workflow/orchestrator_prompts.py,sha256=EXKEI174sshkZyPPEnWbwwNafzSPuA39MXL7iqG9cWc,9106
-mcp_agent/agents/workflow/parallel_agent.py,sha256=GQTxAqwrPEdle-rPWMvoLOuzE_X69_HYEgYSm98eXdM,7087
-mcp_agent/agents/workflow/router_agent.py,sha256=NugAJcA1ooZ-TNLNh7H26xIFChZoryxofJ7fTkrw4cU,9128
+mcp_agent/agents/workflow/parallel_agent.py,sha256=JaQFp35nmAdoBRLAwx8BfnK7kirVq9PMw24LQ3ZEzoc,7705
+mcp_agent/agents/workflow/router_agent.py,sha256=yZUUhAmni2wAutQJ32EQ264Uh_MIriVP-AmhKFrYTeQ,9472
 mcp_agent/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/cli/__main__.py,sha256=AVZ7tQFhU_sDOGuUGJq8ujgKtcxsYJBJwHbVaaiRDlI,166
 mcp_agent/cli/main.py,sha256=XjrgXMBaPKkVqAFo8T9LJz6Tp1-ivrKDOuNYWke99YA,3090
@@ -34,21 +34,21 @@ mcp_agent/core/direct_factory.py,sha256=d96OM1yS3eIocIiaA9FQt6C2zr6VDUyCJBTZCp_D
 mcp_agent/core/enhanced_prompt.py,sha256=bzvcengS7XzHWB7NWhyxHM3hhO2HI4zP5DbGXAOw0Jw,19155
 mcp_agent/core/error_handling.py,sha256=xoyS2kLe0eG0bj2eSJCJ2odIhGUve2SbDR7jP-A-uRw,624
 mcp_agent/core/exceptions.py,sha256=ENAD_qGG67foxy6vDkIvc-lgopIUQy6O7zvNPpPXaQg,2289
-mcp_agent/core/fastagent.py,sha256=nj2o7T-ClI8ZeUHEr0pSfVxGIkbUffoJ9jT11DNKD_c,20195
+mcp_agent/core/fastagent.py,sha256=OkS1mazgMUJyA02RbW-9z2nI6XQ4kKCLfyjgAkgv0O8,22708
 mcp_agent/core/interactive_prompt.py,sha256=w3VyRzW4hzn0xhWZRwo_qRRAD5WVSrJYe8QDe1XZ55Y,24252
 mcp_agent/core/mcp_content.py,sha256=2D7KHY9mG_vxoDwFLKvsPQV9VRIzHItM7V-jcEnACh8,8878
 mcp_agent/core/prompt.py,sha256=qnintOUGEoDPYLI9bu9G2OlgVMCe5ZPUZilgMzydXhc,7919
 mcp_agent/core/request_params.py,sha256=loYf13DN7e-DsdYRd37jWkJWJGwVBL-iFkcANP1J60Q,1366
 mcp_agent/core/validation.py,sha256=RIBKFlh0GJg4rTcFQXoXp8A0sK1HpsCigKcYSK3gFaY,12090
 mcp_agent/executor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mcp_agent/executor/executor.py,sha256=MzLSnW9nHrLHYChR3oQa5B8dajQGX26q6-S2BJCxv0o,9507
+mcp_agent/executor/executor.py,sha256=E44p6d-o3OMRoP_dNs_cDnyti91LQ3P9eNU88mSi1kc,9462
 mcp_agent/executor/task_registry.py,sha256=PCALFeYtkQrPBg4RBJnlA0aDI8nHclrNkHGUS4kV3W8,1242
 mcp_agent/executor/workflow_signal.py,sha256=Cg1uZBk3fn8kXhPOg-wINNuVaf3v9pvLD6NbqWy5Z6E,11142
 mcp_agent/human_input/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/human_input/handler.py,sha256=s712Z5ssTCwjL9-VKoIdP5CtgMh43YvepynYisiWTTA,3144
 mcp_agent/human_input/types.py,sha256=RtWBOVzy8vnYoQrc36jRLn8z8N3C4pDPMBN5vF6qM5Y,1476
 mcp_agent/llm/__init__.py,sha256=d8zgwG-bRFuwiMNMYkywg_qytk4P8lawyld_meuUmHI,68
-mcp_agent/llm/augmented_llm.py,sha256=4zKEErhSROrcc1fojCXRFb13UlaOzWvB0jO_XdlcCWM,23378
+mcp_agent/llm/augmented_llm.py,sha256=fP2uWIFY9qaEuY0mehti4A3NjhvGuj-TebLI0FVTbcM,23380
 mcp_agent/llm/augmented_llm_passthrough.py,sha256=zHcctNpwg4EFJvD1x9Eg443SVX-uyzFphLikwF_yVE0,6288
 mcp_agent/llm/augmented_llm_playback.py,sha256=6L_RWIK__R67oZK7u3Xt3hWy1T2LnHXIO-efqgP3tPw,4177
 mcp_agent/llm/memory.py,sha256=HQ_c1QemOUjrkY6Z2omE6BG5fXga7y4jN7KCMOuGjPs,3345
@@ -64,7 +64,7 @@ mcp_agent/llm/providers/augmented_llm_anthropic.py,sha256=poouQMsDoZSH-5a_TL2Z2E
 mcp_agent/llm/providers/augmented_llm_deepseek.py,sha256=NiZK5nv91ZS2VgVFXpbsFNFYLsLcppcbo_RstlRMd7I,1145
 mcp_agent/llm/providers/augmented_llm_generic.py,sha256=5Uq8ZBhcFuQTt7koP_5ykolREh2iWu8zKhNbh3pM9lQ,1210
 mcp_agent/llm/providers/augmented_llm_google.py,sha256=N0a2fphVtkvNYxKQpEX6J4tlO1C_mRw4sw3LBXnrOeI,1130
-mcp_agent/llm/providers/augmented_llm_openai.py,sha256=ypK1pDwBKjvdemyEsfzhebinQ6fSAnJS5OUsDVlvdOw,14001
+mcp_agent/llm/providers/augmented_llm_openai.py,sha256=XFFoIMmXCoigC98zrR0_1c7DsyS4ep7hLvklmdn4jqU,14085
 mcp_agent/llm/providers/augmented_llm_openrouter.py,sha256=V_TlVKm92GHBxYIo6gpvH_6cAaIdppS25Tz6x5T7LW0,2341
 mcp_agent/llm/providers/multipart_converter_anthropic.py,sha256=t5lHYGfFUacJldnrVtMNW-8gEMoto8Y7hJkDrnyZR-Y,16650
 mcp_agent/llm/providers/multipart_converter_openai.py,sha256=XPIulWntNpZWNGWrc240StPzok2RqrDAV7OigDwQ1uU,15850
@@ -78,15 +78,14 @@ mcp_agent/logging/json_serializer.py,sha256=qkfxnR9ka6OgvwSpM2CggELbEtzzkApm0s_K
 mcp_agent/logging/listeners.py,sha256=_S4Jp5_KWp0kUfrx4BxDdNCeQK3MNT3Zi9AaolPri7A,6648
 mcp_agent/logging/logger.py,sha256=l02OGX_c5FOyH0rspd4ZvnkJcbb0FahhUhlh2KI8mqE,10724
 mcp_agent/logging/rich_progress.py,sha256=oY9fjb4Tyw6887v8sgO6EGIK4lnmIoR3NNxhA_-Ln_M,4893
-mcp_agent/logging/tracing.py,sha256=d5lSXakzzi5PtQpUkVkOnYaGX8NduGPq__S7vx-Ln8U,5187
 mcp_agent/logging/transport.py,sha256=m8YsLLu5T8eof_ndpLQs4gHOzqqEL98xsVwBwDsBfxI,17335
 mcp_agent/mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/mcp/gen_client.py,sha256=fAVwFVCgSamw4PwoWOV4wrK9TABx1S_zZv8BctRyF2k,3030
 mcp_agent/mcp/interfaces.py,sha256=PAou8znAl2HgtvfCpLQOZFbKra9F72OcVRfBJbboNX8,6965
 mcp_agent/mcp/logger_textio.py,sha256=vljC1BtNTCxBAda9ExqNB-FwVNUZIuJT3h1nWmCjMws,3172
 mcp_agent/mcp/mcp_agent_client_session.py,sha256=RMYNltc2pDIzxwEJSS5589RbvPO0KWV4Y3jSyAmhKf0,4181
-mcp_agent/mcp/mcp_aggregator.py,sha256=YVLqcv5dS5Z8yEpQggru69NzqQBFKpXG8_CG0XAN3qk,40219
-mcp_agent/mcp/mcp_connection_manager.py,sha256=FGFF3DruVcHD_8J-VadrRyyrOiiq-N9-_ZzIdx4NUOA,13973
+mcp_agent/mcp/mcp_aggregator.py,sha256=RjRcYHMKt5Wn85JWVar6X0hZLYtBeHrctiBBAK5AYcc,40584
+mcp_agent/mcp/mcp_connection_manager.py,sha256=R_oGvFkolZJ_i3SizIIlKS_NPjXscsWLSOf1x9Zu0dM,14008
 mcp_agent/mcp/mime_utils.py,sha256=difepNR_gpb4MpMLkBRAoyhDk-AjXUHTiqKvT_VwS1o,1805
 mcp_agent/mcp/prompt_message_multipart.py,sha256=BDwRdNwyWHb2q2bccDb2iR2VlORqVvkvoG3xYzcMpCE,4403
 mcp_agent/mcp/prompt_render.py,sha256=k3v4BZDThGE2gGiOYVQtA6x8WTEdOuXIEnRafANhN1U,2996
@@ -144,8 +143,8 @@ mcp_agent/resources/examples/workflows/parallel.py,sha256=DQ5vY5-h8Qa5QHcYjsWXhZ
 mcp_agent/resources/examples/workflows/router.py,sha256=E4x_-c3l4YW9w1i4ARcDtkdeqIdbWEGfsMzwLYpdbVc,1677
 mcp_agent/resources/examples/workflows/short_story.txt,sha256=X3y_1AyhLFN2AKzCKvucJtDgAFIJfnlbsbGZO5bBWu0,1187
 mcp_agent/ui/console_display.py,sha256=TVGDtJ37hc6UG0ei9g7ZPZZfFNeS1MYozt-Mx8HsPCk,9752
-fast_agent_mcp-0.2.18.dist-info/METADATA,sha256=gFL_3akF6ZiJirx5mn_IxBhdp_wJ4OkJGx5YF2hAu38,29893
-fast_agent_mcp-0.2.18.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-fast_agent_mcp-0.2.18.dist-info/entry_points.txt,sha256=bRniFM5zk3Kix5z7scX0gf9VnmGQ2Cz_Q1Gh7Ir4W00,186
-fast_agent_mcp-0.2.18.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
-fast_agent_mcp-0.2.18.dist-info/RECORD,,
+fast_agent_mcp-0.2.19.dist-info/METADATA,sha256=slU0l8N_MAEEmGn3bla4jeHibkCq3DGTtngvii6xrro,30142
+fast_agent_mcp-0.2.19.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+fast_agent_mcp-0.2.19.dist-info/entry_points.txt,sha256=bRniFM5zk3Kix5z7scX0gf9VnmGQ2Cz_Q1Gh7Ir4W00,186
+fast_agent_mcp-0.2.19.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
+fast_agent_mcp-0.2.19.dist-info/RECORD,,
mcp_agent/agents/base_agent.py CHANGED
@@ -31,6 +31,7 @@ from mcp.types import (
     TextContent,
     Tool,
 )
+from opentelemetry import trace
 from pydantic import BaseModel
 
 from mcp_agent.core.agent_types import AgentConfig, AgentType
@@ -92,6 +93,7 @@ class BaseAgent(MCPAggregator, AgentProtocol):
         )
 
         self._context = context
+        self.tracer = trace.get_tracer(__name__)
         self.name = self.config.name
         self.instruction = self.config.instruction
         self.functions = functions or []
@@ -588,7 +590,8 @@ class BaseAgent(MCPAggregator, AgentProtocol):
             The LLM's response as a PromptMessageMultipart
         """
         assert self._llm
-        return await self._llm.generate(multipart_messages, request_params)
+        with self.tracer.start_as_current_span(f"Agent: '{self.name}' generate"):
+            return await self._llm.generate(multipart_messages, request_params)
 
     async def structured(
         self,
@@ -609,7 +612,8 @@ class BaseAgent(MCPAggregator, AgentProtocol):
             An instance of the specified model, or None if coercion fails
         """
         assert self._llm
-        return await self._llm.structured(multipart_messages, model, request_params)
+        with self.tracer.start_as_current_span(f"Agent: '{self.name}' structured"):
+            return await self._llm.structured(multipart_messages, model, request_params)
 
     async def apply_prompt_messages(
         self, prompts: List[PromptMessageMultipart], request_params: RequestParams | None = None
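
The hunks above give every agent its own tracer and wrap `generate`/`structured` in a span named for the agent. A minimal standalone sketch of the same pattern; the `traced_generate` helper is illustrative, not part of the package:

```python
from opentelemetry import trace

tracer = trace.get_tracer(__name__)

async def traced_generate(agent_name: str, llm, messages):
    # One span per agent call; spans created deeper in the LLM layer
    # (e.g. by the SDK instrumentors) become children of this span.
    with tracer.start_as_current_span(f"Agent: '{agent_name}' generate"):
        return await llm.generate(messages)
```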
mcp_agent/agents/workflow/parallel_agent.py CHANGED
@@ -2,6 +2,7 @@ import asyncio
 from typing import Any, List, Optional, Tuple
 
 from mcp.types import TextContent
+from opentelemetry import trace
 
 from mcp_agent.agents.agent import Agent
 from mcp_agent.agents.base_agent import BaseAgent
@@ -18,7 +19,7 @@ class ParallelAgent(BaseAgent):
     This workflow performs both the fan-out and fan-in operations using LLMs.
     From the user's perspective, an input is specified and the output is returned.
     """
-
+
     @property
     def agent_type(self) -> AgentType:
         """Return the type of this agent."""
@@ -62,31 +63,37 @@
         Returns:
             The aggregated response from the fan-in agent
         """
-        # Execute all fan-out agents in parallel
-        responses: List[PromptMessageMultipart] = await asyncio.gather(
-            *[agent.generate(multipart_messages, request_params) for agent in self.fan_out_agents]
-        )
 
-        # Extract the received message from the input
-        received_message: Optional[str] = (
-            multipart_messages[-1].all_text() if multipart_messages else None
-        )
+        tracer = trace.get_tracer(__name__)
+        with tracer.start_as_current_span(f"Parallel: '{self.name}' generate"):
+            # Execute all fan-out agents in parallel
+            responses: List[PromptMessageMultipart] = await asyncio.gather(
+                *[
+                    agent.generate(multipart_messages, request_params)
+                    for agent in self.fan_out_agents
+                ]
+            )
 
-        # Convert responses to strings for aggregation
-        string_responses = []
-        for response in responses:
-            string_responses.append(response.all_text())
+            # Extract the received message from the input
+            received_message: Optional[str] = (
+                multipart_messages[-1].all_text() if multipart_messages else None
+            )
 
-        # Format the responses and send to the fan-in agent
-        aggregated_prompt = self._format_responses(string_responses, received_message)
+            # Convert responses to strings for aggregation
+            string_responses = []
+            for response in responses:
+                string_responses.append(response.all_text())
 
-        # Create a new multipart message with the formatted responses
-        formatted_prompt = PromptMessageMultipart(
-            role="user", content=[TextContent(type="text", text=aggregated_prompt)]
-        )
+            # Format the responses and send to the fan-in agent
+            aggregated_prompt = self._format_responses(string_responses, received_message)
+
+            # Create a new multipart message with the formatted responses
+            formatted_prompt = PromptMessageMultipart(
+                role="user", content=[TextContent(type="text", text=aggregated_prompt)]
+            )
 
-        # Use the fan-in agent to aggregate the responses
-        return await self.fan_in_agent.generate([formatted_prompt], request_params)
+            # Use the fan-in agent to aggregate the responses
+            return await self.fan_in_agent.generate([formatted_prompt], request_params)
 
     def _format_responses(self, responses: List[Any], message: Optional[str] = None) -> str:
         """
@@ -116,7 +123,7 @@
 
     async def structured(
         self,
-        prompt: List[PromptMessageMultipart],
+        multipart_messages: List[PromptMessageMultipart],
         model: type[ModelT],
         request_params: Optional[RequestParams] = None,
     ) -> Tuple[ModelT | None, PromptMessageMultipart]:
@@ -133,27 +140,35 @@
         Returns:
             An instance of the specified model, or None if coercion fails
         """
-        # Generate parallel responses first
-        responses: List[PromptMessageMultipart] = await asyncio.gather(
-            *[agent.generate(prompt, request_params) for agent in self.fan_out_agents]
-        )
 
-        # Extract the received message
-        received_message: Optional[str] = prompt[-1].all_text() if prompt else None
+        tracer = trace.get_tracer(__name__)
+        with tracer.start_as_current_span(f"Parallel: '{self.name}' generate"):
+            # Generate parallel responses first
+            responses: List[PromptMessageMultipart] = await asyncio.gather(
+                *[
+                    agent.generate(multipart_messages, request_params)
+                    for agent in self.fan_out_agents
+                ]
+            )
 
-        # Convert responses to strings
-        string_responses = [response.all_text() for response in responses]
+            # Extract the received message
+            received_message: Optional[str] = (
+                multipart_messages[-1].all_text() if multipart_messages else None
+            )
 
-        # Format the responses for the fan-in agent
-        aggregated_prompt = self._format_responses(string_responses, received_message)
+            # Convert responses to strings
+            string_responses = [response.all_text() for response in responses]
 
-        # Create a multipart message
-        formatted_prompt = PromptMessageMultipart(
-            role="user", content=[TextContent(type="text", text=aggregated_prompt)]
-        )
+            # Format the responses for the fan-in agent
+            aggregated_prompt = self._format_responses(string_responses, received_message)
+
+            # Create a multipart message
+            formatted_prompt = PromptMessageMultipart(
+                role="user", content=[TextContent(type="text", text=aggregated_prompt)]
+            )
 
-        # Use the fan-in agent to parse the structured output
-        return await self.fan_in_agent.structured([formatted_prompt], model, request_params)
+            # Use the fan-in agent to parse the structured output
+            return await self.fan_in_agent.structured([formatted_prompt], model, request_params)
 
     async def initialize(self) -> None:
         """
mcp_agent/agents/workflow/router_agent.py CHANGED
@@ -7,6 +7,7 @@ by determining the best agent for a request and dispatching to it.
 
 from typing import TYPE_CHECKING, Callable, List, Optional, Tuple, Type
 
+from opentelemetry import trace
 from pydantic import BaseModel
 
 from mcp_agent.agents.agent import Agent
@@ -158,17 +159,18 @@ class RouterAgent(BaseAgent):
         Returns:
             The response from the selected agent
         """
+        tracer = trace.get_tracer(__name__)
+        with tracer.start_as_current_span(f"Routing: '{self.name}' generate"):
+            route, warn = await self._route_request(multipart_messages[-1])
 
-        route, warn = await self._route_request(multipart_messages[-1])
+            if not route:
+                return Prompt.assistant(warn or "No routing result or warning received")
 
-        if not route:
-            return Prompt.assistant(warn or "No routing result or warning received")
+            # Get the selected agent
+            agent: Agent = self.agent_map[route.agent]
 
-        # Get the selected agent
-        agent: Agent = self.agent_map[route.agent]
-
-        # Dispatch the request to the selected agent
-        return await agent.generate(multipart_messages, request_params)
+            # Dispatch the request to the selected agent
+            return await agent.generate(multipart_messages, request_params)
 
     async def structured(
         self,
@@ -187,18 +189,21 @@ class RouterAgent(BaseAgent):
         Returns:
             The parsed response from the selected agent, or None if parsing fails
         """
-        route, warn = await self._route_request(multipart_messages[-1])
 
-        if not route:
-            return None, Prompt.assistant(
-                warn or "No routing result or warning received (structured)"
-            )
+        tracer = trace.get_tracer(__name__)
+        with tracer.start_as_current_span(f"Routing: '{self.name}' structured"):
+            route, warn = await self._route_request(multipart_messages[-1])
+
+            if not route:
+                return None, Prompt.assistant(
+                    warn or "No routing result or warning received (structured)"
+                )
 
-        # Get the selected agent
-        agent: Agent = self.agent_map[route.agent]
+            # Get the selected agent
+            agent: Agent = self.agent_map[route.agent]
 
-        # Dispatch the request to the selected agent
-        return await agent.structured(multipart_messages, model, request_params)
+            # Dispatch the request to the selected agent
+            return await agent.structured(multipart_messages, model, request_params)
 
     async def _route_request(
         self, message: PromptMessageMultipart
mcp_agent/config.py CHANGED
@@ -181,13 +181,11 @@ class OpenTelemetrySettings(BaseModel):
     OTEL settings for the fast-agent application.
     """
 
-    enabled: bool = True
+    enabled: bool = False
 
     service_name: str = "fast-agent"
-    service_instance_id: str | None = None
-    service_version: str | None = None
 
-    otlp_endpoint: str | None = None
+    otlp_endpoint: str = "http://localhost:4318/v1/traces"
     """OTLP endpoint for OpenTelemetry tracing"""
 
     console_debug: bool = False
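
Tracing is now off by default and, when enabled, points at a local OTLP/HTTP collector. A sketch of opting in programmatically, using only the fields visible in this hunk (most deployments would set the equivalent keys in `fastagent.config.yaml`, which this diff does not show):

```python
from mcp_agent.config import OpenTelemetrySettings

otel = OpenTelemetrySettings(
    enabled=True,  # default flipped from True to False in 0.2.19, so opt in explicitly
    service_name="fast-agent",
    otlp_endpoint="http://localhost:4318/v1/traces",  # new default OTLP/HTTP endpoint
)
```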
mcp_agent/context.py CHANGED
@@ -9,6 +9,8 @@ from typing import TYPE_CHECKING, Any, Optional, Union
 from mcp import ServerSession
 from opentelemetry import trace
 from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+from opentelemetry.instrumentation.anthropic import AnthropicInstrumentor
+from opentelemetry.instrumentation.openai import OpenAIInstrumentor
 from opentelemetry.propagate import set_global_textmap
 from opentelemetry.sdk.resources import Resource
 from opentelemetry.sdk.trace import TracerProvider
@@ -51,7 +53,7 @@ class Context(BaseModel):
     server_registry: Optional[ServerRegistry] = None
     task_registry: Optional[ActivityRegistry] = None
 
-    tracer: Optional[trace.Tracer] = None
+    tracer: trace.Tracer | None = None
 
     model_config = ConfigDict(
         extra="allow",
@@ -63,19 +65,19 @@ async def configure_otel(config: "Settings") -> None:
     """
     Configure OpenTelemetry based on the application config.
    """
-    if not config.otel.enabled:
-        return
-
-    # Check if a provider is already set to avoid re-initialization
-    if trace.get_tracer_provider().__class__.__name__ != "NoOpTracerProvider":
+    if not config.otel or not config.otel.enabled:
         return
 
     # Set up global textmap propagator first
     set_global_textmap(TraceContextTextMapPropagator())
 
     service_name = config.otel.service_name
-    service_instance_id = config.otel.service_instance_id
-    service_version = config.otel.service_version
+    from importlib.metadata import version
+
+    try:
+        app_version = version("fast-agent-mcp")
+    except:  # noqa: E722
+        app_version = "unknown"
 
     # Create resource identifying this service
     resource = Resource.create(
@@ -83,8 +85,7 @@ async def configure_otel(config: "Settings") -> None:
             key: value
             for key, value in {
                 "service.name": service_name,
-                "service.instance.id": service_instance_id,
-                "service.version": service_version,
+                "service.version": app_version,
             }.items()
             if value is not None
         }
@@ -107,6 +108,8 @@ async def configure_otel(config: "Settings") -> None:
 
     # Set as global tracer provider
     trace.set_tracer_provider(tracer_provider)
+    AnthropicInstrumentor().instrument()
+    OpenAIInstrumentor().instrument()
 
 
 async def configure_logger(config: "Settings") -> None:
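
`configure_otel` now derives `service.version` from the installed package and patches the Anthropic and OpenAI SDKs via the two new `opentelemetry-instrumentation-*` dependencies from METADATA. A standalone sketch of that setup; the `BatchSpanProcessor` wiring is an assumption (the processor choice is not visible in this hunk), while the endpoint is the new default from `config.py` above:

```python
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.instrumentation.anthropic import AnthropicInstrumentor
from opentelemetry.instrumentation.openai import OpenAIInstrumentor
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor

provider = TracerProvider()
provider.add_span_processor(
    BatchSpanProcessor(OTLPSpanExporter(endpoint="http://localhost:4318/v1/traces"))
)
trace.set_tracer_provider(provider)

# After this, every Anthropic/OpenAI SDK call emits its own span,
# nested under whatever agent/workflow span is current.
AnthropicInstrumentor().instrument()
OpenAIInstrumentor().instrument()
```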
mcp_agent/core/fastagent.py CHANGED
@@ -13,6 +13,7 @@ from pathlib import Path
 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, TypeVar
 
 import yaml
+from opentelemetry import trace
 
 from mcp_agent import config
 from mcp_agent.app import MCPApp
@@ -76,81 +77,97 @@ class FastAgent:
         name: str,
         config_path: str | None = None,
         ignore_unknown_args: bool = False,
+        parse_cli_args: bool = True,  # Add new parameter with default True
     ) -> None:
         """
-        Initialize the DirectFastAgent application.
+        Initialize the fast-agent application.
 
         Args:
             name: Name of the application
             config_path: Optional path to config file
             ignore_unknown_args: Whether to ignore unknown command line arguments
+                when parse_cli_args is True.
+            parse_cli_args: If True, parse command line arguments using argparse.
+                Set to False when embedding FastAgent in another framework
+                (like FastAPI/Uvicorn) that handles its own arguments.
         """
-        # Setup command line argument parsing
-        parser = argparse.ArgumentParser(description="DirectFastAgent Application")
-        parser.add_argument(
-            "--model",
-            help="Override the default model for all agents",
-        )
-        parser.add_argument(
-            "--agent",
-            default="default",
-            help="Specify the agent to send a message to (used with --message)",
-        )
-        parser.add_argument(
-            "-m",
-            "--message",
-            help="Message to send to the specified agent",
-        )
-        parser.add_argument(
-            "-p", "--prompt-file", help="Path to a prompt file to use (either text or JSON)"
-        )
-        parser.add_argument(
-            "--quiet",
-            action="store_true",
-            help="Disable progress display, tool and message logging for cleaner output",
-        )
-        parser.add_argument(
-            "--version",
-            action="store_true",
-            help="Show version and exit",
-        )
-        parser.add_argument(
-            "--server",
-            action="store_true",
-            help="Run as an MCP server",
-        )
-        parser.add_argument(
-            "--transport",
-            choices=["sse", "stdio"],
-            default="sse",
-            help="Transport protocol to use when running as a server (sse or stdio)",
-        )
-        parser.add_argument(
-            "--port",
-            type=int,
-            default=8000,
-            help="Port to use when running as a server with SSE transport",
-        )
-        parser.add_argument(
-            "--host",
-            default="0.0.0.0",
-            help="Host address to bind to when running as a server with SSE transport",
-        )
-
-        if ignore_unknown_args:
-            known_args, _ = parser.parse_known_args()
-            self.args = known_args
-        else:
-            self.args = parser.parse_args()
+        self.args = argparse.Namespace()  # Initialize args always
+
+        # --- Wrap argument parsing logic ---
+        if parse_cli_args:
+            # Setup command line argument parsing
+            parser = argparse.ArgumentParser(description="DirectFastAgent Application")
+            parser.add_argument(
+                "--model",
+                help="Override the default model for all agents",
+            )
+            parser.add_argument(
+                "--agent",
+                default="default",
+                help="Specify the agent to send a message to (used with --message)",
+            )
+            parser.add_argument(
+                "-m",
+                "--message",
+                help="Message to send to the specified agent",
+            )
+            parser.add_argument(
+                "-p", "--prompt-file", help="Path to a prompt file to use (either text or JSON)"
+            )
+            parser.add_argument(
+                "--quiet",
+                action="store_true",
+                help="Disable progress display, tool and message logging for cleaner output",
+            )
+            parser.add_argument(
+                "--version",
+                action="store_true",
+                help="Show version and exit",
+            )
+            parser.add_argument(
+                "--server",
+                action="store_true",
+                help="Run as an MCP server",
+            )
+            parser.add_argument(
+                "--transport",
+                choices=["sse", "stdio"],
+                default="sse",
+                help="Transport protocol to use when running as a server (sse or stdio)",
+            )
+            parser.add_argument(
+                "--port",
+                type=int,
+                default=8000,
+                help="Port to use when running as a server with SSE transport",
+            )
+            parser.add_argument(
+                "--host",
+                default="0.0.0.0",
+                help="Host address to bind to when running as a server with SSE transport",
+            )
 
-        # Handle version flag
-        if self.args.version:
-            try:
-                app_version = get_version("fast-agent-mcp")
-            except:  # noqa: E722
-                app_version = "unknown"
-            print(f"fast-agent-mcp v{app_version}")
-            sys.exit(0)
+            if ignore_unknown_args:
+                known_args, _ = parser.parse_known_args()
+                self.args = known_args
+            else:
+                # Use parse_known_args here too, to avoid crashing on uvicorn args etc.
+                # even if ignore_unknown_args is False, we only care about *our* args.
+                known_args, unknown = parser.parse_known_args()
+                self.args = known_args
+                # Optionally, warn about unknown args if not ignoring?
+                # if unknown and not ignore_unknown_args:
+                #     logger.warning(f"Ignoring unknown command line arguments: {unknown}")
+
+            # Handle version flag
+            if self.args.version:
+                try:
+                    app_version = get_version("fast-agent-mcp")
+                except:  # noqa: E722
+                    app_version = "unknown"
+                print(f"fast-agent-mcp v{app_version}")
+                sys.exit(0)
+        # --- End of wrapped logic ---
 
         self.name = name
         self.config_path = config_path
@@ -220,164 +237,175 @@ class FastAgent:
         had_error = False
         await self.app.initialize()
 
-        # Handle quiet mode
-        quiet_mode = hasattr(self, "args") and self.args.quiet
-
-        try:
-            async with self.app.run():
-                # Apply quiet mode if requested
-                if (
-                    quiet_mode
-                    and hasattr(self.app.context, "config")
-                    and hasattr(self.app.context.config, "logger")
-                ):
-                    # Update our app's config directly
-                    self.app.context.config.logger.progress_display = False
-                    self.app.context.config.logger.show_chat = False
-                    self.app.context.config.logger.show_tools = False
-
-                    # Directly disable the progress display singleton
-                    from mcp_agent.progress_display import progress_display
-
-                    progress_display.stop()
-
-                # Pre-flight validation
-                if 0 == len(self.agents):
-                    raise AgentConfigError("No agents defined. Please define at least one agent.")
-                validate_server_references(self.context, self.agents)
-                validate_workflow_references(self.agents)
-
-                # Get a model factory function
-                def model_factory_func(model=None, request_params=None):
-                    return get_model_factory(
-                        self.context,
-                        model=model,
-                        request_params=request_params,
-                        cli_model=self.args.model if hasattr(self, "args") else None,
-                    )
-
-                # Create all agents in dependency order
-                active_agents = await create_agents_in_dependency_order(
-                    self.app,
-                    self.agents,
-                    model_factory_func,
-                )
-
-                # Create a wrapper with all agents for simplified access
-                wrapper = AgentApp(active_agents)
-
-                # Handle command line options that should be processed after agent initialization
-
-                # Handle --server option
-                if hasattr(self, "args") and self.args.server:
-                    try:
-                        # Print info message if not in quiet mode
-                        if not quiet_mode:
-                            print(f"Starting FastAgent '{self.name}' in server mode")
-                            print(f"Transport: {self.args.transport}")
-                            if self.args.transport == "sse":
-                                print(f"Listening on {self.args.host}:{self.args.port}")
-                            print("Press Ctrl+C to stop")
-
-                        # Create the MCP server
-                        from mcp_agent.mcp_server import AgentMCPServer
-
-                        mcp_server = AgentMCPServer(
-                            agent_app=wrapper,
-                            server_name=f"{self.name}-MCP-Server",
-                        )
-
-                        # Run the server directly (this is a blocking call)
-                        await mcp_server.run_async(
-                            transport=self.args.transport, host=self.args.host, port=self.args.port
-                        )
-                    except KeyboardInterrupt:
-                        if not quiet_mode:
-                            print("\nServer stopped by user (Ctrl+C)")
-                    except Exception as e:
-                        if not quiet_mode:
-                            print(f"\nServer stopped with error: {e}")
-
-                    # Exit after server shutdown
-                    raise SystemExit(0)
-
-                # Handle direct message sending if --message is provided
-                if self.args.message:
-                    agent_name = self.args.agent
-                    message = self.args.message
-
-                    if agent_name not in active_agents:
-                        available_agents = ", ".join(active_agents.keys())
-                        print(
-                            f"\n\nError: Agent '{agent_name}' not found. Available agents: {available_agents}"
-                        )
-                        raise SystemExit(1)
-
-                    try:
-                        # Get response from the agent
-                        agent = active_agents[agent_name]
-                        response = await agent.send(message)
-
-                        # In quiet mode, just print the raw response
-                        # The chat display should already be turned off by the configuration
-                        if self.args.quiet:
-                            print(f"{response}")
-
-                        raise SystemExit(0)
-                    except Exception as e:
-                        print(f"\n\nError sending message to agent '{agent_name}': {str(e)}")
-                        raise SystemExit(1)
-
-                if self.args.prompt_file:
-                    agent_name = self.args.agent
-                    prompt: List[PromptMessageMultipart] = load_prompt_multipart(
-                        Path(self.args.prompt_file)
-                    )
-                    if agent_name not in active_agents:
-                        available_agents = ", ".join(active_agents.keys())
-                        print(
-                            f"\n\nError: Agent '{agent_name}' not found. Available agents: {available_agents}"
-                        )
-                        raise SystemExit(1)
-
-                    try:
-                        # Get response from the agent
-                        agent = active_agents[agent_name]
-                        response = await agent.generate(prompt)
-
-                        # In quiet mode, just print the raw response
-                        # The chat display should already be turned off by the configuration
-                        if self.args.quiet:
-                            print(f"{response.last_text()}")
-
-                        raise SystemExit(0)
-                    except Exception as e:
-                        print(f"\n\nError sending message to agent '{agent_name}': {str(e)}")
-                        raise SystemExit(1)
-
-                yield wrapper
-
-            except (
-                ServerConfigError,
-                ProviderKeyError,
-                AgentConfigError,
-                ServerInitializationError,
-                ModelConfigError,
-                CircularDependencyError,
-                PromptExitError,
-            ) as e:
-                had_error = True
-                self._handle_error(e)
-                raise SystemExit(1)
-
-            finally:
-                # Clean up any active agents
-                if active_agents and not had_error:
-                    for agent in active_agents.values():
-                        try:
-                            await agent.shutdown()
-                        except Exception:
-                            pass
+        # Handle quiet mode and CLI model override safely
+        # Define these *before* they are used, checking if self.args exists and has the attributes
+        quiet_mode = hasattr(self.args, "quiet") and self.args.quiet
+        cli_model_override = (
+            self.args.model if hasattr(self.args, "model") and self.args.model else None
+        )  # Define cli_model_override here
+        tracer = trace.get_tracer(__name__)
+        with tracer.start_as_current_span(self.name):
+            try:
+                async with self.app.run():
+                    # Apply quiet mode if requested
+                    if (
+                        quiet_mode
+                        and hasattr(self.app.context, "config")
+                        and hasattr(self.app.context.config, "logger")
+                    ):
+                        # Update our app's config directly
+                        self.app.context.config.logger.progress_display = False
+                        self.app.context.config.logger.show_chat = False
+                        self.app.context.config.logger.show_tools = False
+
+                        # Directly disable the progress display singleton
+                        from mcp_agent.progress_display import progress_display
+
+                        progress_display.stop()
+
+                    # Pre-flight validation
+                    if 0 == len(self.agents):
+                        raise AgentConfigError(
+                            "No agents defined. Please define at least one agent."
+                        )
+                    validate_server_references(self.context, self.agents)
+                    validate_workflow_references(self.agents)
+
+                    # Get a model factory function
+                    # Now cli_model_override is guaranteed to be defined
+                    def model_factory_func(model=None, request_params=None):
+                        return get_model_factory(
+                            self.context,
+                            model=model,
+                            request_params=request_params,
+                            cli_model=cli_model_override,  # Use the variable defined above
+                        )
+
+                    # Create all agents in dependency order
+                    active_agents = await create_agents_in_dependency_order(
+                        self.app,
+                        self.agents,
+                        model_factory_func,
+                    )
+
+                    # Create a wrapper with all agents for simplified access
+                    wrapper = AgentApp(active_agents)
+
+                    # Handle command line options that should be processed after agent initialization
+
+                    # Handle --server option
+                    # Check if parse_cli_args was True before checking self.args.server
+                    if hasattr(self.args, "server") and self.args.server:
+                        try:
+                            # Print info message if not in quiet mode
+                            if not quiet_mode:
+                                print(f"Starting FastAgent '{self.name}' in server mode")
+                                print(f"Transport: {self.args.transport}")
+                                if self.args.transport == "sse":
+                                    print(f"Listening on {self.args.host}:{self.args.port}")
+                                print("Press Ctrl+C to stop")
+
+                            # Create the MCP server
+                            from mcp_agent.mcp_server import AgentMCPServer
+
+                            mcp_server = AgentMCPServer(
+                                agent_app=wrapper,
+                                server_name=f"{self.name}-MCP-Server",
+                            )
+
+                            # Run the server directly (this is a blocking call)
+                            await mcp_server.run_async(
+                                transport=self.args.transport,
+                                host=self.args.host,
+                                port=self.args.port,
+                            )
+                        except KeyboardInterrupt:
+                            if not quiet_mode:
+                                print("\nServer stopped by user (Ctrl+C)")
+                        except Exception as e:
+                            if not quiet_mode:
+                                print(f"\nServer stopped with error: {e}")
+
+                        # Exit after server shutdown
+                        raise SystemExit(0)
+
+                    # Handle direct message sending if --message is provided
+                    if self.args.message:
+                        agent_name = self.args.agent
+                        message = self.args.message
+
+                        if agent_name not in active_agents:
+                            available_agents = ", ".join(active_agents.keys())
+                            print(
+                                f"\n\nError: Agent '{agent_name}' not found. Available agents: {available_agents}"
+                            )
+                            raise SystemExit(1)
+
+                        try:
+                            # Get response from the agent
+                            agent = active_agents[agent_name]
+                            response = await agent.send(message)
+
+                            # In quiet mode, just print the raw response
+                            # The chat display should already be turned off by the configuration
+                            if self.args.quiet:
+                                print(f"{response}")
+
+                            raise SystemExit(0)
+                        except Exception as e:
+                            print(f"\n\nError sending message to agent '{agent_name}': {str(e)}")
+                            raise SystemExit(1)
+
+                    if self.args.prompt_file:
+                        agent_name = self.args.agent
+                        prompt: List[PromptMessageMultipart] = load_prompt_multipart(
+                            Path(self.args.prompt_file)
+                        )
+                        if agent_name not in active_agents:
+                            available_agents = ", ".join(active_agents.keys())
+                            print(
+                                f"\n\nError: Agent '{agent_name}' not found. Available agents: {available_agents}"
+                            )
+                            raise SystemExit(1)
+
+                        try:
+                            # Get response from the agent
+                            agent = active_agents[agent_name]
+                            response = await agent.generate(prompt)
+
+                            # In quiet mode, just print the raw response
+                            # The chat display should already be turned off by the configuration
+                            if self.args.quiet:
+                                print(f"{response.last_text()}")
+
+                            raise SystemExit(0)
+                        except Exception as e:
+                            print(f"\n\nError sending message to agent '{agent_name}': {str(e)}")
+                            raise SystemExit(1)
+
+                    yield wrapper
+
+                except (
+                    ServerConfigError,
+                    ProviderKeyError,
+                    AgentConfigError,
+                    ServerInitializationError,
+                    ModelConfigError,
+                    CircularDependencyError,
+                    PromptExitError,
+                ) as e:
+                    had_error = True
+                    self._handle_error(e)
+                    raise SystemExit(1)
+
+                finally:
+                    # Clean up any active agents
+                    if active_agents and not had_error:
+                        for agent in active_agents.values():
+                            try:
+                                await agent.shutdown()
+                            except Exception:
+                                pass
 
     def _handle_error(self, e: Exception, error_type: Optional[str] = None) -> None:
         """
mcp_agent/executor/executor.py CHANGED
@@ -1,4 +1,5 @@
 import asyncio
+import contextvars
 import functools
 from abc import ABC, abstractmethod
 from contextlib import asynccontextmanager
@@ -206,13 +207,13 @@ class AsyncioExecutor(Executor):
         else:
             # Execute the callable and await if it returns a coroutine
             loop = asyncio.get_running_loop()
-
+            ctx = contextvars.copy_context()
             # If kwargs are provided, wrap the function with partial
             if kwargs:
                 wrapped_task = functools.partial(task, **kwargs)
-                result = await loop.run_in_executor(None, wrapped_task)
+                result = await loop.run_in_executor(None, lambda: ctx.run(wrapped_task))
             else:
-                result = await loop.run_in_executor(None, task)
+                result = await loop.run_in_executor(None, lambda: ctx.run(task))
 
             # Handle case where the sync function returns a coroutine
             if asyncio.iscoroutine(result):
@@ -234,12 +235,10 @@ class AsyncioExecutor(Executor):
         *tasks: Callable[..., R] | Coroutine[Any, Any, R],
         **kwargs: Any,
     ) -> List[R | BaseException]:
-        # TODO: saqadri - validate if async with self.execution_context() is needed here
-        async with self.execution_context():
-            return await asyncio.gather(
-                *(self._execute_task(task, **kwargs) for task in tasks),
-                return_exceptions=True,
-            )
+        return await asyncio.gather(
+            *(self._execute_task(task, **kwargs) for task in tasks),
+            return_exceptions=True,
+        )
 
     async def execute_streaming(
         self,
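
Copying the `contextvars` context before `run_in_executor` is what keeps the active OpenTelemetry span (which is stored in a context variable) visible inside thread-pool tasks: unlike `asyncio.to_thread`, `run_in_executor` does not propagate context by itself. A minimal repro of the pattern, with illustrative names:

```python
import asyncio
import contextvars

request_id = contextvars.ContextVar("request_id", default=None)

def work():
    # Without ctx.run() this would read None on the worker thread.
    return request_id.get()

async def main():
    request_id.set("abc-123")
    loop = asyncio.get_running_loop()
    ctx = contextvars.copy_context()
    print(await loop.run_in_executor(None, lambda: ctx.run(work)))  # abc-123

asyncio.run(main())
```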
mcp_agent/llm/augmented_llm.py CHANGED
@@ -171,6 +171,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         # We never expect this for structured() calls - this is for interactive use - developers
         # can do this programatically
         # TODO -- create a "fast-agent" control role rather than magic strings
+
         if multipart_messages[-1].first_text().startswith("***SAVE_HISTORY"):
             parts: list[str] = multipart_messages[-1].first_text().split(" ", 1)
             filename: str = (
@@ -220,6 +221,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         request_params: RequestParams | None = None,
     ) -> Tuple[ModelT | None, PromptMessageMultipart]:
         """Return a structured response from the LLM using the provided messages."""
+
         self._precall(multipart_messages)
         result, assistant_response = await self._apply_prompt_provider_specific_structured(
             multipart_messages, model, request_params
mcp_agent/llm/providers/augmented_llm_openai.py CHANGED
@@ -222,7 +222,10 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
             method="tools/call",
             params=CallToolRequestParams(
                 name=tool_call.function.name,
-                arguments={} if not tool_call.function.arguments or tool_call.function.arguments.strip() == '' else from_json(tool_call.function.arguments, allow_partial=True),
+                arguments={}
+                if not tool_call.function.arguments
+                or tool_call.function.arguments.strip() == ""
+                else from_json(tool_call.function.arguments, allow_partial=True),
             ),
         )
         result = await self.call_tool(tool_call_request, tool_call.id)
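
The reflowed expression above guards against the OpenAI API returning an empty `arguments` string for a no-argument tool call: only non-empty payloads reach `from_json` (pydantic-core's parser, whose `allow_partial` flag tolerates truncated streaming JSON). The same guard in isolation, with an illustrative helper name:

```python
from pydantic_core import from_json

def parse_tool_arguments(raw: str | None) -> dict:
    # Empty or whitespace-only arguments mean "no arguments", not invalid JSON.
    if not raw or raw.strip() == "":
        return {}
    return from_json(raw, allow_partial=True)

assert parse_tool_arguments("") == {}
assert parse_tool_arguments('{"city": "London"}') == {"city": "London"}
```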
mcp_agent/mcp/mcp_aggregator.py CHANGED
@@ -21,6 +21,7 @@ from mcp.types import (
     TextContent,
     Tool,
 )
+from opentelemetry import trace
 from pydantic import AnyUrl, BaseModel, ConfigDict
 
 from mcp_agent.context_dependent import ContextDependent
@@ -469,16 +470,20 @@ class MCPAggregator(ContextDependent):
             },
         )
 
-        return await self._execute_on_server(
-            server_name=server_name,
-            operation_type="tool",
-            operation_name=local_tool_name,
-            method_name="call_tool",
-            method_args={"name": local_tool_name, "arguments": arguments},
-            error_factory=lambda msg: CallToolResult(
-                isError=True, content=[TextContent(type="text", text=msg)]
-            ),
-        )
+        tracer = trace.get_tracer(__name__)
+        with tracer.start_as_current_span(f"MCP Tool: {server_name}/{local_tool_name}"):
+            trace.get_current_span().set_attribute("tool_name", local_tool_name)
+            trace.get_current_span().set_attribute("server_name", server_name)
+            return await self._execute_on_server(
+                server_name=server_name,
+                operation_type="tool",
+                operation_name=local_tool_name,
+                method_name="call_tool",
+                method_args={"name": local_tool_name, "arguments": arguments},
+                error_factory=lambda msg: CallToolResult(
+                    isError=True, content=[TextContent(type="text", text=msg)]
+                ),
+            )
 
     async def get_prompt(
         self,
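
Tool dispatch now gets a span named `MCP Tool: <server>/<tool>` carrying the server and tool names as attributes, which makes per-tool latency filterable in a trace backend. The attribute pattern in isolation (the wrapper function is illustrative):

```python
from opentelemetry import trace

tracer = trace.get_tracer(__name__)

def record_tool_call(server_name: str, tool_name: str) -> None:
    with tracer.start_as_current_span(f"MCP Tool: {server_name}/{tool_name}") as span:
        # Attributes let backends group/filter spans without parsing span names.
        span.set_attribute("tool_name", tool_name)
        span.set_attribute("server_name", server_name)
```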
mcp_agent/mcp/mcp_connection_manager.py CHANGED
@@ -262,7 +262,7 @@ class MCPConnectionManager(ContextDependent):
         if config.transport == "stdio":
             server_params = StdioServerParameters(
                 command=config.command,
-                args=config.args,
+                args=config.args if config.args is not None else [],
                 env={**get_default_environment(), **(config.env or {})},
             )
             # Create custom error handler to ensure all output is captured
mcp_agent/logging/tracing.py DELETED
@@ -1,138 +0,0 @@
-"""
-Telemetry manager that defines distributed tracing decorators for OpenTelemetry traces/spans
-for the Logger module for MCP Agent
-"""
-
-import asyncio
-import functools
-from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple
-
-from opentelemetry import trace
-from opentelemetry.context import Context as OtelContext
-from opentelemetry.propagate import extract as otel_extract
-from opentelemetry.trace import SpanKind, Status, StatusCode, set_span_in_context
-from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
-
-from mcp_agent.context_dependent import ContextDependent
-
-if TYPE_CHECKING:
-    from mcp_agent.context import Context
-
-
-class TelemetryManager(ContextDependent):
-    """
-    Simple manager for creating OpenTelemetry spans automatically.
-    Decorator usage: @telemetry.traced("SomeSpanName")
-    """
-
-    def __init__(self, context: Optional["Context"] = None, **kwargs) -> None:
-        # If needed, configure resources, exporters, etc.
-        # E.g.: from opentelemetry.sdk.trace import TracerProvider
-        # trace.set_tracer_provider(TracerProvider(...))
-        super().__init__(context=context, **kwargs)
-
-    def traced(
-        self,
-        name: str | None = None,
-        kind: SpanKind = SpanKind.INTERNAL,
-        attributes: Dict[str, Any] = None,
-    ) -> Callable:
-        """
-        Decorator that automatically creates and manages a span for a function.
-        Works for both async and sync functions.
-        """
-
-        def decorator(func):
-            span_name = name or f"{func.__module__}.{func.__qualname__}"
-
-            tracer = self.context.tracer or trace.get_tracer("mcp_agent")
-
-            @functools.wraps(func)
-            async def async_wrapper(*args, **kwargs):
-                with tracer.start_as_current_span(span_name, kind=kind) as span:
-                    if attributes:
-                        for k, v in attributes.items():
-                            span.set_attribute(k, v)
-                    # Record simple args
-                    self._record_args(span, args, kwargs)
-                    try:
-                        res = await func(*args, **kwargs)
-                        return res
-                    except Exception as e:
-                        span.record_exception(e)
-                        span.set_status(Status(StatusCode.ERROR))
-                        raise
-
-            @functools.wraps(func)
-            def sync_wrapper(*args, **kwargs):
-                with tracer.start_as_current_span(span_name, kind=kind) as span:
-                    if attributes:
-                        for k, v in attributes.items():
-                            span.set_attribute(k, v)
-                    # Record simple args
-                    self._record_args(span, args, kwargs)
-                    try:
-                        res = func(*args, **kwargs)
-                        return res
-                    except Exception as e:
-                        span.record_exception(e)
-                        span.set_status(Status(StatusCode.ERROR))
-                        raise
-
-            if asyncio.iscoroutinefunction(func):
-                return async_wrapper
-            else:
-                return sync_wrapper
-
-        return decorator
-
-    def _record_args(self, span, args, kwargs) -> None:
-        """Optionally record primitive args as span attributes."""
-        for i, arg in enumerate(args):
-            if isinstance(arg, (str, int, float, bool)):
-                span.set_attribute(f"arg_{i}", str(arg))
-        for k, v in kwargs.items():
-            if isinstance(v, (str, int, float, bool)):
-                span.set_attribute(k, str(v))
-
-
-class MCPRequestTrace:
-    """Helper class for trace context propagation in MCP"""
-
-    @staticmethod
-    def start_span_from_mcp_request(
-        method: str, params: Dict[str, Any]
-    ) -> Tuple[trace.Span, OtelContext]:
-        """Extract trace context from incoming MCP request and start a new span"""
-        # Extract trace context from _meta if present
-        carrier = {}
-        _meta = params.get("_meta", {})
-        if "traceparent" in _meta:
-            carrier["traceparent"] = _meta["traceparent"]
-        if "tracestate" in _meta:
-            carrier["tracestate"] = _meta["tracestate"]
-
-        # Extract context and start span
-        ctx = otel_extract(carrier, context=OtelContext())
-        tracer = trace.get_tracer(__name__)
-        span = tracer.start_span(method, context=ctx, kind=SpanKind.SERVER)
-        return span, set_span_in_context(span)
-
-    @staticmethod
-    def inject_trace_context(arguments: Dict[str, Any]) -> Dict[str, Any]:
-        """Inject current trace context into outgoing MCP request arguments"""
-        carrier = {}
-        TraceContextTextMapPropagator().inject(carrier)
-
-        # Create or update _meta with trace context
-        _meta = arguments.get("_meta", {})
-        if "traceparent" in carrier:
-            _meta["traceparent"] = carrier["traceparent"]
-        if "tracestate" in carrier:
-            _meta["tracestate"] = carrier["tracestate"]
-        arguments["_meta"] = _meta
-
-        return arguments
-
-
-telemetry = TelemetryManager()