fast-agent-mcp 0.2.17__py3-none-any.whl → 0.2.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: fast-agent-mcp
- Version: 0.2.17
+ Version: 0.2.19
  Summary: Define, Prompt and Test MCP enabled Agents and Workflows
  Author-email: Shaun Smith <fastagent@llmindset.co.uk>, Sarmad Qadri <sarmad@lastmileai.dev>
  License: Apache License
@@ -214,16 +214,16 @@ Requires-Dist: aiohttp>=3.11.13
  Requires-Dist: anthropic>=0.49.0
  Requires-Dist: fastapi>=0.115.6
  Requires-Dist: mcp==1.6.0
- Requires-Dist: numpy>=2.2.1
  Requires-Dist: openai>=1.63.2
  Requires-Dist: opentelemetry-distro>=0.50b0
  Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.29.0
+ Requires-Dist: opentelemetry-instrumentation-anthropic>=0.39.3
+ Requires-Dist: opentelemetry-instrumentation-openai>=0.39.3
  Requires-Dist: prompt-toolkit>=3.0.50
  Requires-Dist: pydantic-settings>=2.7.0
  Requires-Dist: pydantic>=2.10.4
  Requires-Dist: pyyaml>=6.0.2
  Requires-Dist: rich>=13.9.4
- Requires-Dist: scikit-learn>=1.6.0
  Requires-Dist: typer>=0.15.1
  Provides-Extra: dev
  Requires-Dist: anthropic>=0.42.0; extra == 'dev'
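
The dependency hunk above drops `numpy` and `scikit-learn` and adds OpenTelemetry instrumentation packages for the Anthropic and OpenAI SDKs. As a rough sketch of what those packages provide, assuming the standard instrumentor API they ship (fast-agent may wire this up internally, so user code like this is illustrative only):

```python
# Hypothetical manual setup using the two new opentelemetry-instrumentation-*
# dependencies; the instrumentor class names are assumptions, not part of fast-agent.
from opentelemetry.instrumentation.anthropic import AnthropicInstrumentor
from opentelemetry.instrumentation.openai import OpenAIInstrumentor

AnthropicInstrumentor().instrument()  # emit spans for Anthropic SDK calls
OpenAIInstrumentor().instrument()     # emit spans for OpenAI SDK calls
```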
@@ -251,7 +251,7 @@ Description-Content-Type: text/markdown
  ## Overview

  > [!TIP]
- > Documentation site is in production here : https://fast-agent.ai. Feel free to feed back what's helpful and what's not. llms.txt link is here: https://fast-agent.ai/llms.txt
+ > Documentation site is in production here : https://fast-agent.ai. Feel free to feed back what's helpful and what's not. There is also an LLMs.txt [here](https://fast-agent.ai/llms.txt)

  **`fast-agent`** enables you to create and interact with sophisticated Agents and Workflows in minutes. It is the first framework with complete, end-to-end tested MCP Feature support including Sampling. Both Anthropic (Haiku, Sonnet, Opus) and OpenAI models (gpt-4o/gpt-4.1 family, o1/o3 family) are supported.

@@ -259,7 +259,8 @@ The simple declarative syntax lets you concentrate on composing your Prompts and

  `fast-agent` is multi-modal, supporting Images and PDFs for both Anthropic and OpenAI endpoints via Prompts, Resources and MCP Tool Call results. The inclusion of passthrough and playback LLMs enable rapid development and test of Python glue-code for your applications.

- > [!TIP] > `fast-agent` is now MCP Native! Coming Soon - Full Documentation Site and Further MCP Examples.
+ > [!IMPORTANT]
+ > `fast-agent` The fast-agent documentation repo is here: https://github.com/evalstate/fast-agent-docs. Please feel free to submit PRs for documentation, experience reports or other content you think others may find helpful. All help and feedback warmly received.

  ### Agent Application Development

@@ -449,10 +450,10 @@ If the Generator has `use_history` off, the previous iteration is returned when

  ```python
  @fast.evaluator_optimizer(
- name="researcher"
- generator="web_searcher"
- evaluator="quality_assurance"
- min_rating="EXCELLENT"
+ name="researcher",
+ generator="web_searcher",
+ evaluator="quality_assurance",
+ min_rating="EXCELLENT",
  max_refinements=3
  )

@@ -470,8 +471,8 @@ Routers use an LLM to assess a message, and route it to the most appropriate Age

  ```python
  @fast.router(
- name="route"
- agents["agent1","agent2","agent3"]
+ name="route",
+ agents=["agent1","agent2","agent3"]
  )
  ```

@@ -483,7 +484,7 @@ Given a complex task, the Orchestrator uses an LLM to generate a plan to divide

  ```python
  @fast.orchestrator(
- name="orchestrate"
+ name="orchestrate",
  agents=["task1","task2","task3"]
  )
  ```
@@ -523,7 +524,7 @@ agent["greeter"].send("Good Evening!") # Dictionary access is supported
  servers=["filesystem"], # list of MCP Servers for the agent
  model="o3-mini.high", # specify a model for the agent
  use_history=True, # agent maintains chat history
- request_params=RequestParams(temperature= 0.7)), # additional parameters for the LLM (or RequestParams())
+ request_params=RequestParams(temperature= 0.7), # additional parameters for the LLM (or RequestParams())
  human_input=True, # agent can request human input
  )
  ```
@@ -535,7 +536,7 @@ agent["greeter"].send("Good Evening!") # Dictionary access is supported
  name="chain", # name of the chain
  sequence=["agent1", "agent2", ...], # list of agents in execution order
  instruction="instruction", # instruction to describe the chain for other workflows
- cumulative=False # whether to accumulate messages through the chain
+ cumulative=False, # whether to accumulate messages through the chain
  continue_with_final=True, # open chat with agent at end of chain after prompting
  )
  ```
@@ -1,53 +1,54 @@
  mcp_agent/__init__.py,sha256=-AIoeL4c9UAp_P4U0z-uIWTTmQWdihOis5nbQ5L_eao,1664
  mcp_agent/app.py,sha256=WRsiUdwy_9IAnaGRDwuLm7pzgQpt2wgsg10vBOpfcwM,5539
- mcp_agent/config.py,sha256=eEknK8I7DCrh1iGqzva0TemHMzjlbfhFhgwULqaKjDs,12218
+ mcp_agent/config.py,sha256=4MB8QBwGb6MPPRc85p-xdCzRloGwpWsPfEgxsoS_4N0,12159
  mcp_agent/console.py,sha256=Gjf2QLFumwG1Lav__c07X_kZxxEUSkzV-1_-YbAwcwo,813
- mcp_agent/context.py,sha256=GEZQ64UCvLj6bRXGNm35Hxd5jxrI_P52vhC3opnWwsg,7246
+ mcp_agent/context.py,sha256=fHyDjeZpHYRBOCVY58hVcFQxybOXSJmuui2o51kLbuA,7307
  mcp_agent/context_dependent.py,sha256=QXfhw3RaQCKfscEEBRGuZ3sdMWqkgShz2jJ1ivGGX1I,1455
  mcp_agent/event_progress.py,sha256=3dqk5Pn1tAG_m_wn4IPNwLWLyzm7CyKIidqHN-4l-JY,2736
  mcp_agent/mcp_server_registry.py,sha256=pSD3euU-Oc2LAVenqkLU7UmutAzk6A9liYVLjCj4J70,10068
  mcp_agent/progress_display.py,sha256=GeJU9VUt6qKsFVymG688hCMVCsAygG9ifiiEb5IcbN4,361
  mcp_agent/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mcp_agent/agents/agent.py,sha256=GgaUHoilgqzh9PQYr5k2WiPj4pagwicf9-ZLFsHkNNo,3848
- mcp_agent/agents/base_agent.py,sha256=c_jhC6tt6LdvS1IY4AVdMNj3n_OtluzHQ1-_NEOseEQ,25039
+ mcp_agent/agents/base_agent.py,sha256=fjDr01-hZ9sB3ghI4DlXYVePP0s5f9pmtLH-N3X8bRg,25294
  mcp_agent/agents/workflow/__init__.py,sha256=HloteEW6kalvgR0XewpiFAqaQlMPlPJYg5p3K33IUzI,25
  mcp_agent/agents/workflow/chain_agent.py,sha256=eIlImirrSXkqBJmPuAJgOKis81Cl6lZEGM0-6IyaUV8,6105
  mcp_agent/agents/workflow/evaluator_optimizer.py,sha256=ysUMGM2NzeCIutgr_vXH6kUPpZMw0cX4J_Wl1r8eT84,13296
  mcp_agent/agents/workflow/orchestrator_agent.py,sha256=byZe4bx7D_7BSZZ3hN8BNUWVFPYeqeUwDUCLTRC8mlI,21583
  mcp_agent/agents/workflow/orchestrator_models.py,sha256=5P_aXADVT4Et8qT4e1cb9RelmHX5dCRrzu8j8T41Kdg,7230
  mcp_agent/agents/workflow/orchestrator_prompts.py,sha256=EXKEI174sshkZyPPEnWbwwNafzSPuA39MXL7iqG9cWc,9106
- mcp_agent/agents/workflow/parallel_agent.py,sha256=GQTxAqwrPEdle-rPWMvoLOuzE_X69_HYEgYSm98eXdM,7087
- mcp_agent/agents/workflow/router_agent.py,sha256=NugAJcA1ooZ-TNLNh7H26xIFChZoryxofJ7fTkrw4cU,9128
+ mcp_agent/agents/workflow/parallel_agent.py,sha256=JaQFp35nmAdoBRLAwx8BfnK7kirVq9PMw24LQ3ZEzoc,7705
+ mcp_agent/agents/workflow/router_agent.py,sha256=yZUUhAmni2wAutQJ32EQ264Uh_MIriVP-AmhKFrYTeQ,9472
  mcp_agent/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mcp_agent/cli/__main__.py,sha256=AVZ7tQFhU_sDOGuUGJq8ujgKtcxsYJBJwHbVaaiRDlI,166
- mcp_agent/cli/main.py,sha256=m0Ndb8y5qCfYyTDBDaJ4g1TuUiX-xtTKNJJDPsltw6o,2884
+ mcp_agent/cli/main.py,sha256=XjrgXMBaPKkVqAFo8T9LJz6Tp1-ivrKDOuNYWke99YA,3090
  mcp_agent/cli/terminal.py,sha256=GRwD-RGW7saIz2IOWZn5vD6JjiArscELBThm1GTFkuI,1065
  mcp_agent/cli/commands/check_config.py,sha256=9Ryxo_fLInm3YKdYv46yLrAJgnQtMisGreu6Kkriw2g,16677
+ mcp_agent/cli/commands/go.py,sha256=DJpmq4n-p5r8BXH10UqBOexmLND-zSODl5f-w4noR5Q,4304
  mcp_agent/cli/commands/quickstart.py,sha256=SM3CHMzDgvTxIpKjFuX9BrS_N1vRoXNBDaO90aWx1Rk,14586
- mcp_agent/cli/commands/setup.py,sha256=hToVzlCDucf1RI6Jri7BPuSeNA6Y67ONG-nyUT-n7tE,6472
+ mcp_agent/cli/commands/setup.py,sha256=eOEd4TL-b0DaDeSJMGOfNOsTEItoZ67W88eTP4aP-bo,6482
  mcp_agent/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mcp_agent/core/agent_app.py,sha256=5nQJNo8DocIRWiX4pVKAHUZF8s6HWpc-hJnfzl_1v1c,9697
  mcp_agent/core/agent_types.py,sha256=bQVQMTwKH7qHIJsNglj4C_d6PNFBBzC_0RIkcENSII4,1459
  mcp_agent/core/direct_decorators.py,sha256=aaVR4G6a8H9pVg6X_PGEZ8GzreP0ZO1-48ksIKvMNDI,14452
  mcp_agent/core/direct_factory.py,sha256=d96OM1yS3eIocIiaA9FQt6C2zr6VDUyCJBTZCp_D4bs,17912
- mcp_agent/core/enhanced_prompt.py,sha256=kEgeD7F1s8LRZDc3Xr6CIxQxewxXy9z-0xZsxfIblsY,18851
+ mcp_agent/core/enhanced_prompt.py,sha256=bzvcengS7XzHWB7NWhyxHM3hhO2HI4zP5DbGXAOw0Jw,19155
  mcp_agent/core/error_handling.py,sha256=xoyS2kLe0eG0bj2eSJCJ2odIhGUve2SbDR7jP-A-uRw,624
  mcp_agent/core/exceptions.py,sha256=ENAD_qGG67foxy6vDkIvc-lgopIUQy6O7zvNPpPXaQg,2289
- mcp_agent/core/fastagent.py,sha256=NahciY0tY6K-rvSzkSgahzGXAWa-1nN2JgMlvRTze6E,18645
- mcp_agent/core/interactive_prompt.py,sha256=9s5c-XXGAKAqYh1SUVQIMRGFIcxfFjkaPmke1tyInaA,23854
+ mcp_agent/core/fastagent.py,sha256=OkS1mazgMUJyA02RbW-9z2nI6XQ4kKCLfyjgAkgv0O8,22708
+ mcp_agent/core/interactive_prompt.py,sha256=w3VyRzW4hzn0xhWZRwo_qRRAD5WVSrJYe8QDe1XZ55Y,24252
  mcp_agent/core/mcp_content.py,sha256=2D7KHY9mG_vxoDwFLKvsPQV9VRIzHItM7V-jcEnACh8,8878
  mcp_agent/core/prompt.py,sha256=qnintOUGEoDPYLI9bu9G2OlgVMCe5ZPUZilgMzydXhc,7919
  mcp_agent/core/request_params.py,sha256=loYf13DN7e-DsdYRd37jWkJWJGwVBL-iFkcANP1J60Q,1366
- mcp_agent/core/validation.py,sha256=Kmio1Xx-xNyCVd03RLfTxofEAWmVDPxUnQoyOSUMjR0,11445
+ mcp_agent/core/validation.py,sha256=RIBKFlh0GJg4rTcFQXoXp8A0sK1HpsCigKcYSK3gFaY,12090
  mcp_agent/executor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- mcp_agent/executor/executor.py,sha256=MzLSnW9nHrLHYChR3oQa5B8dajQGX26q6-S2BJCxv0o,9507
+ mcp_agent/executor/executor.py,sha256=E44p6d-o3OMRoP_dNs_cDnyti91LQ3P9eNU88mSi1kc,9462
  mcp_agent/executor/task_registry.py,sha256=PCALFeYtkQrPBg4RBJnlA0aDI8nHclrNkHGUS4kV3W8,1242
  mcp_agent/executor/workflow_signal.py,sha256=Cg1uZBk3fn8kXhPOg-wINNuVaf3v9pvLD6NbqWy5Z6E,11142
  mcp_agent/human_input/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mcp_agent/human_input/handler.py,sha256=s712Z5ssTCwjL9-VKoIdP5CtgMh43YvepynYisiWTTA,3144
  mcp_agent/human_input/types.py,sha256=RtWBOVzy8vnYoQrc36jRLn8z8N3C4pDPMBN5vF6qM5Y,1476
  mcp_agent/llm/__init__.py,sha256=d8zgwG-bRFuwiMNMYkywg_qytk4P8lawyld_meuUmHI,68
- mcp_agent/llm/augmented_llm.py,sha256=4zKEErhSROrcc1fojCXRFb13UlaOzWvB0jO_XdlcCWM,23378
+ mcp_agent/llm/augmented_llm.py,sha256=fP2uWIFY9qaEuY0mehti4A3NjhvGuj-TebLI0FVTbcM,23380
  mcp_agent/llm/augmented_llm_passthrough.py,sha256=zHcctNpwg4EFJvD1x9Eg443SVX-uyzFphLikwF_yVE0,6288
  mcp_agent/llm/augmented_llm_playback.py,sha256=6L_RWIK__R67oZK7u3Xt3hWy1T2LnHXIO-efqgP3tPw,4177
  mcp_agent/llm/memory.py,sha256=HQ_c1QemOUjrkY6Z2omE6BG5fXga7y4jN7KCMOuGjPs,3345
@@ -59,11 +60,11 @@ mcp_agent/llm/sampling_converter.py,sha256=C7wPBlmT0eD90XWabC22zkxsrVHKCrjwIwg6c
  mcp_agent/llm/sampling_format_converter.py,sha256=xGz4odHpOcP7--eFaJaFtUR8eR9jxZS7MnLH6J7n0EU,1263
  mcp_agent/llm/providers/__init__.py,sha256=heVxtmuqFJOnjjxHz4bWSqTAxXoN1E8twC_gQ_yJpHk,265
  mcp_agent/llm/providers/anthropic_utils.py,sha256=vYDN5G5jKMhD2CQg8veJYab7tvvzYkDMq8M1g_hUAQg,3275
- mcp_agent/llm/providers/augmented_llm_anthropic.py,sha256=GuqBJvkaM8SQ3NYBnMlKrV56dACLafhKec-Hl84qOi8,15344
- mcp_agent/llm/providers/augmented_llm_deepseek.py,sha256=MHsCffnrXpiWdUCpVTUblILVBTQtBnbX7atwny0X2N8,1210
+ mcp_agent/llm/providers/augmented_llm_anthropic.py,sha256=poouQMsDoZSH-5a_TL2Z2EFPSqFlsAgXuKuYcqge-Gg,15468
+ mcp_agent/llm/providers/augmented_llm_deepseek.py,sha256=NiZK5nv91ZS2VgVFXpbsFNFYLsLcppcbo_RstlRMd7I,1145
  mcp_agent/llm/providers/augmented_llm_generic.py,sha256=5Uq8ZBhcFuQTt7koP_5ykolREh2iWu8zKhNbh3pM9lQ,1210
  mcp_agent/llm/providers/augmented_llm_google.py,sha256=N0a2fphVtkvNYxKQpEX6J4tlO1C_mRw4sw3LBXnrOeI,1130
- mcp_agent/llm/providers/augmented_llm_openai.py,sha256=YDumEOjuhvAcqOOdw6xsy902lyC30iqgl1V2lS6PfTA,13911
+ mcp_agent/llm/providers/augmented_llm_openai.py,sha256=XFFoIMmXCoigC98zrR0_1c7DsyS4ep7hLvklmdn4jqU,14085
  mcp_agent/llm/providers/augmented_llm_openrouter.py,sha256=V_TlVKm92GHBxYIo6gpvH_6cAaIdppS25Tz6x5T7LW0,2341
  mcp_agent/llm/providers/multipart_converter_anthropic.py,sha256=t5lHYGfFUacJldnrVtMNW-8gEMoto8Y7hJkDrnyZR-Y,16650
  mcp_agent/llm/providers/multipart_converter_openai.py,sha256=XPIulWntNpZWNGWrc240StPzok2RqrDAV7OigDwQ1uU,15850
@@ -77,15 +78,14 @@ mcp_agent/logging/json_serializer.py,sha256=qkfxnR9ka6OgvwSpM2CggELbEtzzkApm0s_K
  mcp_agent/logging/listeners.py,sha256=_S4Jp5_KWp0kUfrx4BxDdNCeQK3MNT3Zi9AaolPri7A,6648
  mcp_agent/logging/logger.py,sha256=l02OGX_c5FOyH0rspd4ZvnkJcbb0FahhUhlh2KI8mqE,10724
  mcp_agent/logging/rich_progress.py,sha256=oY9fjb4Tyw6887v8sgO6EGIK4lnmIoR3NNxhA_-Ln_M,4893
- mcp_agent/logging/tracing.py,sha256=d5lSXakzzi5PtQpUkVkOnYaGX8NduGPq__S7vx-Ln8U,5187
  mcp_agent/logging/transport.py,sha256=m8YsLLu5T8eof_ndpLQs4gHOzqqEL98xsVwBwDsBfxI,17335
  mcp_agent/mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mcp_agent/mcp/gen_client.py,sha256=fAVwFVCgSamw4PwoWOV4wrK9TABx1S_zZv8BctRyF2k,3030
  mcp_agent/mcp/interfaces.py,sha256=PAou8znAl2HgtvfCpLQOZFbKra9F72OcVRfBJbboNX8,6965
  mcp_agent/mcp/logger_textio.py,sha256=vljC1BtNTCxBAda9ExqNB-FwVNUZIuJT3h1nWmCjMws,3172
  mcp_agent/mcp/mcp_agent_client_session.py,sha256=RMYNltc2pDIzxwEJSS5589RbvPO0KWV4Y3jSyAmhKf0,4181
- mcp_agent/mcp/mcp_aggregator.py,sha256=YVLqcv5dS5Z8yEpQggru69NzqQBFKpXG8_CG0XAN3qk,40219
- mcp_agent/mcp/mcp_connection_manager.py,sha256=FGFF3DruVcHD_8J-VadrRyyrOiiq-N9-_ZzIdx4NUOA,13973
+ mcp_agent/mcp/mcp_aggregator.py,sha256=RjRcYHMKt5Wn85JWVar6X0hZLYtBeHrctiBBAK5AYcc,40584
+ mcp_agent/mcp/mcp_connection_manager.py,sha256=R_oGvFkolZJ_i3SizIIlKS_NPjXscsWLSOf1x9Zu0dM,14008
  mcp_agent/mcp/mime_utils.py,sha256=difepNR_gpb4MpMLkBRAoyhDk-AjXUHTiqKvT_VwS1o,1805
  mcp_agent/mcp/prompt_message_multipart.py,sha256=BDwRdNwyWHb2q2bccDb2iR2VlORqVvkvoG3xYzcMpCE,4403
  mcp_agent/mcp/prompt_render.py,sha256=k3v4BZDThGE2gGiOYVQtA6x8WTEdOuXIEnRafANhN1U,2996
@@ -143,8 +143,8 @@ mcp_agent/resources/examples/workflows/parallel.py,sha256=DQ5vY5-h8Qa5QHcYjsWXhZ
  mcp_agent/resources/examples/workflows/router.py,sha256=E4x_-c3l4YW9w1i4ARcDtkdeqIdbWEGfsMzwLYpdbVc,1677
  mcp_agent/resources/examples/workflows/short_story.txt,sha256=X3y_1AyhLFN2AKzCKvucJtDgAFIJfnlbsbGZO5bBWu0,1187
  mcp_agent/ui/console_display.py,sha256=TVGDtJ37hc6UG0ei9g7ZPZZfFNeS1MYozt-Mx8HsPCk,9752
- fast_agent_mcp-0.2.17.dist-info/METADATA,sha256=qH-sDtu59orBDwzB0M73kx9CZUfey-5g2izt3Ewjz0A,29893
- fast_agent_mcp-0.2.17.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- fast_agent_mcp-0.2.17.dist-info/entry_points.txt,sha256=bRniFM5zk3Kix5z7scX0gf9VnmGQ2Cz_Q1Gh7Ir4W00,186
- fast_agent_mcp-0.2.17.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
- fast_agent_mcp-0.2.17.dist-info/RECORD,,
+ fast_agent_mcp-0.2.19.dist-info/METADATA,sha256=slU0l8N_MAEEmGn3bla4jeHibkCq3DGTtngvii6xrro,30142
+ fast_agent_mcp-0.2.19.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ fast_agent_mcp-0.2.19.dist-info/entry_points.txt,sha256=bRniFM5zk3Kix5z7scX0gf9VnmGQ2Cz_Q1Gh7Ir4W00,186
+ fast_agent_mcp-0.2.19.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
+ fast_agent_mcp-0.2.19.dist-info/RECORD,,
mcp_agent/agents/base_agent.py CHANGED
@@ -31,6 +31,7 @@ from mcp.types import (
  TextContent,
  Tool,
  )
+ from opentelemetry import trace
  from pydantic import BaseModel

  from mcp_agent.core.agent_types import AgentConfig, AgentType
@@ -92,6 +93,7 @@ class BaseAgent(MCPAggregator, AgentProtocol):
  )

  self._context = context
+ self.tracer = trace.get_tracer(__name__)
  self.name = self.config.name
  self.instruction = self.config.instruction
  self.functions = functions or []
@@ -588,7 +590,8 @@ class BaseAgent(MCPAggregator, AgentProtocol):
  The LLM's response as a PromptMessageMultipart
  """
  assert self._llm
- return await self._llm.generate(multipart_messages, request_params)
+ with self.tracer.start_as_current_span(f"Agent: '{self.name}' generate"):
+ return await self._llm.generate(multipart_messages, request_params)

  async def structured(
  self,
@@ -609,7 +612,8 @@ class BaseAgent(MCPAggregator, AgentProtocol):
  An instance of the specified model, or None if coercion fails
  """
  assert self._llm
- return await self._llm.structured(multipart_messages, model, request_params)
+ with self.tracer.start_as_current_span(f"Agent: '{self.name}' structured"):
+ return await self._llm.structured(multipart_messages, model, request_params)

  async def apply_prompt_messages(
  self, prompts: List[PromptMessageMultipart], request_params: RequestParams | None = None
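
For context, the pattern these hunks introduce is the stock OpenTelemetry tracing API: create a tracer once, then wrap each LLM call in a named span. A minimal standalone sketch of the same idea (the agent name and `llm` object are placeholders, not part of the package):

```python
from opentelemetry import trace

tracer = trace.get_tracer(__name__)

async def traced_generate(llm, messages, request_params=None):
    # Span name mirrors the f-string used in BaseAgent.generate() above.
    with tracer.start_as_current_span("Agent: 'example' generate"):
        return await llm.generate(messages, request_params)
```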
mcp_agent/agents/workflow/parallel_agent.py CHANGED
@@ -2,6 +2,7 @@ import asyncio
  from typing import Any, List, Optional, Tuple

  from mcp.types import TextContent
+ from opentelemetry import trace

  from mcp_agent.agents.agent import Agent
  from mcp_agent.agents.base_agent import BaseAgent
@@ -18,7 +19,7 @@ class ParallelAgent(BaseAgent):
  This workflow performs both the fan-out and fan-in operations using LLMs.
  From the user's perspective, an input is specified and the output is returned.
  """
-
+
  @property
  def agent_type(self) -> AgentType:
  """Return the type of this agent."""
@@ -62,31 +63,37 @@ class ParallelAgent(BaseAgent):
  Returns:
  The aggregated response from the fan-in agent
  """
- # Execute all fan-out agents in parallel
- responses: List[PromptMessageMultipart] = await asyncio.gather(
- *[agent.generate(multipart_messages, request_params) for agent in self.fan_out_agents]
- )

- # Extract the received message from the input
- received_message: Optional[str] = (
- multipart_messages[-1].all_text() if multipart_messages else None
- )
+ tracer = trace.get_tracer(__name__)
+ with tracer.start_as_current_span(f"Parallel: '{self.name}' generate"):
+ # Execute all fan-out agents in parallel
+ responses: List[PromptMessageMultipart] = await asyncio.gather(
+ *[
+ agent.generate(multipart_messages, request_params)
+ for agent in self.fan_out_agents
+ ]
+ )

- # Convert responses to strings for aggregation
- string_responses = []
- for response in responses:
- string_responses.append(response.all_text())
+ # Extract the received message from the input
+ received_message: Optional[str] = (
+ multipart_messages[-1].all_text() if multipart_messages else None
+ )

- # Format the responses and send to the fan-in agent
- aggregated_prompt = self._format_responses(string_responses, received_message)
+ # Convert responses to strings for aggregation
+ string_responses = []
+ for response in responses:
+ string_responses.append(response.all_text())

- # Create a new multipart message with the formatted responses
- formatted_prompt = PromptMessageMultipart(
- role="user", content=[TextContent(type="text", text=aggregated_prompt)]
- )
+ # Format the responses and send to the fan-in agent
+ aggregated_prompt = self._format_responses(string_responses, received_message)
+
+ # Create a new multipart message with the formatted responses
+ formatted_prompt = PromptMessageMultipart(
+ role="user", content=[TextContent(type="text", text=aggregated_prompt)]
+ )

- # Use the fan-in agent to aggregate the responses
- return await self.fan_in_agent.generate([formatted_prompt], request_params)
+ # Use the fan-in agent to aggregate the responses
+ return await self.fan_in_agent.generate([formatted_prompt], request_params)

  def _format_responses(self, responses: List[Any], message: Optional[str] = None) -> str:
  """
@@ -116,7 +123,7 @@ class ParallelAgent(BaseAgent):

  async def structured(
  self,
- prompt: List[PromptMessageMultipart],
+ multipart_messages: List[PromptMessageMultipart],
  model: type[ModelT],
  request_params: Optional[RequestParams] = None,
  ) -> Tuple[ModelT | None, PromptMessageMultipart]:
@@ -133,27 +140,35 @@ class ParallelAgent(BaseAgent):
  Returns:
  An instance of the specified model, or None if coercion fails
  """
- # Generate parallel responses first
- responses: List[PromptMessageMultipart] = await asyncio.gather(
- *[agent.generate(prompt, request_params) for agent in self.fan_out_agents]
- )

- # Extract the received message
- received_message: Optional[str] = prompt[-1].all_text() if prompt else None
+ tracer = trace.get_tracer(__name__)
+ with tracer.start_as_current_span(f"Parallel: '{self.name}' generate"):
+ # Generate parallel responses first
+ responses: List[PromptMessageMultipart] = await asyncio.gather(
+ *[
+ agent.generate(multipart_messages, request_params)
+ for agent in self.fan_out_agents
+ ]
+ )

- # Convert responses to strings
- string_responses = [response.all_text() for response in responses]
+ # Extract the received message
+ received_message: Optional[str] = (
+ multipart_messages[-1].all_text() if multipart_messages else None
+ )

- # Format the responses for the fan-in agent
- aggregated_prompt = self._format_responses(string_responses, received_message)
+ # Convert responses to strings
+ string_responses = [response.all_text() for response in responses]

- # Create a multipart message
- formatted_prompt = PromptMessageMultipart(
- role="user", content=[TextContent(type="text", text=aggregated_prompt)]
- )
+ # Format the responses for the fan-in agent
+ aggregated_prompt = self._format_responses(string_responses, received_message)
+
+ # Create a multipart message
+ formatted_prompt = PromptMessageMultipart(
+ role="user", content=[TextContent(type="text", text=aggregated_prompt)]
+ )

- # Use the fan-in agent to parse the structured output
- return await self.fan_in_agent.structured([formatted_prompt], model, request_params)
+ # Use the fan-in agent to parse the structured output
+ return await self.fan_in_agent.structured([formatted_prompt], model, request_params)

  async def initialize(self) -> None:
  """
mcp_agent/agents/workflow/router_agent.py CHANGED
@@ -7,6 +7,7 @@ by determining the best agent for a request and dispatching to it.

  from typing import TYPE_CHECKING, Callable, List, Optional, Tuple, Type

+ from opentelemetry import trace
  from pydantic import BaseModel

  from mcp_agent.agents.agent import Agent
@@ -158,17 +159,18 @@ class RouterAgent(BaseAgent):
  Returns:
  The response from the selected agent
  """
+ tracer = trace.get_tracer(__name__)
+ with tracer.start_as_current_span(f"Routing: '{self.name}' generate"):
+ route, warn = await self._route_request(multipart_messages[-1])

- route, warn = await self._route_request(multipart_messages[-1])
+ if not route:
+ return Prompt.assistant(warn or "No routing result or warning received")

- if not route:
- return Prompt.assistant(warn or "No routing result or warning received")
+ # Get the selected agent
+ agent: Agent = self.agent_map[route.agent]

- # Get the selected agent
- agent: Agent = self.agent_map[route.agent]
-
- # Dispatch the request to the selected agent
- return await agent.generate(multipart_messages, request_params)
+ # Dispatch the request to the selected agent
+ return await agent.generate(multipart_messages, request_params)

  async def structured(
  self,
@@ -187,18 +189,21 @@ class RouterAgent(BaseAgent):
  Returns:
  The parsed response from the selected agent, or None if parsing fails
  """
- route, warn = await self._route_request(multipart_messages[-1])

- if not route:
- return None, Prompt.assistant(
- warn or "No routing result or warning received (structured)"
- )
+ tracer = trace.get_tracer(__name__)
+ with tracer.start_as_current_span(f"Routing: '{self.name}' structured"):
+ route, warn = await self._route_request(multipart_messages[-1])
+
+ if not route:
+ return None, Prompt.assistant(
+ warn or "No routing result or warning received (structured)"
+ )

- # Get the selected agent
- agent: Agent = self.agent_map[route.agent]
+ # Get the selected agent
+ agent: Agent = self.agent_map[route.agent]

- # Dispatch the request to the selected agent
- return await agent.structured(multipart_messages, model, request_params)
+ # Dispatch the request to the selected agent
+ return await agent.structured(multipart_messages, model, request_params)

  async def _route_request(
  self, message: PromptMessageMultipart
mcp_agent/cli/commands/go.py ADDED
@@ -0,0 +1,133 @@
+ """Run an interactive agent directly from the command line."""
+
+ import asyncio
+ import sys
+ from typing import List, Optional
+
+ import typer
+
+ from mcp_agent.core.fastagent import FastAgent
+
+ app = typer.Typer(
+ help="Run an interactive agent directly from the command line without creating an agent.py file"
+ )
+
+ async def _run_agent(
+ name: str = "FastAgent CLI",
+ instruction: str = "You are a helpful AI Agent.",
+ config_path: Optional[str] = None,
+ server_list: Optional[List[str]] = None,
+ model: Optional[str] = None,
+ ) -> None:
+ """Async implementation to run an interactive agent."""
+
+ # Create the FastAgent instance with CLI arg parsing enabled
+ # It will automatically parse args like --model, --quiet, etc.
+ fast_kwargs = {
+ "name": name,
+ "config_path": config_path,
+ "ignore_unknown_args": True,
+ }
+
+ fast = FastAgent(**fast_kwargs)
+
+ # Define the agent with specified parameters
+ agent_kwargs = {"instruction": instruction}
+ if server_list:
+ agent_kwargs["servers"] = server_list
+ if model:
+ agent_kwargs["model"] = model
+
+ @fast.agent(**agent_kwargs)
+ async def cli_agent():
+ async with fast.run() as agent:
+ await agent.interactive()
+
+ # Run the agent
+ await cli_agent()
+
+ def run_async_agent(
+ name: str,
+ instruction: str,
+ config_path: Optional[str] = None,
+ servers: Optional[str] = None,
+ model: Optional[str] = None
+ ):
+ """Run the async agent function with proper loop handling."""
+ server_list = servers.split(',') if servers else None
+
+ # Check if we're already in an event loop
+ try:
+ loop = asyncio.get_event_loop()
+ if loop.is_running():
+ # We're inside a running event loop, so we can't use asyncio.run
+ # Instead, create a new loop
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ except RuntimeError:
+ # No event loop exists, so we'll create one
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+
+ try:
+ loop.run_until_complete(_run_agent(
+ name=name,
+ instruction=instruction,
+ config_path=config_path,
+ server_list=server_list,
+ model=model
+ ))
+ finally:
+ try:
+ # Clean up the loop
+ tasks = asyncio.all_tasks(loop)
+ for task in tasks:
+ task.cancel()
+
+ # Run the event loop until all tasks are done
+ if sys.version_info >= (3, 7):
+ loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))
+ loop.run_until_complete(loop.shutdown_asyncgens())
+ loop.close()
+ except Exception:
+ pass
+
+ @app.callback(invoke_without_command=True)
+ def go(
+ ctx: typer.Context,
+ name: str = typer.Option("FastAgent CLI", "--name", help="Name for the agent"),
+ instruction: str = typer.Option(
+ "You are a helpful AI Agent.", "--instruction", "-i", help="Instruction for the agent"
+ ),
+ config_path: Optional[str] = typer.Option(
+ None, "--config-path", "-c", help="Path to config file"
+ ),
+ servers: Optional[str] = typer.Option(
+ None, "--servers", help="Comma-separated list of server names to enable from config"
+ ),
+ model: Optional[str] = typer.Option(
+ None, "--model", help="Override the default model (e.g., haiku, sonnet, gpt-4)"
+ ),
+ ) -> None:
+ """
+ Run an interactive agent directly from the command line.
+
+ Example:
+ fast-agent go --model=haiku --instruction="You are a coding assistant" --servers=fetch,filesystem
+
+ This will start an interactive session with the agent, using the specified model
+ and instruction. It will use the default configuration from fastagent.config.yaml
+ unless --config-path is specified.
+
+ Common options:
+ --model: Override the default model (e.g., --model=haiku)
+ --quiet: Disable progress display and logging
+ --servers: Comma-separated list of server names to enable from config
+ """
+ run_async_agent(
+ name=name,
+ instruction=instruction,
+ config_path=config_path,
+ servers=servers,
+ model=model
+ )
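
The new `go` command is essentially a shortcut for the decorator-based setup shown in the README. As an illustrative sketch (not a file shipped in the package), something like the following agent.py is roughly what `fast-agent go --model=haiku --servers=fetch` saves you from writing by hand:

```python
import asyncio

from mcp_agent.core.fastagent import FastAgent

fast = FastAgent("FastAgent CLI")

@fast.agent(instruction="You are a helpful AI Agent.", servers=["fetch"], model="haiku")
async def main():
    # Same interactive loop that go.py enters via agent.interactive()
    async with fast.run() as agent:
        await agent.interactive()

if __name__ == "__main__":
    asyncio.run(main())
```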
mcp_agent/cli/commands/setup.py CHANGED
@@ -221,7 +221,7 @@ def init(
  if "fastagent.secrets.yaml" in created:
  console.print("\n[yellow]Important:[/yellow] Remember to:")
  console.print(
- "1. Add your API keys to fastagent.secrets.yaml or set OPENAI_API_KEY and ANTHROPIC_API_KEY environment variables"
+ "1. Add your API keys to fastagent.secrets.yaml, or set environment variables. Use [cyan]fast-agent check[/cyan] to verify."
  )
  console.print(
  "2. Keep fastagent.secrets.yaml secure and never commit it to version control"
mcp_agent/cli/main.py CHANGED
@@ -4,7 +4,7 @@ import typer
  from rich.console import Console
  from rich.table import Table

- from mcp_agent.cli.commands import check_config, quickstart, setup
+ from mcp_agent.cli.commands import check_config, go, quickstart, setup
  from mcp_agent.cli.terminal import Application

  app = typer.Typer(
@@ -13,6 +13,7 @@ app = typer.Typer(
  )

  # Subcommands
+ app.add_typer(go.app, name="go", help="Run an interactive agent directly from the command line")
  app.add_typer(setup.app, name="setup", help="Set up a new agent project")
  app.add_typer(check_config.app, name="check", help="Show or diagnose fast-agent configuration")
  app.add_typer(quickstart.app, name="bootstrap", help="Create example applications")
@@ -39,14 +40,15 @@ def show_welcome() -> None:
  table.add_column("Command", style="green")
  table.add_column("Description")

- table.add_row("setup", "Create a new agent and configuration files")
+ table.add_row("[bold]go[/bold]", "Start an interactive session with an agent")
+ table.add_row("setup", "Create a new agent template and configuration files")
  table.add_row("check", "Show or diagnose fast-agent configuration")
  table.add_row("quickstart", "Create example applications (workflow, researcher, etc.)")

  console.print(table)

  console.print(
- "\n[italic]get started with:[/italic] [cyan]fast-agent[/cyan] [green]setup[/green]"
+ "\n[italic]get started with:[/italic] [bold][cyan]fast-agent[/cyan][/bold] [green]setup[/green]"
  )


mcp_agent/config.py CHANGED
@@ -181,13 +181,11 @@ class OpenTelemetrySettings(BaseModel):
  OTEL settings for the fast-agent application.
  """

- enabled: bool = True
+ enabled: bool = False

  service_name: str = "fast-agent"
- service_instance_id: str | None = None
- service_version: str | None = None

- otlp_endpoint: str | None = None
+ otlp_endpoint: str = "http://localhost:4318/v1/traces"
  """OTLP endpoint for OpenTelemetry tracing"""

  console_debug: bool = False
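
The net effect of this hunk is that tracing is now opt-in and the exporter defaults to a local OTLP collector. A quick sketch of the resulting defaults, using only the field names visible in the hunk above and assuming no other required fields on the model:

```python
from mcp_agent.config import OpenTelemetrySettings

settings = OpenTelemetrySettings()
assert settings.enabled is False                                     # tracing is off unless enabled
assert settings.otlp_endpoint == "http://localhost:4318/v1/traces"   # default local OTLP endpoint
assert settings.service_name == "fast-agent"
```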