fast-agent-mcp 0.2.18__py3-none-any.whl → 0.2.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.2.18.dist-info → fast_agent_mcp-0.2.20.dist-info}/METADATA +15 -15
- {fast_agent_mcp-0.2.18.dist-info → fast_agent_mcp-0.2.20.dist-info}/RECORD +20 -21
- mcp_agent/__init__.py +1 -2
- mcp_agent/agents/base_agent.py +6 -2
- mcp_agent/agents/workflow/parallel_agent.py +53 -38
- mcp_agent/agents/workflow/router_agent.py +22 -17
- mcp_agent/config.py +5 -4
- mcp_agent/context.py +15 -11
- mcp_agent/core/fastagent.py +248 -217
- mcp_agent/executor/executor.py +8 -9
- mcp_agent/llm/augmented_llm.py +37 -3
- mcp_agent/llm/providers/augmented_llm_anthropic.py +1 -1
- mcp_agent/llm/providers/augmented_llm_openai.py +5 -2
- mcp_agent/mcp/mcp_aggregator.py +114 -119
- mcp_agent/mcp/mcp_connection_manager.py +2 -1
- mcp_agent/mcp_server/agent_server.py +4 -1
- mcp_agent/mcp_server_registry.py +1 -0
- mcp_agent/logging/tracing.py +0 -138
- {fast_agent_mcp-0.2.18.dist-info → fast_agent_mcp-0.2.20.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.2.18.dist-info → fast_agent_mcp-0.2.20.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.2.18.dist-info → fast_agent_mcp-0.2.20.dist-info}/licenses/LICENSE +0 -0
{fast_agent_mcp-0.2.18.dist-info → fast_agent_mcp-0.2.20.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fast-agent-mcp
-Version: 0.2.18
+Version: 0.2.20
 Summary: Define, Prompt and Test MCP enabled Agents and Workflows
 Author-email: Shaun Smith <fastagent@llmindset.co.uk>, Sarmad Qadri <sarmad@lastmileai.dev>
 License: Apache License
@@ -214,16 +214,16 @@ Requires-Dist: aiohttp>=3.11.13
 Requires-Dist: anthropic>=0.49.0
 Requires-Dist: fastapi>=0.115.6
 Requires-Dist: mcp==1.6.0
-Requires-Dist: numpy>=2.2.1
 Requires-Dist: openai>=1.63.2
 Requires-Dist: opentelemetry-distro>=0.50b0
 Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.29.0
+Requires-Dist: opentelemetry-instrumentation-anthropic>=0.39.3
+Requires-Dist: opentelemetry-instrumentation-openai>=0.39.3
 Requires-Dist: prompt-toolkit>=3.0.50
 Requires-Dist: pydantic-settings>=2.7.0
 Requires-Dist: pydantic>=2.10.4
 Requires-Dist: pyyaml>=6.0.2
 Requires-Dist: rich>=13.9.4
-Requires-Dist: scikit-learn>=1.6.0
 Requires-Dist: typer>=0.15.1
 Provides-Extra: dev
 Requires-Dist: anthropic>=0.42.0; extra == 'dev'
@@ -251,7 +251,7 @@ Description-Content-Type: text/markdown
 ## Overview
 
 > [!TIP]
-> Documentation site is in production here : https://fast-agent.ai. Feel free to feed back what's helpful and what's not.
+> Documentation site is in production here : https://fast-agent.ai. Feel free to feed back what's helpful and what's not. There is also an LLMs.txt [here](https://fast-agent.ai/llms.txt)
 
 **`fast-agent`** enables you to create and interact with sophisticated Agents and Workflows in minutes. It is the first framework with complete, end-to-end tested MCP Feature support including Sampling. Both Anthropic (Haiku, Sonnet, Opus) and OpenAI models (gpt-4o/gpt-4.1 family, o1/o3 family) are supported.
 
@@ -259,8 +259,8 @@ The simple declarative syntax lets you concentrate on composing your Prompts and
 
 `fast-agent` is multi-modal, supporting Images and PDFs for both Anthropic and OpenAI endpoints via Prompts, Resources and MCP Tool Call results. The inclusion of passthrough and playback LLMs enable rapid development and test of Python glue-code for your applications.
 
-> [!
-> `fast-agent`
+> [!IMPORTANT]
+> `fast-agent` The fast-agent documentation repo is here: https://github.com/evalstate/fast-agent-docs. Please feel free to submit PRs for documentation, experience reports or other content you think others may find helpful. All help and feedback warmly received.
 
 ### Agent Application Development
 
@@ -450,10 +450,10 @@ If the Generator has `use_history` off, the previous iteration is returned when
 
 ```python
 @fast.evaluator_optimizer(
-  name="researcher"
-  generator="web_searcher"
-  evaluator="quality_assurance"
-  min_rating="EXCELLENT"
+  name="researcher",
+  generator="web_searcher",
+  evaluator="quality_assurance",
+  min_rating="EXCELLENT",
   max_refinements=3
 )
 
@@ -471,8 +471,8 @@ Routers use an LLM to assess a message, and route it to the most appropriate Age
 
 ```python
 @fast.router(
-  name="route"
-  agents["agent1","agent2","agent3"]
+  name="route",
+  agents=["agent1","agent2","agent3"]
 )
 ```
 
@@ -484,7 +484,7 @@ Given a complex task, the Orchestrator uses an LLM to generate a plan to divide
 
 ```python
 @fast.orchestrator(
-  name="orchestrate"
+  name="orchestrate",
   agents=["task1","task2","task3"]
 )
 ```
@@ -524,7 +524,7 @@ agent["greeter"].send("Good Evening!") # Dictionary access is supported
   servers=["filesystem"], # list of MCP Servers for the agent
   model="o3-mini.high", # specify a model for the agent
   use_history=True, # agent maintains chat history
-  request_params=RequestParams(temperature= 0.7)
+  request_params=RequestParams(temperature= 0.7), # additional parameters for the LLM (or RequestParams())
   human_input=True, # agent can request human input
 )
 ```
@@ -536,7 +536,7 @@ agent["greeter"].send("Good Evening!") # Dictionary access is supported
   name="chain", # name of the chain
   sequence=["agent1", "agent2", ...], # list of agents in execution order
   instruction="instruction", # instruction to describe the chain for other workflows
-  cumulative=False
+  cumulative=False, # whether to accumulate messages through the chain
   continue_with_final=True, # open chat with agent at end of chain after prompting
 )
 ```
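The README hunks above are mostly punctuation fixes to the decorator examples (missing commas, and `agents=[...]` instead of `agents[...]` in the router). For orientation, here is a minimal sketch of how the corrected router example fits into a complete program; the import path and the `fast.run()` usage are inferred from the package layout and README excerpts shown in this diff, and the agent names and instructions are placeholders.

```python
# Minimal sketch only: assumes the FastAgent decorator API shown in the README
# excerpts above; agent names and instructions are invented for illustration.
import asyncio

from mcp_agent.core.fastagent import FastAgent

fast = FastAgent("router-example")


@fast.agent(name="agent1", instruction="You answer general questions.")
@fast.agent(name="agent2", instruction="You answer coding questions.")
@fast.router(
    name="route",
    agents=["agent1", "agent2"],  # corrected syntax: keyword argument with a list
)
async def main() -> None:
    async with fast.run() as agent:
        await agent["route"].send("How do I reverse a list in Python?")


if __name__ == "__main__":
    asyncio.run(main())
```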
{fast_agent_mcp-0.2.18.dist-info → fast_agent_mcp-0.2.20.dist-info}/RECORD
CHANGED
@@ -1,23 +1,23 @@
-mcp_agent/__init__.py,sha256
+mcp_agent/__init__.py,sha256=18T0AG0W9sJhTY38O9GFFOzliDhxx9p87CvRyti9zbw,1620
 mcp_agent/app.py,sha256=WRsiUdwy_9IAnaGRDwuLm7pzgQpt2wgsg10vBOpfcwM,5539
-mcp_agent/config.py,sha256=
+mcp_agent/config.py,sha256=_b5JeS2nWHScSUUTu6wYxXzdfKefoqII305ecKcw7Gs,12248
 mcp_agent/console.py,sha256=Gjf2QLFumwG1Lav__c07X_kZxxEUSkzV-1_-YbAwcwo,813
-mcp_agent/context.py,sha256=
+mcp_agent/context.py,sha256=Kb3s_0MolHx7AeTs1NVcY3ly-xFBd35o8LT7Srpx9is,7334
 mcp_agent/context_dependent.py,sha256=QXfhw3RaQCKfscEEBRGuZ3sdMWqkgShz2jJ1ivGGX1I,1455
 mcp_agent/event_progress.py,sha256=3dqk5Pn1tAG_m_wn4IPNwLWLyzm7CyKIidqHN-4l-JY,2736
-mcp_agent/mcp_server_registry.py,sha256=
+mcp_agent/mcp_server_registry.py,sha256=jUmCdfcpTitXm1-3TxpWsdRWY_8phdKNYgXwB16ZSVU,10100
 mcp_agent/progress_display.py,sha256=GeJU9VUt6qKsFVymG688hCMVCsAygG9ifiiEb5IcbN4,361
 mcp_agent/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/agents/agent.py,sha256=GgaUHoilgqzh9PQYr5k2WiPj4pagwicf9-ZLFsHkNNo,3848
-mcp_agent/agents/base_agent.py,sha256=
+mcp_agent/agents/base_agent.py,sha256=fjDr01-hZ9sB3ghI4DlXYVePP0s5f9pmtLH-N3X8bRg,25294
 mcp_agent/agents/workflow/__init__.py,sha256=HloteEW6kalvgR0XewpiFAqaQlMPlPJYg5p3K33IUzI,25
 mcp_agent/agents/workflow/chain_agent.py,sha256=eIlImirrSXkqBJmPuAJgOKis81Cl6lZEGM0-6IyaUV8,6105
 mcp_agent/agents/workflow/evaluator_optimizer.py,sha256=ysUMGM2NzeCIutgr_vXH6kUPpZMw0cX4J_Wl1r8eT84,13296
 mcp_agent/agents/workflow/orchestrator_agent.py,sha256=byZe4bx7D_7BSZZ3hN8BNUWVFPYeqeUwDUCLTRC8mlI,21583
 mcp_agent/agents/workflow/orchestrator_models.py,sha256=5P_aXADVT4Et8qT4e1cb9RelmHX5dCRrzu8j8T41Kdg,7230
 mcp_agent/agents/workflow/orchestrator_prompts.py,sha256=EXKEI174sshkZyPPEnWbwwNafzSPuA39MXL7iqG9cWc,9106
-mcp_agent/agents/workflow/parallel_agent.py,sha256=
-mcp_agent/agents/workflow/router_agent.py,sha256=
+mcp_agent/agents/workflow/parallel_agent.py,sha256=JaQFp35nmAdoBRLAwx8BfnK7kirVq9PMw24LQ3ZEzoc,7705
+mcp_agent/agents/workflow/router_agent.py,sha256=yZUUhAmni2wAutQJ32EQ264Uh_MIriVP-AmhKFrYTeQ,9472
 mcp_agent/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/cli/__main__.py,sha256=AVZ7tQFhU_sDOGuUGJq8ujgKtcxsYJBJwHbVaaiRDlI,166
 mcp_agent/cli/main.py,sha256=XjrgXMBaPKkVqAFo8T9LJz6Tp1-ivrKDOuNYWke99YA,3090
@@ -34,21 +34,21 @@ mcp_agent/core/direct_factory.py,sha256=d96OM1yS3eIocIiaA9FQt6C2zr6VDUyCJBTZCp_D
 mcp_agent/core/enhanced_prompt.py,sha256=bzvcengS7XzHWB7NWhyxHM3hhO2HI4zP5DbGXAOw0Jw,19155
 mcp_agent/core/error_handling.py,sha256=xoyS2kLe0eG0bj2eSJCJ2odIhGUve2SbDR7jP-A-uRw,624
 mcp_agent/core/exceptions.py,sha256=ENAD_qGG67foxy6vDkIvc-lgopIUQy6O7zvNPpPXaQg,2289
-mcp_agent/core/fastagent.py,sha256=
+mcp_agent/core/fastagent.py,sha256=WEEGz2WBAddDGNeWJwqwFIPLiQnLjaNxZLoMR0peyyU,22884
 mcp_agent/core/interactive_prompt.py,sha256=w3VyRzW4hzn0xhWZRwo_qRRAD5WVSrJYe8QDe1XZ55Y,24252
 mcp_agent/core/mcp_content.py,sha256=2D7KHY9mG_vxoDwFLKvsPQV9VRIzHItM7V-jcEnACh8,8878
 mcp_agent/core/prompt.py,sha256=qnintOUGEoDPYLI9bu9G2OlgVMCe5ZPUZilgMzydXhc,7919
 mcp_agent/core/request_params.py,sha256=loYf13DN7e-DsdYRd37jWkJWJGwVBL-iFkcANP1J60Q,1366
 mcp_agent/core/validation.py,sha256=RIBKFlh0GJg4rTcFQXoXp8A0sK1HpsCigKcYSK3gFaY,12090
 mcp_agent/executor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mcp_agent/executor/executor.py,sha256=
+mcp_agent/executor/executor.py,sha256=E44p6d-o3OMRoP_dNs_cDnyti91LQ3P9eNU88mSi1kc,9462
 mcp_agent/executor/task_registry.py,sha256=PCALFeYtkQrPBg4RBJnlA0aDI8nHclrNkHGUS4kV3W8,1242
 mcp_agent/executor/workflow_signal.py,sha256=Cg1uZBk3fn8kXhPOg-wINNuVaf3v9pvLD6NbqWy5Z6E,11142
 mcp_agent/human_input/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/human_input/handler.py,sha256=s712Z5ssTCwjL9-VKoIdP5CtgMh43YvepynYisiWTTA,3144
 mcp_agent/human_input/types.py,sha256=RtWBOVzy8vnYoQrc36jRLn8z8N3C4pDPMBN5vF6qM5Y,1476
 mcp_agent/llm/__init__.py,sha256=d8zgwG-bRFuwiMNMYkywg_qytk4P8lawyld_meuUmHI,68
-mcp_agent/llm/augmented_llm.py,sha256=
+mcp_agent/llm/augmented_llm.py,sha256=ASe604OhrMZ9dVoGEUEpUQaY6fFamz4gL8ttzWP_9m0,24212
 mcp_agent/llm/augmented_llm_passthrough.py,sha256=zHcctNpwg4EFJvD1x9Eg443SVX-uyzFphLikwF_yVE0,6288
 mcp_agent/llm/augmented_llm_playback.py,sha256=6L_RWIK__R67oZK7u3Xt3hWy1T2LnHXIO-efqgP3tPw,4177
 mcp_agent/llm/memory.py,sha256=HQ_c1QemOUjrkY6Z2omE6BG5fXga7y4jN7KCMOuGjPs,3345
@@ -60,11 +60,11 @@ mcp_agent/llm/sampling_converter.py,sha256=C7wPBlmT0eD90XWabC22zkxsrVHKCrjwIwg6c
 mcp_agent/llm/sampling_format_converter.py,sha256=xGz4odHpOcP7--eFaJaFtUR8eR9jxZS7MnLH6J7n0EU,1263
 mcp_agent/llm/providers/__init__.py,sha256=heVxtmuqFJOnjjxHz4bWSqTAxXoN1E8twC_gQ_yJpHk,265
 mcp_agent/llm/providers/anthropic_utils.py,sha256=vYDN5G5jKMhD2CQg8veJYab7tvvzYkDMq8M1g_hUAQg,3275
-mcp_agent/llm/providers/augmented_llm_anthropic.py,sha256=
+mcp_agent/llm/providers/augmented_llm_anthropic.py,sha256=RQ4r5Q84VJ_dyuNo23b-EMzvq6RrpspzIQWtfVUfw6M,15468
 mcp_agent/llm/providers/augmented_llm_deepseek.py,sha256=NiZK5nv91ZS2VgVFXpbsFNFYLsLcppcbo_RstlRMd7I,1145
 mcp_agent/llm/providers/augmented_llm_generic.py,sha256=5Uq8ZBhcFuQTt7koP_5ykolREh2iWu8zKhNbh3pM9lQ,1210
 mcp_agent/llm/providers/augmented_llm_google.py,sha256=N0a2fphVtkvNYxKQpEX6J4tlO1C_mRw4sw3LBXnrOeI,1130
-mcp_agent/llm/providers/augmented_llm_openai.py,sha256=
+mcp_agent/llm/providers/augmented_llm_openai.py,sha256=TumZs1y678IvyvYIehf8xSDqYWqC44dWrIbqFGtz03g,14085
 mcp_agent/llm/providers/augmented_llm_openrouter.py,sha256=V_TlVKm92GHBxYIo6gpvH_6cAaIdppS25Tz6x5T7LW0,2341
 mcp_agent/llm/providers/multipart_converter_anthropic.py,sha256=t5lHYGfFUacJldnrVtMNW-8gEMoto8Y7hJkDrnyZR-Y,16650
 mcp_agent/llm/providers/multipart_converter_openai.py,sha256=XPIulWntNpZWNGWrc240StPzok2RqrDAV7OigDwQ1uU,15850
@@ -78,15 +78,14 @@ mcp_agent/logging/json_serializer.py,sha256=qkfxnR9ka6OgvwSpM2CggELbEtzzkApm0s_K
 mcp_agent/logging/listeners.py,sha256=_S4Jp5_KWp0kUfrx4BxDdNCeQK3MNT3Zi9AaolPri7A,6648
 mcp_agent/logging/logger.py,sha256=l02OGX_c5FOyH0rspd4ZvnkJcbb0FahhUhlh2KI8mqE,10724
 mcp_agent/logging/rich_progress.py,sha256=oY9fjb4Tyw6887v8sgO6EGIK4lnmIoR3NNxhA_-Ln_M,4893
-mcp_agent/logging/tracing.py,sha256=d5lSXakzzi5PtQpUkVkOnYaGX8NduGPq__S7vx-Ln8U,5187
 mcp_agent/logging/transport.py,sha256=m8YsLLu5T8eof_ndpLQs4gHOzqqEL98xsVwBwDsBfxI,17335
 mcp_agent/mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/mcp/gen_client.py,sha256=fAVwFVCgSamw4PwoWOV4wrK9TABx1S_zZv8BctRyF2k,3030
 mcp_agent/mcp/interfaces.py,sha256=PAou8znAl2HgtvfCpLQOZFbKra9F72OcVRfBJbboNX8,6965
 mcp_agent/mcp/logger_textio.py,sha256=vljC1BtNTCxBAda9ExqNB-FwVNUZIuJT3h1nWmCjMws,3172
 mcp_agent/mcp/mcp_agent_client_session.py,sha256=RMYNltc2pDIzxwEJSS5589RbvPO0KWV4Y3jSyAmhKf0,4181
-mcp_agent/mcp/mcp_aggregator.py,sha256=
-mcp_agent/mcp/mcp_connection_manager.py,sha256=
+mcp_agent/mcp/mcp_aggregator.py,sha256=c3UDWsTgHMcpHPx1p-vVru4y3eVO1jBQyLzwEMH2RHU,40237
+mcp_agent/mcp/mcp_connection_manager.py,sha256=L5Dk4cyarN_v2rfktkrfZJR4xUuD3yN_hUyQnKHBWgM,14044
 mcp_agent/mcp/mime_utils.py,sha256=difepNR_gpb4MpMLkBRAoyhDk-AjXUHTiqKvT_VwS1o,1805
 mcp_agent/mcp/prompt_message_multipart.py,sha256=BDwRdNwyWHb2q2bccDb2iR2VlORqVvkvoG3xYzcMpCE,4403
 mcp_agent/mcp/prompt_render.py,sha256=k3v4BZDThGE2gGiOYVQtA6x8WTEdOuXIEnRafANhN1U,2996
@@ -103,7 +102,7 @@ mcp_agent/mcp/prompts/prompt_load.py,sha256=Zo0FogqWFEG5FtF1d9ZH-RWsCSSMsi5FIEQH
 mcp_agent/mcp/prompts/prompt_server.py,sha256=SiUR2xYfd3vEpghnYRdzz2rFEMtAbCKx2xzUXgvz1g8,18501
 mcp_agent/mcp/prompts/prompt_template.py,sha256=EejiqGkau8OizORNyKTUwUjrPof5V-hH1H_MBQoQfXw,15732
 mcp_agent/mcp_server/__init__.py,sha256=zBU51ITHIEPScd9nRafnhEddsWqXRPAAvHhkrbRI2_4,155
-mcp_agent/mcp_server/agent_server.py,sha256=
+mcp_agent/mcp_server/agent_server.py,sha256=s-nI0uTNWx4nYDDM_5GmuY5x6ZeFkymfNoCSuwuBRd8,19891
 mcp_agent/resources/examples/data-analysis/analysis-campaign.py,sha256=16gxrQ5kM8fb8tPwSCMXaitonk3PSEhz28njWwPxXrw,7269
 mcp_agent/resources/examples/data-analysis/analysis.py,sha256=M9z8Q4YC5OGuqSa5uefYmmfmctqMn-WqCSfg5LI407o,2609
 mcp_agent/resources/examples/data-analysis/fastagent.config.yaml,sha256=ini94PHyJCfgpjcjHKMMbGuHs6LIj46F1NwY0ll5HVk,1609
@@ -144,8 +143,8 @@ mcp_agent/resources/examples/workflows/parallel.py,sha256=DQ5vY5-h8Qa5QHcYjsWXhZ
 mcp_agent/resources/examples/workflows/router.py,sha256=E4x_-c3l4YW9w1i4ARcDtkdeqIdbWEGfsMzwLYpdbVc,1677
 mcp_agent/resources/examples/workflows/short_story.txt,sha256=X3y_1AyhLFN2AKzCKvucJtDgAFIJfnlbsbGZO5bBWu0,1187
 mcp_agent/ui/console_display.py,sha256=TVGDtJ37hc6UG0ei9g7ZPZZfFNeS1MYozt-Mx8HsPCk,9752
-fast_agent_mcp-0.2.
-fast_agent_mcp-0.2.
-fast_agent_mcp-0.2.
-fast_agent_mcp-0.2.
-fast_agent_mcp-0.2.
+fast_agent_mcp-0.2.20.dist-info/METADATA,sha256=RlU6MHHAJoP4xuuA8QsIspMEZfGdSKDo76so374wzA4,30142
+fast_agent_mcp-0.2.20.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+fast_agent_mcp-0.2.20.dist-info/entry_points.txt,sha256=bRniFM5zk3Kix5z7scX0gf9VnmGQ2Cz_Q1Gh7Ir4W00,186
+fast_agent_mcp-0.2.20.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
+fast_agent_mcp-0.2.20.dist-info/RECORD,,
mcp_agent/__init__.py
CHANGED
@@ -36,7 +36,7 @@ from mcp_agent.core.request_params import RequestParams
 
 # Core protocol interfaces
 from mcp_agent.mcp.interfaces import AgentProtocol, AugmentedLLMProtocol
-from mcp_agent.mcp.mcp_aggregator import MCPAggregator
+from mcp_agent.mcp.mcp_aggregator import MCPAggregator
 from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 
 __all__ = [
@@ -58,7 +58,6 @@ __all__ = [
     "Agent",
     "AgentConfig",
     "MCPAggregator",
-    "MCPCompoundServer",
    "PromptMessageMultipart",
     # FastAgent components
     "FastAgent",
mcp_agent/agents/base_agent.py
CHANGED
@@ -31,6 +31,7 @@ from mcp.types import (
     TextContent,
     Tool,
 )
+from opentelemetry import trace
 from pydantic import BaseModel
 
 from mcp_agent.core.agent_types import AgentConfig, AgentType
@@ -92,6 +93,7 @@ class BaseAgent(MCPAggregator, AgentProtocol):
         )
 
         self._context = context
+        self.tracer = trace.get_tracer(__name__)
         self.name = self.config.name
         self.instruction = self.config.instruction
         self.functions = functions or []
@@ -588,7 +590,8 @@
             The LLM's response as a PromptMessageMultipart
         """
         assert self._llm
-        return await self._llm.generate(multipart_messages, request_params)
+        with self.tracer.start_as_current_span(f"Agent: '{self.name}' generate"):
+            return await self._llm.generate(multipart_messages, request_params)
 
     async def structured(
         self,
@@ -609,7 +612,8 @@
             An instance of the specified model, or None if coercion fails
         """
         assert self._llm
-        return await self._llm.structured(multipart_messages, model, request_params)
+        with self.tracer.start_as_current_span(f"Agent: '{self.name}' structured"):
+            return await self._llm.structured(multipart_messages, model, request_params)
 
     async def apply_prompt_messages(
         self, prompts: List[PromptMessageMultipart], request_params: RequestParams | None = None
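Seen outside the diff context, the new tracing hook is simple: `BaseAgent` now creates one tracer per module and wraps each delegated LLM call in a span named after the agent. The sketch below reproduces just that pattern with stand-in types; it is not the real `BaseAgent`, only the OpenTelemetry usage mirrors the diff.

```python
# Stand-alone sketch of the span-per-call pattern added above; the agent and
# LLM pieces are placeholders, the span naming follows the diff's convention.
import asyncio

from opentelemetry import trace


class TracedAgent:
    def __init__(self, name: str) -> None:
        self.name = name
        self.tracer = trace.get_tracer(__name__)  # one tracer per module

    async def generate(self, messages: list) -> str:
        # Span named "Agent: '<name>' generate", as in base_agent.py above.
        with self.tracer.start_as_current_span(f"Agent: '{self.name}' generate"):
            return await self._call_llm(messages)

    async def _call_llm(self, messages: list) -> str:
        return f"echo: {messages[-1]}"  # placeholder for the delegated LLM call


print(asyncio.run(TracedAgent("greeter").generate(["Hello"])))
```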
mcp_agent/agents/workflow/parallel_agent.py
CHANGED
@@ -2,6 +2,7 @@ import asyncio
 from typing import Any, List, Optional, Tuple
 
 from mcp.types import TextContent
+from opentelemetry import trace
 
 from mcp_agent.agents.agent import Agent
 from mcp_agent.agents.base_agent import BaseAgent
@@ -18,7 +19,7 @@ class ParallelAgent(BaseAgent):
     This workflow performs both the fan-out and fan-in operations using LLMs.
     From the user's perspective, an input is specified and the output is returned.
     """
-
+
     @property
     def agent_type(self) -> AgentType:
         """Return the type of this agent."""
@@ -62,31 +63,37 @@
         Returns:
             The aggregated response from the fan-in agent
         """
-        # Execute all fan-out agents in parallel
-        responses: List[PromptMessageMultipart] = await asyncio.gather(
-            *[agent.generate(multipart_messages, request_params) for agent in self.fan_out_agents]
-        )
 
-
-
-
-
+        tracer = trace.get_tracer(__name__)
+        with tracer.start_as_current_span(f"Parallel: '{self.name}' generate"):
+            # Execute all fan-out agents in parallel
+            responses: List[PromptMessageMultipart] = await asyncio.gather(
+                *[
+                    agent.generate(multipart_messages, request_params)
+                    for agent in self.fan_out_agents
+                ]
+            )
 
-
-
-
-
+            # Extract the received message from the input
+            received_message: Optional[str] = (
+                multipart_messages[-1].all_text() if multipart_messages else None
+            )
 
-
-
+            # Convert responses to strings for aggregation
+            string_responses = []
+            for response in responses:
+                string_responses.append(response.all_text())
 
-
-
-
-
+            # Format the responses and send to the fan-in agent
+            aggregated_prompt = self._format_responses(string_responses, received_message)
+
+            # Create a new multipart message with the formatted responses
+            formatted_prompt = PromptMessageMultipart(
+                role="user", content=[TextContent(type="text", text=aggregated_prompt)]
+            )
 
-
-
+            # Use the fan-in agent to aggregate the responses
+            return await self.fan_in_agent.generate([formatted_prompt], request_params)
 
     def _format_responses(self, responses: List[Any], message: Optional[str] = None) -> str:
         """
@@ -116,7 +123,7 @@
 
     async def structured(
         self,
-
+        multipart_messages: List[PromptMessageMultipart],
         model: type[ModelT],
         request_params: Optional[RequestParams] = None,
     ) -> Tuple[ModelT | None, PromptMessageMultipart]:
@@ -133,27 +140,35 @@
         Returns:
             An instance of the specified model, or None if coercion fails
         """
-        # Generate parallel responses first
-        responses: List[PromptMessageMultipart] = await asyncio.gather(
-            *[agent.generate(prompt, request_params) for agent in self.fan_out_agents]
-        )
 
-
-
+        tracer = trace.get_tracer(__name__)
+        with tracer.start_as_current_span(f"Parallel: '{self.name}' generate"):
+            # Generate parallel responses first
+            responses: List[PromptMessageMultipart] = await asyncio.gather(
+                *[
+                    agent.generate(multipart_messages, request_params)
+                    for agent in self.fan_out_agents
+                ]
+            )
 
-
-
+            # Extract the received message
+            received_message: Optional[str] = (
+                multipart_messages[-1].all_text() if multipart_messages else None
+            )
 
-
-
+            # Convert responses to strings
+            string_responses = [response.all_text() for response in responses]
 
-
-
-
-
+            # Format the responses for the fan-in agent
+            aggregated_prompt = self._format_responses(string_responses, received_message)
+
+            # Create a multipart message
+            formatted_prompt = PromptMessageMultipart(
+                role="user", content=[TextContent(type="text", text=aggregated_prompt)]
+            )
 
-
-
+            # Use the fan-in agent to parse the structured output
+            return await self.fan_in_agent.structured([formatted_prompt], model, request_params)
 
     async def initialize(self) -> None:
         """
mcp_agent/agents/workflow/router_agent.py
CHANGED
@@ -7,6 +7,7 @@ by determining the best agent for a request and dispatching to it.
 
 from typing import TYPE_CHECKING, Callable, List, Optional, Tuple, Type
 
+from opentelemetry import trace
 from pydantic import BaseModel
 
 from mcp_agent.agents.agent import Agent
@@ -158,17 +159,18 @@
         Returns:
             The response from the selected agent
         """
+        tracer = trace.get_tracer(__name__)
+        with tracer.start_as_current_span(f"Routing: '{self.name}' generate"):
+            route, warn = await self._route_request(multipart_messages[-1])
 
-
+            if not route:
+                return Prompt.assistant(warn or "No routing result or warning received")
 
-
-
+            # Get the selected agent
+            agent: Agent = self.agent_map[route.agent]
 
-
-
-
-        # Dispatch the request to the selected agent
-        return await agent.generate(multipart_messages, request_params)
+            # Dispatch the request to the selected agent
+            return await agent.generate(multipart_messages, request_params)
 
     async def structured(
         self,
@@ -187,18 +189,21 @@
         Returns:
             The parsed response from the selected agent, or None if parsing fails
         """
-        route, warn = await self._route_request(multipart_messages[-1])
 
-
-
-
-
+        tracer = trace.get_tracer(__name__)
+        with tracer.start_as_current_span(f"Routing: '{self.name}' structured"):
+            route, warn = await self._route_request(multipart_messages[-1])
+
+            if not route:
+                return None, Prompt.assistant(
+                    warn or "No routing result or warning received (structured)"
+                )
 
-
-
+            # Get the selected agent
+            agent: Agent = self.agent_map[route.agent]
 
-
-
+            # Dispatch the request to the selected agent
+            return await agent.structured(multipart_messages, model, request_params)
 
     async def _route_request(
         self, message: PromptMessageMultipart
mcp_agent/config.py
CHANGED
@@ -93,6 +93,9 @@ class MCPServerSettings(BaseModel):
     sampling: MCPSamplingSettings | None = None
     """Sampling settings for this Client/Server pair"""
 
+    cwd: str | None = None
+    """Working directory for the executed server command."""
+
 
 class MCPSettings(BaseModel):
     """Configuration for all MCP servers."""
@@ -181,13 +184,11 @@ class OpenTelemetrySettings(BaseModel):
     OTEL settings for the fast-agent application.
     """
 
-    enabled: bool =
+    enabled: bool = False
 
     service_name: str = "fast-agent"
-    service_instance_id: str | None = None
-    service_version: str | None = None
 
-    otlp_endpoint: str
+    otlp_endpoint: str = "http://localhost:4318/v1/traces"
    """OTLP endpoint for OpenTelemetry tracing"""
 
     console_debug: bool = False
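For reference, the two settings changes above translate into configuration roughly like the following. This is a sketch only: the `cwd` field and the OTel defaults come straight from the diff, but the `command`/`args` fields on `MCPServerSettings` are assumed from the surrounding model and are not shown in this hunk.

```python
# Sketch of using the new fields; command/args are assumed fields of
# MCPServerSettings, only cwd and the OTel defaults are confirmed by the diff.
from mcp_agent.config import MCPServerSettings, OpenTelemetrySettings

server = MCPServerSettings(
    command="uvx",
    args=["mcp-server-fetch"],
    cwd="/tmp/fetch-workdir",  # new in 0.2.20: working directory for the server command
)

otel = OpenTelemetrySettings(
    enabled=True,  # tracing now defaults to off, so it must be switched on explicitly
    # otlp_endpoint defaults to "http://localhost:4318/v1/traces";
    # service_instance_id / service_version fields were removed in this release
)

print(server.cwd, otel.otlp_endpoint)
```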
mcp_agent/context.py
CHANGED
@@ -4,11 +4,14 @@ A central context object to store global state that is shared across the applica
 
 import asyncio
 import concurrent.futures
+import uuid
 from typing import TYPE_CHECKING, Any, Optional, Union
 
 from mcp import ServerSession
 from opentelemetry import trace
 from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+from opentelemetry.instrumentation.anthropic import AnthropicInstrumentor
+from opentelemetry.instrumentation.openai import OpenAIInstrumentor
 from opentelemetry.propagate import set_global_textmap
 from opentelemetry.sdk.resources import Resource
 from opentelemetry.sdk.trace import TracerProvider
@@ -51,7 +54,7 @@ class Context(BaseModel):
     server_registry: Optional[ServerRegistry] = None
     task_registry: Optional[ActivityRegistry] = None
 
-    tracer:
+    tracer: trace.Tracer | None = None
 
     model_config = ConfigDict(
         extra="allow",
@@ -63,28 +66,27 @@ async def configure_otel(config: "Settings") -> None:
     """
     Configure OpenTelemetry based on the application config.
     """
-    if not config.otel.enabled:
-        return
-
-    # Check if a provider is already set to avoid re-initialization
-    if trace.get_tracer_provider().__class__.__name__ != "NoOpTracerProvider":
+    if not config.otel or not config.otel.enabled:
         return
 
     # Set up global textmap propagator first
     set_global_textmap(TraceContextTextMapPropagator())
 
     service_name = config.otel.service_name
-
-
+    from importlib.metadata import version
+
+    try:
+        app_version = version("fast-agent-mcp")
+    except: # noqa: E722
+        app_version = "unknown"
 
-    # Create resource identifying this service
     resource = Resource.create(
         attributes={
             key: value
             for key, value in {
                 "service.name": service_name,
-                "service.instance.id":
-                "service.version":
+                "service.instance.id": str(uuid.uuid4())[:6],
+                "service.version": app_version,
             }.items()
             if value is not None
         }
@@ -107,6 +109,8 @@ async def configure_otel(config: "Settings") -> None:
 
     # Set as global tracer provider
     trace.set_tracer_provider(tracer_provider)
+    AnthropicInstrumentor().instrument()
+    OpenAIInstrumentor().instrument()
 
 
 async def configure_logger(config: "Settings") -> None: