fast-agent-mcp 0.1.9__py3-none-any.whl → 0.1.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.1.9.dist-info → fast_agent_mcp-0.1.11.dist-info}/METADATA +37 -38
- {fast_agent_mcp-0.1.9.dist-info → fast_agent_mcp-0.1.11.dist-info}/RECORD +27 -25
- mcp_agent/agents/agent.py +112 -0
- mcp_agent/config.py +9 -0
- mcp_agent/context.py +0 -2
- mcp_agent/core/agent_app.py +29 -0
- mcp_agent/core/factory.py +14 -13
- mcp_agent/core/fastagent.py +2 -2
- mcp_agent/core/proxies.py +41 -0
- mcp_agent/logging/listeners.py +3 -6
- mcp_agent/mcp/gen_client.py +4 -4
- mcp_agent/mcp/interfaces.py +152 -0
- mcp_agent/mcp/mcp_agent_client_session.py +30 -146
- mcp_agent/mcp/mcp_aggregator.py +63 -5
- mcp_agent/mcp/mcp_connection_manager.py +0 -1
- mcp_agent/mcp/prompts/prompt_server.py +12 -11
- mcp_agent/mcp/resource_utils.py +58 -38
- mcp_agent/mcp/sampling.py +133 -0
- mcp_agent/mcp/stdio.py +22 -15
- mcp_agent/mcp_server_registry.py +5 -2
- mcp_agent/resources/examples/internal/agent.py +1 -1
- mcp_agent/resources/examples/internal/fastagent.config.yaml +3 -0
- mcp_agent/resources/examples/workflows/orchestrator.py +3 -3
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +110 -2
- {fast_agent_mcp-0.1.9.dist-info → fast_agent_mcp-0.1.11.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.9.dist-info → fast_agent_mcp-0.1.11.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.1.9.dist-info → fast_agent_mcp-0.1.11.dist-info}/licenses/LICENSE +0 -0
{fast_agent_mcp-0.1.9.dist-info → fast_agent_mcp-0.1.11.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fast-agent-mcp
-Version: 0.1.9
+Version: 0.1.11
 Summary: Define, Prompt and Test MCP enabled Agents and Workflows
 Author-email: Shaun Smith <fastagent@llmindset.co.uk>, Sarmad Qadri <sarmad@lastmileai.dev>
 License: Apache License
@@ -212,7 +212,7 @@ Requires-Python: >=3.10
 Requires-Dist: aiohttp>=3.11.13
 Requires-Dist: anthropic>=0.49.0
 Requires-Dist: fastapi>=0.115.6
-Requires-Dist: mcp>=1.
+Requires-Dist: mcp>=1.5.0
 Requires-Dist: numpy>=2.2.1
 Requires-Dist: openai>=1.63.2
 Requires-Dist: opentelemetry-distro>=0.50b0
@@ -241,10 +241,9 @@ Provides-Extra: temporal
 Requires-Dist: temporalio>=1.8.0; extra == 'temporal'
 Description-Content-Type: text/markdown

-## fast-agent
-
 <p align="center">
 <a href="https://pypi.org/project/fast-agent-mcp/"><img src="https://img.shields.io/pypi/v/fast-agent-mcp?color=%2334D058&label=pypi" /></a>
+<a href="#"><img src="https://github.com/evalstate/fast-agent/actions/workflows/main-checks.yml/badge.svg" /></a>
 <a href="https://github.com/evalstate/fast-agent/issues"><img src="https://img.shields.io/github/issues-raw/evalstate/fast-agent" /></a>
 <a href="https://lmai.link/discord/mcp-agent"><img src="https://shields.io/discord/1089284610329952357" alt="discord" /></a>
 <img alt="Pepy Total Downloads" src="https://img.shields.io/pepy/dt/fast-agent-mcp?label=pypi%20%7C%20downloads"/>
@@ -253,15 +252,14 @@ Description-Content-Type: text/markdown

 ## Overview

-**`fast-agent`** enables you to create and interact with sophisticated Agents and Workflows in minutes.
+**`fast-agent`** enables you to create and interact with sophisticated Agents and Workflows in minutes. It is the first framework with complete, end-to-end tested MCP Feature support including Sampling. Both Anthropic (Haiku, Sonnet, Opus) and OpenAI models (gpt-4o family, o1/o3 family) are supported.

 The simple declarative syntax lets you concentrate on composing your Prompts and MCP Servers to [build effective agents](https://www.anthropic.com/research/building-effective-agents).

-
-
-`fast-agent` is now multi-modal, supporting Images and PDFs for both Anthropic and OpenAI endpoints (for supported models), via Prompts and MCP Tool Call results.
+`fast-agent` is multi-modal, supporting Images and PDFs for both Anthropic and OpenAI endpoints via Prompts, Resources and MCP Tool Call results. The inclusion of passthrough and playback LLMs enable rapid development and test of Python glue-code for your applications.

-> [!TIP]
+> [!TIP]
+> `fast-agent` is now MCP Native! Coming Soon - Full Documentation Site and Further MCP Examples.

 ### Agent Application Development

@@ -271,7 +269,7 @@ Chat with individual Agents and Components before, during and after workflow exe

 Simple model selection makes testing Model <-> MCP Server interaction painless. You can read more about the motivation behind this project [here](https://llmindset.co.uk/resources/fast-agent/)

-

 ## Get started:

@@ -596,6 +594,14 @@ agent["greeter"].send("Good Evening!") # Dictionary access is supported

 Add Resources to prompts using either the inbuilt `prompt-server` or MCP Types directly. Convenience class are made available to do so simply, for example:

+```python
+summary: str = await agent.with_resource(
+    "Summarise this PDF please",
+    "mcp_server",
+    "resource://fast-agent/sample.pdf",
+)
+```
+
 #### MCP Tool Result Conversion

 LLM APIs have restrictions on the content types that can be returned as Tool Calls/Function results via their Chat Completions API's:
@@ -611,40 +617,33 @@ MCP Prompts are supported with `apply_prompt(name,arguments)`, which always retu

 Prompts can also be applied interactively through the interactive interface by using the `/prompt` command.

+### Sampling
+
+Sampling LLMs are configured per Client/Server pair. Specify the model name in fastagent.config.yaml as follows:
+
+```yaml
+mcp:
+  servers:
+    sampling_resource:
+      command: "uv"
+      args: ["run", "sampling_resource_server.py"]
+      sampling:
+        model: "haiku"
+```
+
 ### Secrets File

 > [!TIP]
 > fast-agent will look recursively for a fastagent.secrets.yaml file, so you only need to manage this at the root folder of your agent definitions.

+### Interactive Shell
+
+
+
 ## Project Notes

 `fast-agent` builds on the [`mcp-agent`](https://github.com/lastmile-ai/mcp-agent) project by Sarmad Qadri.

-###
-
--
-- Overhaul of Eval/Opt for Conversation Management
-- Removed instructor/double-llm calling - native structured outputs for OAI.
-- Improved handling of Parallel/Fan-In and respose option
-- XML based generated prompts
-- "FastAgent" style prototyping, with per-agent models
-- API keys through Environment Variables
-- Warm-up / Post-Workflow Agent Interactions
-- Quick Setup
-- Interactive Prompt Mode
-- Simple Model Selection with aliases
-- User/Assistant and Tool Call message display
-- MCP Sever Environment Variable support
-- MCP Roots support
-- Comprehensive Progress display
-- JSONL file logging with secret revokation
-- OpenAI o1/o3-mini support with reasoning level
-- Enhanced Human Input Messaging and Handling
-- Declarative workflows
-- Numerous defect fixes
-
-### Features to add (Commmitted)
-
-- Run Agent as MCP Server, with interop
-- Multi-part content types supporing Vision, PDF and multi-part Text.
-- Improved test automation (supported by prompt_server.py and augmented_llm_playback.py)
+### Contributing
+
+Contributions and PRs are welcome - feel free to raise issues to discuss. Full guidelines for contributing and roadmap coming very soon. Get in touch!
{fast_agent_mcp-0.1.9.dist-info → fast_agent_mcp-0.1.11.dist-info}/RECORD
CHANGED
@@ -1,14 +1,14 @@
 mcp_agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/app.py,sha256=0_C1xmNZlk9qZoewnNI_mC7sSfO9oJgkOyiKkQ62MHU,10606
-mcp_agent/config.py,sha256=
+mcp_agent/config.py,sha256=cEiY_J5MqKj23KkHtzP1h04yalaGgO2OiXErduiVf2M,10890
 mcp_agent/console.py,sha256=Gjf2QLFumwG1Lav__c07X_kZxxEUSkzV-1_-YbAwcwo,813
-mcp_agent/context.py,sha256=
+mcp_agent/context.py,sha256=m1S5M9a2Kdxy5rEGG6Uwwmi19bDEpU6u-e5ZgPmVXfY,8031
 mcp_agent/context_dependent.py,sha256=TGqRLzYCOnsWGoaD1HtrliYtWo8MeaWCQk6ePUmyYCw,1446
 mcp_agent/event_progress.py,sha256=25iz0yyg-O4glMmtijcYpDdUmtUIKsCmR_8A52GgeC4,2716
-mcp_agent/mcp_server_registry.py,sha256=
+mcp_agent/mcp_server_registry.py,sha256=eQbl0usicnsNE03haxc6C_FHl_0goPAZdcb082cDIQk,9992
 mcp_agent/progress_display.py,sha256=GeJU9VUt6qKsFVymG688hCMVCsAygG9ifiiEb5IcbN4,361
 mcp_agent/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mcp_agent/agents/agent.py,sha256=
+mcp_agent/agents/agent.py,sha256=qI3njT8SPTLKzCwBfdlWNMVoSEwLGNjuq6owPfhvIas,17444
 mcp_agent/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/cli/__main__.py,sha256=AVZ7tQFhU_sDOGuUGJq8ujgKtcxsYJBJwHbVaaiRDlI,166
 mcp_agent/cli/main.py,sha256=DE6EZzspfzHwPK59x8vL4AIDHRQkVQ1Ja70XRGU1IQs,2753
@@ -17,18 +17,18 @@ mcp_agent/cli/commands/bootstrap.py,sha256=Rmwbuwl52eHfnya7fnwKk2J7nCsHpSh6irka4
 mcp_agent/cli/commands/config.py,sha256=32YTS5jmsYAs9QzAhjkG70_daAHqOemf4XbZBBSMz6g,204
 mcp_agent/cli/commands/setup.py,sha256=_SCpd6_PrixqbSaE72JQ7erIRkZnJGmh_3TvvwSzEiE,6392
 mcp_agent/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mcp_agent/core/agent_app.py,sha256=
+mcp_agent/core/agent_app.py,sha256=coAbhzGT34SV_S0AsLbHuOkxyovOZFlpk_HUphRNU78,30807
 mcp_agent/core/agent_types.py,sha256=yKiMbv9QO2dduq4zXmoMZlOZpXJZhM4oNwIq1-134FE,318
 mcp_agent/core/agent_utils.py,sha256=QMvwmxZyCqYhBzSyL9xARsxTuwdmlyjQvrPpsH36HnQ,1888
 mcp_agent/core/decorators.py,sha256=dkAah1eIuYsEfQISDryG0u2GrzNnsO_jyN7lhpQfNlM,16191
 mcp_agent/core/enhanced_prompt.py,sha256=bykUEnnc1CEWODJwXvl4VGfCtrJPtVXU0D4mUglJK7A,18827
 mcp_agent/core/error_handling.py,sha256=D3HMW5odrbJvaKqcpCGj6eDXrbFcuqYaCZz7fyYiTu4,623
 mcp_agent/core/exceptions.py,sha256=a2-JGRwFFRoQEPuAq0JC5PhAJ5TO3xVJfdS4-VN29cw,2225
-mcp_agent/core/factory.py,sha256=
-mcp_agent/core/fastagent.py,sha256=
+mcp_agent/core/factory.py,sha256=MhlYS0G0IyFy_j46HVJdjEznJzfCFjx_NRhUPcbQIJI,19081
+mcp_agent/core/fastagent.py,sha256=jJmO0DryFGwSkse_3q5Ll-5XONDvj7k_Oeb-ETBKFkA,19620
 mcp_agent/core/mcp_content.py,sha256=rXT2C5gP9qgC-TI5F362ZLJi_erzcEOnlP9D2ZKK0i0,6860
 mcp_agent/core/prompt.py,sha256=R-X3kptu3ehV_SQeiGnP6F9HMN-92I8e73gnkQ1tDVs,4317
-mcp_agent/core/proxies.py,sha256=
+mcp_agent/core/proxies.py,sha256=qsIqyJgiIh-b9ehHiZrM39YutQFJPHaHO14GOMFE1KI,10289
 mcp_agent/core/types.py,sha256=Zhi9iW7uiOfdpSt9NC0FCtGRFtJPg4mpZPK2aYi7a7M,817
 mcp_agent/core/validation.py,sha256=x0fsx5eLTawASFm9MDtEukwGOj_RTdY1OW064UihMR8,8309
 mcp_agent/eval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -45,26 +45,28 @@ mcp_agent/human_input/types.py,sha256=ZvuDHvI0-wO2tFoS0bzrv8U5B83zYdxAG7g9G9jCxu
 mcp_agent/logging/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/logging/events.py,sha256=qfYJnrqgXdujV-nl-iOwBEBh6HMraowBI4zeAWPPU4A,3461
 mcp_agent/logging/json_serializer.py,sha256=pa_mf0i0YKpLsGq3THuITFUdAbmae-dv1OPOLbcS0to,5782
-mcp_agent/logging/listeners.py,sha256=
+mcp_agent/logging/listeners.py,sha256=1DOc0CvAE6pFxOljfZqs2TGgF50sZZGMDt4Gm_PAjWo,6551
 mcp_agent/logging/logger.py,sha256=Tr009BnfGUKuZcdinnSin0Z_zIsfDNGdcnamw2rDHRQ,10604
 mcp_agent/logging/rich_progress.py,sha256=IEVFdFGA0nwg6pSt9Ydni5LCNYZZPKYMe-6DCi9pO4Y,4851
 mcp_agent/logging/tracing.py,sha256=jQivxKYl870oXakmyUk7TXuTQSvsIzpHwZlSQfy4b0c,5203
 mcp_agent/logging/transport.py,sha256=MFgiCQ-YFP0tSMhDMpZCj585vflWcMydM4oyCFduVf0,17203
 mcp_agent/mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mcp_agent/mcp/gen_client.py,sha256=
+mcp_agent/mcp/gen_client.py,sha256=D92Yo088CAeuWG6M82Vlkq0H8igUTw9SwwOQinZZCkg,3052
+mcp_agent/mcp/interfaces.py,sha256=hUA9R7RA1tF1td9RCfzWHBUVCLXF6FC1a4I1EZ5Fnh4,4629
 mcp_agent/mcp/mcp_activity.py,sha256=CajXCFWZ2cKEX9s4-HfNVAj471ePTVs4NOkvmIh65tE,592
-mcp_agent/mcp/mcp_agent_client_session.py,sha256=
+mcp_agent/mcp/mcp_agent_client_session.py,sha256=3xZbhr48YV5SkBTQGMdNrT_KIGWOBSFPqCZLCSOK2HA,4156
 mcp_agent/mcp/mcp_agent_server.py,sha256=xP09HZTeguJi4Fq0p3fjLBP55uSYe5AdqM90xCgn9Ho,1639
-mcp_agent/mcp/mcp_aggregator.py,sha256=
-mcp_agent/mcp/mcp_connection_manager.py,sha256=
+mcp_agent/mcp/mcp_aggregator.py,sha256=1DYZpmq1IJZo7cYKfahH6LeyVKuNkosGhSq6k59lrlM,37941
+mcp_agent/mcp/mcp_connection_manager.py,sha256=PdLia-rxbhUdAdEnW7TQbkf1qeI9RR3xhQw1j11Bi6o,13612
 mcp_agent/mcp/mime_utils.py,sha256=difepNR_gpb4MpMLkBRAoyhDk-AjXUHTiqKvT_VwS1o,1805
 mcp_agent/mcp/prompt_message_multipart.py,sha256=U7IN0JStmy26akTXcqE4x90oWzm8xs1qa0VeKIyPKmE,1962
 mcp_agent/mcp/prompt_serialization.py,sha256=StcXV7V4fqqtCmOCXGCyYXx5vpwNhL2xr3RG_awwdqI,16056
-mcp_agent/mcp/resource_utils.py,sha256=
-mcp_agent/mcp/
+mcp_agent/mcp/resource_utils.py,sha256=G9IBWyasxKKcbq3T_fSpM6mHE8PjBargEdfQnBPrkZY,6650
+mcp_agent/mcp/sampling.py,sha256=iHjjI5ViCe2CYm_7EtJiHr-WPYug6MQyAuBtru0AnkI,4601
+mcp_agent/mcp/stdio.py,sha256=fZr9yVqPvmPC8pkaf95rZtw0uD8BGND0UI_cUYyuSsE,4478
 mcp_agent/mcp/prompts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/mcp/prompts/__main__.py,sha256=gr1Tdz9fcK0EXjEuZg_BOnKUmvhYq5AH2lFZicVyNb0,237
-mcp_agent/mcp/prompts/prompt_server.py,sha256=
+mcp_agent/mcp/prompts/prompt_server.py,sha256=6K4FeKNW_JApWUNB055gl8UnWyC1mvtl_kPEvgUnPjk,17348
 mcp_agent/mcp/prompts/prompt_template.py,sha256=NDnSVA0W1wayZHCVx27lfuVPoxlAz-FfBwiCEQG9Ixk,16324
 mcp_agent/mcp_server/__init__.py,sha256=SEWyU7aSFzdSk6iTYnrQu-llji5_P5dp3TaztCt_rzo,154
 mcp_agent/mcp_server/agent_server.py,sha256=SUBggPyrzWtBRUC5xIMpCxu6ei-6Vah3q9Si12BQ-zY,4444
@@ -72,8 +74,8 @@ mcp_agent/resources/examples/data-analysis/analysis-campaign.py,sha256=EG-HhaDHl
 mcp_agent/resources/examples/data-analysis/analysis.py,sha256=5zLoioZQNKUfXt1EXLrGX3TU06-0N06-L9Gtp9BIr6k,2611
 mcp_agent/resources/examples/data-analysis/fastagent.config.yaml,sha256=ini94PHyJCfgpjcjHKMMbGuHs6LIj46F1NwY0ll5HVk,1609
 mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv,sha256=pcMeOL1_r8m8MziE6xgbBrQbjl5Ijo98yycZn7O-dlk,227977
-mcp_agent/resources/examples/internal/agent.py,sha256=
-mcp_agent/resources/examples/internal/fastagent.config.yaml,sha256=
+mcp_agent/resources/examples/internal/agent.py,sha256=orShmYKkrjMc7qa3ZtfzoO80uOClZaPaw2Wvc4_FIH8,406
+mcp_agent/resources/examples/internal/fastagent.config.yaml,sha256=U2s0Asc06wC04FstKnBMeB3J5gIa3xa-Rao-1-74XTk,1935
 mcp_agent/resources/examples/internal/job.py,sha256=WEKIAANMEAuKr13__rYf3PqJeTAsNB_kqYqbqVYQlUM,4093
 mcp_agent/resources/examples/internal/prompt_category.py,sha256=b3tjkfrVIW1EPoDjr4mG87wlZ7D0Uju9eg6asXAYYpI,551
 mcp_agent/resources/examples/internal/prompt_sizing.py,sha256=UtQ_jvwS4yMh80PHhUQXJ9WXk-fqNYlqUMNTNkZosKM,2003
@@ -93,7 +95,7 @@ mcp_agent/resources/examples/workflows/chaining.py,sha256=1G_0XBcFkSJCOXb6N_iXWl
 mcp_agent/resources/examples/workflows/evaluator.py,sha256=3XmW1mjImlaWb0c5FWHYS9yP8nVGTbEdJySAoWXwrDg,3109
 mcp_agent/resources/examples/workflows/fastagent.config.yaml,sha256=k2AiapOcK42uqG2nWDVvnSLqN4okQIQZK0FTbZufBpY,809
 mcp_agent/resources/examples/workflows/human_input.py,sha256=c8cBdLEPbaMXddFwsfN3Z7RFs5PZXsdrjANfvq1VTPM,605
-mcp_agent/resources/examples/workflows/orchestrator.py,sha256=
+mcp_agent/resources/examples/workflows/orchestrator.py,sha256=2nkRkpMgi4V-M6fwcpipQUImf0nZoGf4zfVllfojsz8,2596
 mcp_agent/resources/examples/workflows/parallel.py,sha256=pLbQrtXfbdYqMVddxtg5dZnBnm5Wo2mXlIa1Vf2F1FQ,3096
 mcp_agent/resources/examples/workflows/router.py,sha256=J1yTAimFY53jcyd21cq1XAZvtOxnNsmtSjSp13M5EgE,1668
 mcp_agent/resources/examples/workflows/sse.py,sha256=tdmmh7p87YNfcF_fCq3evAmc1Nek0oY0YOqLRKBLqKg,570
@@ -120,7 +122,7 @@ mcp_agent/workflows/llm/anthropic_utils.py,sha256=OFmsVmDQ22880duDWQrEeQEB47xtvu
 mcp_agent/workflows/llm/augmented_llm.py,sha256=9cWy-4yNG13w4oQgXmisgWTcm6aoJIRCYTX85Bkf-MI,30554
 mcp_agent/workflows/llm/augmented_llm_anthropic.py,sha256=opV4PTai2eoYUzJS0gCPGEy4pe-lT2Eo1Sao6Y_EIiY,20140
 mcp_agent/workflows/llm/augmented_llm_openai.py,sha256=OUSmvY2m6HU1JOK5nEzKDHpHReT0ffjoHDFHk6aYhoc,21002
-mcp_agent/workflows/llm/augmented_llm_passthrough.py,sha256=
+mcp_agent/workflows/llm/augmented_llm_passthrough.py,sha256=aeQ2WWNIzdzgYWHijE-RWgzFzSUcRJNRv5zq0ug3B2U,7891
 mcp_agent/workflows/llm/augmented_llm_playback.py,sha256=5ypv3owJU6pscktqg9tkLQVKNgaA50e8OWmC1hAhrtE,4328
 mcp_agent/workflows/llm/llm_selector.py,sha256=G7pIybuBDwtmyxUDov_QrNYH2FoI0qFRu2JfoxWUF5Y,11045
 mcp_agent/workflows/llm/model_factory.py,sha256=UHePE5Ow03kpE44kjYtFGEhVFSYp0AY2yGri58yCBKU,7688
@@ -151,8 +153,8 @@ mcp_agent/workflows/swarm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
 mcp_agent/workflows/swarm/swarm.py,sha256=-lAIeSWDqbGHGRPTvjiP9nIKWvxxy9DAojl9yQzO1Pw,11050
 mcp_agent/workflows/swarm/swarm_anthropic.py,sha256=pW8zFx5baUWGd5Vw3nIDF2oVOOGNorij4qvGJKdYPcs,1624
 mcp_agent/workflows/swarm/swarm_openai.py,sha256=wfteywvAGkT5bLmIxX_StHJq8144whYmCRnJASAjOes,1596
-fast_agent_mcp-0.1.
-fast_agent_mcp-0.1.
-fast_agent_mcp-0.1.
-fast_agent_mcp-0.1.
-fast_agent_mcp-0.1.
+fast_agent_mcp-0.1.11.dist-info/METADATA,sha256=ff0dlOdPoM72tfefKvN6bdVwszZIKE-5wIkSAI3qJTU,29678
+fast_agent_mcp-0.1.11.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+fast_agent_mcp-0.1.11.dist-info/entry_points.txt,sha256=qPM7vwtN1_KmP3dXehxgiCxUBHtqP7yfenZigztvY-w,226
+fast_agent_mcp-0.1.11.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
+fast_agent_mcp-0.1.11.dist-info/RECORD,,
mcp_agent/agents/agent.py
CHANGED
@@ -9,7 +9,10 @@ from mcp.types import (
     ListToolsResult,
     TextContent,
     Tool,
+    EmbeddedResource,
+    ReadResourceResult,
 )
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart

 from mcp_agent.core.exceptions import PromptExitError
 from mcp_agent.mcp.mcp_aggregator import MCPAggregator
@@ -320,6 +323,11 @@ class Agent(MCPAggregator):
             ],
         )

+    async def read_resource(
+        self, server_name: str, resource_name: str
+    ) -> ReadResourceResult:
+        return None
+
     async def apply_prompt(
         self, prompt_name: str, arguments: dict[str, str] = None
     ) -> str:
@@ -359,3 +367,107 @@ class Agent(MCPAggregator):
         # The LLM will automatically generate a response if needed
         result = await self._llm.apply_prompt_template(prompt_result, display_name)
         return result
+
+    async def get_resource(self, server_name: str, resource_name: str):
+        """
+        Get a resource directly from an MCP server by name.
+
+        Args:
+            server_name: Name of the MCP server to retrieve the resource from
+            resource_name: Name of the resource to retrieve
+
+        Returns:
+            The resource object from the MCP server
+
+        Raises:
+            ValueError: If the server doesn't exist or the resource couldn't be found
+        """
+        if not self.initialized:
+            await self.initialize()
+
+        # Get the specified server connection
+        server = self.get_server(server_name)
+        if not server:
+            raise ValueError(f"Server '{server_name}' not found or not connected")
+
+        # Request the resource directly from the server
+        try:
+            resource_result = await server.get_resource(resource_name)
+            return resource_result
+        except Exception as e:
+            self.logger.error(
+                f"Error retrieving resource '{resource_name}' from server '{server_name}': {str(e)}"
+            )
+            raise ValueError(
+                f"Failed to retrieve resource '{resource_name}' from server '{server_name}': {str(e)}"
+            )
+
+    async def get_embedded_resources(
+        self, server_name: str, resource_name: str
+    ) -> List[EmbeddedResource]:
+        """
+        Get a resource from an MCP server and return it as a list of embedded resources ready for use in prompts.
+
+        Args:
+            server_name: Name of the MCP server to retrieve the resource from
+            resource_name: Name or URI of the resource to retrieve
+
+        Returns:
+            List of EmbeddedResource objects ready to use in a PromptMessageMultipart
+
+        Raises:
+            ValueError: If the server doesn't exist or the resource couldn't be found
+        """
+        # Get the raw resource result
+        result: ReadResourceResult = await super().get_resource(
+            server_name, resource_name
+        )
+
+        # Convert each resource content to an EmbeddedResource
+        embedded_resources: List[EmbeddedResource] = []
+        for resource_content in result.contents:
+            embedded_resource = EmbeddedResource(
+                type="resource", resource=resource_content, annotations=None
+            )
+            embedded_resources.append(embedded_resource)
+
+        return embedded_resources
+
+    async def with_resource(
+        self,
+        prompt_content: Union[str, PromptMessageMultipart],
+        server_name: str,
+        resource_name: str,
+    ) -> str:
+        """
+        Create a prompt with the given content and resource, then send it to the agent.
+
+        Args:
+            prompt_content: Either a string message or an existing PromptMessageMultipart
+            server_name: Name of the MCP server to retrieve the resource from
+            resource_name: Name or URI of the resource to retrieve
+
+        Returns:
+            The agent's response as a string
+        """
+        # Get the embedded resources
+        embedded_resources: List[EmbeddedResource] = await self.get_embedded_resources(
+            server_name, resource_name
+        )
+
+        # Create or update the prompt message
+        prompt: PromptMessageMultipart
+        if isinstance(prompt_content, str):
+            # Create a new prompt with the text and resources
+            content = [TextContent(type="text", text=prompt_content)]
+            content.extend(embedded_resources)
+            prompt = PromptMessageMultipart(role="user", content=content)
+        elif isinstance(prompt_content, PromptMessageMultipart):
+            # Add resources to the existing prompt
+            prompt = prompt_content
+            prompt.content.extend(embedded_resources)
+        else:
+            raise TypeError("prompt_content must be a string or PromptMessageMultipart")
+
+        # Send the prompt to the agent and return the response
+        return await self._llm.generate_prompt(prompt, None)
mcp_agent/config.py
CHANGED
@@ -18,6 +18,12 @@ class MCPServerAuthSettings(BaseModel):
     model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)


+class MCPSamplingSettings(BaseModel):
+    model: str = "haiku"
+
+    model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)
+
+
 class MCPRootSettings(BaseModel):
     """Represents a root directory configuration for an MCP server."""

@@ -81,6 +87,9 @@ class MCPServerSettings(BaseModel):
     env: Dict[str, str] | None = None
     """Environment variables to pass to the server process."""

+    sampling: MCPSamplingSettings | None = None
+    """Sampling settings for this Client/Server pair"""
+

 class MCPSettings(BaseModel):
     """Configuration for all MCP servers."""
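The new `MCPSamplingSettings` model and the `sampling` field on `MCPServerSettings` back the YAML example shown in the METADATA diff above. A minimal sketch of the programmatic equivalent, assuming `MCPSettings.servers` maps server names to `MCPServerSettings` as in the upstream mcp-agent config; the server name and args are taken from the README example:

```python
from mcp_agent.config import MCPSamplingSettings, MCPServerSettings, MCPSettings

# Programmatic equivalent of the fastagent.config.yaml sampling block above.
# Assumes MCPSettings.servers is a name -> MCPServerSettings mapping.
settings = MCPSettings(
    servers={
        "sampling_resource": MCPServerSettings(
            command="uv",
            args=["run", "sampling_resource_server.py"],
            sampling=MCPSamplingSettings(model="haiku"),  # per-server sampling model
        )
    }
)
```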
mcp_agent/context.py
CHANGED
@@ -24,7 +24,6 @@ from mcp_agent.executor.executor import Executor
 from mcp_agent.executor.decorator_registry import (
     DecoratorRegistry,
     register_asyncio_decorators,
-    register_temporal_decorators,
 )
 from mcp_agent.executor.task_registry import ActivityRegistry
 from mcp_agent.executor.executor import AsyncioExecutor
@@ -194,7 +193,6 @@ async def initialize_context(

     context.decorator_registry = DecoratorRegistry()
     register_asyncio_decorators(context.decorator_registry)
-    register_temporal_decorators(context.decorator_registry)

     # Store the tracer in context if needed
     context.tracer = trace.get_tracer(config.otel.service_name)
mcp_agent/core/agent_app.py
CHANGED
@@ -112,6 +112,35 @@ class AgentApp:

         proxy = self._agents[target]
         return await proxy.apply_prompt(prompt_name, arguments)
+
+    async def with_resource(
+        self,
+        prompt_content: Union[str, PromptMessageMultipart],
+        server_name: str,
+        resource_name: str,
+        agent_name: Optional[str] = None,
+    ) -> str:
+        """
+        Create a prompt with the given content and resource, then send it to the agent.
+
+        Args:
+            prompt_content: Either a string message or an existing PromptMessageMultipart
+            server_name: Name of the MCP server to retrieve the resource from
+            resource_name: Name or URI of the resource to retrieve
+            agent_name: The name of the agent to use (uses default if None)
+
+        Returns:
+            The agent's response as a string
+        """
+        target = agent_name or self._default
+        if not target:
+            raise ValueError("No default agent available")
+
+        if target not in self._agents:
+            raise ValueError(f"No agent named '{target}'")
+
+        proxy = self._agents[target]
+        return await proxy.with_resource(prompt_content, server_name, resource_name)

     async def prompt(self, agent_name: Optional[str] = None, default: str = "") -> str:
         """
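Taken together with the `Agent` methods above, this gives a one-call way to attach an MCP resource to a prompt from an application. A minimal usage sketch, modelled on the README example in the METADATA diff; the agent name "summarizer" and server name "docs" are illustrative placeholders, not values from the package:

```python
import asyncio
from mcp_agent.core.fastagent import FastAgent

fast = FastAgent("resource example")

# "summarizer" and "docs" are placeholder names for this sketch.
@fast.agent(name="summarizer", servers=["docs"])
async def main():
    async with fast.run() as agent:
        summary: str = await agent.with_resource(
            "Summarise this PDF please",         # prompt_content
            "docs",                              # server_name
            "resource://fast-agent/sample.pdf",  # resource_name / URI
        )
        print(summary)

if __name__ == "__main__":
    asyncio.run(main())
```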
mcp_agent/core/factory.py
CHANGED
@@ -172,16 +172,17 @@ async def create_agents_by_type(
         if agent_type == AgentType.BASIC:
             # Get the agent name for special handling
             agent_name = agent_data["config"].name
-            agent = Agent(
-
-
-
-
-                model=config.model,
-                request_params=config.default_request_params,
-            )
-            agent._llm = await agent.attach_llm(llm_factory)
+            agent = Agent(
+                config=config,
+                context=app_instance.context,
+            )
+            await agent.initialize()

+            llm_factory = model_factory_func(
+                model=config.model,
+                request_params=config.default_request_params,
+            )
+            agent._llm = await agent.attach_llm(llm_factory)
             # Store the agent
             instance = agent

@@ -222,16 +223,16 @@
                 default_request_params=base_params,
             )
             planner_agent = Agent(
-                config=planner_config,
+                config=planner_config,
+                context=app_instance.context,
             )
             planner_factory = model_factory_func(
                 model=config.model,
                 request_params=config.default_request_params,
             )

-
-
-
+            planner = await planner_agent.attach_llm(planner_factory)
+            await planner.initialize()
             # Create the orchestrator with pre-configured planner
             instance = Orchestrator(
                 name=config.name,
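The net effect of the first hunk is a changed construction order for basic agents: the `Agent` is built from its config plus the shared app context and initialized before an LLM is attached. A condensed sketch of that sequence, not the package's own code; `config`, `app_instance` and `model_factory_func` are the names used in the diff above:

```python
from mcp_agent.agents.agent import Agent

# Sketch of the order factory.py now follows for a basic agent:
# build with config + context, initialize, then attach the LLM.
async def build_basic_agent(config, app_instance, model_factory_func) -> Agent:
    agent = Agent(config=config, context=app_instance.context)
    await agent.initialize()

    llm_factory = model_factory_func(
        model=config.model,
        request_params=config.default_request_params,
    )
    agent._llm = await agent.attach_llm(llm_factory)
    return agent
```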
mcp_agent/core/fastagent.py
CHANGED
@@ -16,7 +16,6 @@ from contextlib import asynccontextmanager
 from functools import partial

 from mcp_agent.app import MCPApp
-from mcp_agent.context_dependent import ContextDependent
 from mcp_agent.config import Settings

 from mcp_agent.core.agent_app import AgentApp
@@ -64,7 +63,7 @@ from mcp_agent.mcp_server import AgentMCPServer
 T = TypeVar("T") # For the wrapper classes


-class FastAgent(ContextDependent):
+class FastAgent:
     """
     A decorator-based interface for MCP Agent applications.
     Provides a simplified way to create and manage agents using decorators.
@@ -320,6 +319,7 @@ class FastAgent(ContextDependent):
         """
         active_agents = {}
         had_error = False
+        await self.app.initialize()

         # Handle quiet mode by disabling logger settings after initialization
         quiet_mode = hasattr(self, "args") and self.args.quiet
mcp_agent/core/proxies.py
CHANGED
@@ -1,6 +1,9 @@
 """
 Proxy classes for agent interactions.
 These proxies provide a consistent interface for interacting with different types of agents.
+
+FOR COMPATIBILITY WITH LEGACY MCP-AGENT CODE
+
 """

 from typing import List, Optional, Dict, Union, TYPE_CHECKING
@@ -8,6 +11,7 @@ from typing import List, Optional, Dict, Union, TYPE_CHECKING
 from mcp_agent.agents.agent import Agent
 from mcp_agent.app import MCPApp
 from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
+from mcp.types import EmbeddedResource

 # Handle circular imports
 if TYPE_CHECKING:
@@ -129,6 +133,43 @@ class LLMAgentProxy(BaseAgentProxy):
         """
         return await self._agent.apply_prompt(prompt_name, arguments)

+    # Add the new methods
+    async def get_embedded_resources(
+        self, server_name: str, resource_name: str
+    ) -> List[EmbeddedResource]:
+        """
+        Get a resource from an MCP server and return it as a list of embedded resources ready for use in prompts.
+
+        Args:
+            server_name: Name of the MCP server to retrieve the resource from
+            resource_name: Name or URI of the resource to retrieve
+
+        Returns:
+            List of EmbeddedResource objects ready to use in a PromptMessageMultipart
+        """
+        return await self._agent.get_embedded_resources(server_name, resource_name)
+
+    async def with_resource(
+        self,
+        prompt_content: Union[str, PromptMessageMultipart],
+        server_name: str,
+        resource_name: str,
+    ) -> str:
+        """
+        Create a prompt with the given content and resource, then send it to the agent.
+
+        Args:
+            prompt_content: Either a string message or an existing PromptMessageMultipart
+            server_name: Name of the MCP server to retrieve the resource from
+            resource_name: Name or URI of the resource to retrieve
+
+        Returns:
+            The agent's response as a string
+        """
+        return await self._agent.with_resource(
+            prompt_content, server_name, resource_name
+        )
+

 class WorkflowProxy(BaseAgentProxy):
     """Proxy for workflow types that implement generate_str() directly"""
mcp_agent/logging/listeners.py
CHANGED
@@ -177,10 +177,7 @@ class BatchingListener(FilteredListener):

         if self._flush_task and not self._flush_task.done():
             self._flush_task.cancel()
-            try:
-                await self._flush_task
-            except asyncio.CancelledError:
-                pass
+            await self._flush_task
         self._flush_task = None
         await self.flush()

@@ -193,8 +190,8 @@
                 )
             except asyncio.TimeoutError:
                 await self.flush()
-            except asyncio.CancelledError:
-                break
+            # except asyncio.CancelledError:
+            #     break
             finally:
                 await self.flush() # Final flush

mcp_agent/mcp/gen_client.py
CHANGED
@@ -6,7 +6,7 @@ from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStre
 from mcp import ClientSession

 from mcp_agent.logging.logger import get_logger
-from mcp_agent.
+from mcp_agent.mcp.interfaces import ServerRegistryProtocol
 from mcp_agent.mcp.mcp_agent_client_session import MCPAgentClientSession

 logger = get_logger(__name__)
@@ -15,7 +15,7 @@ logger = get_logger(__name__)
 @asynccontextmanager
 async def gen_client(
     server_name: str,
-    server_registry:
+    server_registry: ServerRegistryProtocol,
     client_session_factory: Callable[
         [MemoryObjectReceiveStream, MemoryObjectSendStream, timedelta | None],
         ClientSession,
@@ -41,7 +41,7 @@ async def gen_client(

 async def connect(
     server_name: str,
-    server_registry:
+    server_registry: ServerRegistryProtocol,
     client_session_factory: Callable[
         [MemoryObjectReceiveStream, MemoryObjectSendStream, timedelta | None],
         ClientSession,
@@ -67,7 +67,7 @@

 async def disconnect(
     server_name: str | None,
-    server_registry:
+    server_registry: ServerRegistryProtocol,
 ) -> None:
     """
     Disconnect from the specified server. If server_name is None, disconnect from all servers.
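
With `gen_client`, `connect` and `disconnect` now typed against `ServerRegistryProtocol` from the new `mcp_agent.mcp.interfaces` module, any registry implementing that protocol can be passed in. A hedged usage sketch: `list_server_tools` is an illustrative helper, not part of the package, and it assumes the application context exposes a compatible `server_registry`, as in upstream mcp-agent:

```python
from mcp_agent.mcp.gen_client import gen_client

# Illustrative helper: open a short-lived client session against a named
# server and list its tools. Assumes `context.server_registry` satisfies
# ServerRegistryProtocol (true for the upstream mcp-agent Context).
async def list_server_tools(context, server_name: str) -> list[str]:
    async with gen_client(server_name, server_registry=context.server_registry) as session:
        result = await session.list_tools()
        return [tool.name for tool in result.tools]
```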
|