fast-agent-mcp 0.1.8__py3-none-any.whl → 0.1.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/METADATA +26 -4
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/RECORD +43 -22
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/entry_points.txt +1 -0
- mcp_agent/agents/agent.py +5 -11
- mcp_agent/core/agent_app.py +89 -13
- mcp_agent/core/fastagent.py +13 -3
- mcp_agent/core/mcp_content.py +222 -0
- mcp_agent/core/prompt.py +132 -0
- mcp_agent/core/proxies.py +41 -36
- mcp_agent/logging/transport.py +30 -3
- mcp_agent/mcp/mcp_aggregator.py +11 -10
- mcp_agent/mcp/mime_utils.py +69 -0
- mcp_agent/mcp/prompt_message_multipart.py +64 -0
- mcp_agent/mcp/prompt_serialization.py +447 -0
- mcp_agent/mcp/prompts/__init__.py +0 -0
- mcp_agent/mcp/prompts/__main__.py +10 -0
- mcp_agent/mcp/prompts/prompt_server.py +508 -0
- mcp_agent/mcp/prompts/prompt_template.py +469 -0
- mcp_agent/mcp/resource_utils.py +203 -0
- mcp_agent/resources/examples/internal/agent.py +1 -1
- mcp_agent/resources/examples/internal/fastagent.config.yaml +2 -2
- mcp_agent/resources/examples/internal/sizer.py +0 -5
- mcp_agent/resources/examples/prompting/__init__.py +3 -0
- mcp_agent/resources/examples/prompting/agent.py +23 -0
- mcp_agent/resources/examples/prompting/fastagent.config.yaml +44 -0
- mcp_agent/resources/examples/prompting/image_server.py +56 -0
- mcp_agent/workflows/llm/anthropic_utils.py +101 -0
- mcp_agent/workflows/llm/augmented_llm.py +139 -66
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +127 -251
- mcp_agent/workflows/llm/augmented_llm_openai.py +149 -305
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +43 -0
- mcp_agent/workflows/llm/augmented_llm_playback.py +109 -0
- mcp_agent/workflows/llm/model_factory.py +20 -3
- mcp_agent/workflows/llm/openai_utils.py +65 -0
- mcp_agent/workflows/llm/providers/__init__.py +8 -0
- mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +348 -0
- mcp_agent/workflows/llm/providers/multipart_converter_openai.py +426 -0
- mcp_agent/workflows/llm/providers/openai_multipart.py +197 -0
- mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +258 -0
- mcp_agent/workflows/llm/providers/sampling_converter_openai.py +229 -0
- mcp_agent/workflows/llm/sampling_format_converter.py +39 -0
- mcp_agent/core/server_validation.py +0 -44
- mcp_agent/core/simulator_registry.py +0 -22
- mcp_agent/workflows/llm/enhanced_passthrough.py +0 -70
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/licenses/LICENSE +0 -0
{fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fast-agent-mcp
-Version: 0.1.8
+Version: 0.1.9
 Summary: Define, Prompt and Test MCP enabled Agents and Workflows
 Author-email: Shaun Smith <fastagent@llmindset.co.uk>, Sarmad Qadri <sarmad@lastmileai.dev>
 License: Apache License
@@ -212,7 +212,7 @@ Requires-Python: >=3.10
 Requires-Dist: aiohttp>=3.11.13
 Requires-Dist: anthropic>=0.49.0
 Requires-Dist: fastapi>=0.115.6
-Requires-Dist: mcp
+Requires-Dist: mcp>=1.4.1
 Requires-Dist: numpy>=2.2.1
 Requires-Dist: openai>=1.63.2
 Requires-Dist: opentelemetry-distro>=0.50b0
@@ -259,6 +259,10 @@ The simple declarative syntax lets you concentrate on composing your Prompts and
 
 Evaluate how different models handle Agent and MCP Server calling tasks, then build multi-model workflows using the best provider for each task.
 
+`fast-agent` is now multi-modal, supporting Images and PDFs for both Anthropic and OpenAI endpoints (for supported models), via Prompts and MCP Tool Call results.
+
+> [!TIP] > `fast-agent` is now MCP Native! Coming Soon - Full Documentation Site.
+
 ### Agent Application Development
 
 Prompts and configurations that define your Agent Applications are stored in simple files, with minimal boilerplate, enabling simple management and version control.
@@ -588,6 +592,19 @@ agent["greeter"].send("Good Evening!") # Dictionary access is supported
 )
 ```
 
+### Multimodal Support
+
+Add Resources to prompts using either the inbuilt `prompt-server` or MCP Types directly. Convenience classes are made available to do so simply, for example:
+
+#### MCP Tool Result Conversion
+
+LLM APIs have restrictions on the content types that can be returned as Tool Calls/Function results via their Chat Completions APIs:
+
+- OpenAI supports Text
+- Anthropic supports Text and Image
+
+For MCP Tool Results, `ImageResources` and `EmbeddedResources` are converted to User Messages and added to the conversation.
+
 ### Prompts
 
 MCP Prompts are supported with `apply_prompt(name,arguments)`, which always returns an Assistant Message. If the last message from the MCP Server is a 'User' message, it is sent to the LLM for processing. Prompts applied to the Agent's Context are retained - meaning that with `use_history=False`, Agents can act as finely tuned responders.
@@ -605,8 +622,9 @@ Prompts can also be applied interactively through the interactive interface by u
 
 ### llmindset.co.uk fork:
 
+- Addition of MCP Prompts including Prompt Server and agent save/replay ability.
 - Overhaul of Eval/Opt for Conversation Management
-
+- Removed instructor/double-llm calling - native structured outputs for OAI.
 - Improved handling of Parallel/Fan-In and response options
 - XML based generated prompts
 - "FastAgent" style prototyping, with per-agent models
@@ -625,4 +643,8 @@ Prompts can also be applied interactively through the interactive interface by u
 - Declarative workflows
 - Numerous defect fixes
 
-### Features to add
+### Features to add (Committed)
+
+- Run Agent as MCP Server, with interop
+- Multi-part content types supporting Vision, PDF and multi-part Text.
+- Improved test automation (supported by prompt_server.py and augmented_llm_playback.py)
{fast_agent_mcp-0.1.8.dist-info → fast_agent_mcp-0.1.9.dist-info}/RECORD
CHANGED
@@ -8,7 +8,7 @@ mcp_agent/event_progress.py,sha256=25iz0yyg-O4glMmtijcYpDdUmtUIKsCmR_8A52GgeC4,2
 mcp_agent/mcp_server_registry.py,sha256=5x30L1IlmC18JASl7NQbZYHMqPWS3ay0f_3U3uleaMM,9884
 mcp_agent/progress_display.py,sha256=GeJU9VUt6qKsFVymG688hCMVCsAygG9ifiiEb5IcbN4,361
 mcp_agent/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mcp_agent/agents/agent.py,sha256=
+mcp_agent/agents/agent.py,sha256=foxgVBSjpRp697467-girJmAjmEylfDCXbXtI660wHI,13173
 mcp_agent/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/cli/__main__.py,sha256=AVZ7tQFhU_sDOGuUGJq8ujgKtcxsYJBJwHbVaaiRDlI,166
 mcp_agent/cli/main.py,sha256=DE6EZzspfzHwPK59x8vL4AIDHRQkVQ1Ja70XRGU1IQs,2753
@@ -17,7 +17,7 @@ mcp_agent/cli/commands/bootstrap.py,sha256=Rmwbuwl52eHfnya7fnwKk2J7nCsHpSh6irka4
 mcp_agent/cli/commands/config.py,sha256=32YTS5jmsYAs9QzAhjkG70_daAHqOemf4XbZBBSMz6g,204
 mcp_agent/cli/commands/setup.py,sha256=_SCpd6_PrixqbSaE72JQ7erIRkZnJGmh_3TvvwSzEiE,6392
 mcp_agent/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mcp_agent/core/agent_app.py,sha256=
+mcp_agent/core/agent_app.py,sha256=6fzvExSmVSXyNo-Rq9Xvu0qUKjKHKjOpuhRfzCthV8o,29735
 mcp_agent/core/agent_types.py,sha256=yKiMbv9QO2dduq4zXmoMZlOZpXJZhM4oNwIq1-134FE,318
 mcp_agent/core/agent_utils.py,sha256=QMvwmxZyCqYhBzSyL9xARsxTuwdmlyjQvrPpsH36HnQ,1888
 mcp_agent/core/decorators.py,sha256=dkAah1eIuYsEfQISDryG0u2GrzNnsO_jyN7lhpQfNlM,16191
@@ -25,10 +25,10 @@ mcp_agent/core/enhanced_prompt.py,sha256=bykUEnnc1CEWODJwXvl4VGfCtrJPtVXU0D4mUgl
 mcp_agent/core/error_handling.py,sha256=D3HMW5odrbJvaKqcpCGj6eDXrbFcuqYaCZz7fyYiTu4,623
 mcp_agent/core/exceptions.py,sha256=a2-JGRwFFRoQEPuAq0JC5PhAJ5TO3xVJfdS4-VN29cw,2225
 mcp_agent/core/factory.py,sha256=TYtGtUKEVQi96uXQu3RddrpYGiUGolHMEATS57e4hgw,19074
-mcp_agent/core/fastagent.py,sha256=
-mcp_agent/core/
-mcp_agent/core/
-mcp_agent/core/
+mcp_agent/core/fastagent.py,sha256=CYor0u4Vqrn8XmYIaSEtfuLrJgF2YChJUAE_3xjRGmk,19659
+mcp_agent/core/mcp_content.py,sha256=rXT2C5gP9qgC-TI5F362ZLJi_erzcEOnlP9D2ZKK0i0,6860
+mcp_agent/core/prompt.py,sha256=R-X3kptu3ehV_SQeiGnP6F9HMN-92I8e73gnkQ1tDVs,4317
+mcp_agent/core/proxies.py,sha256=lawAc3mcoYlDpX9HwBc8tdh2oAr9YS_CT2LtbqXvuyg,8858
 mcp_agent/core/types.py,sha256=Zhi9iW7uiOfdpSt9NC0FCtGRFtJPg4mpZPK2aYi7a7M,817
 mcp_agent/core/validation.py,sha256=x0fsx5eLTawASFm9MDtEukwGOj_RTdY1OW064UihMR8,8309
 mcp_agent/eval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -49,29 +49,41 @@ mcp_agent/logging/listeners.py,sha256=lx2Pq_SE0rsG3nF3TwDSxkmsWzdXxIUjuaWct-KOtJ
 mcp_agent/logging/logger.py,sha256=Tr009BnfGUKuZcdinnSin0Z_zIsfDNGdcnamw2rDHRQ,10604
 mcp_agent/logging/rich_progress.py,sha256=IEVFdFGA0nwg6pSt9Ydni5LCNYZZPKYMe-6DCi9pO4Y,4851
 mcp_agent/logging/tracing.py,sha256=jQivxKYl870oXakmyUk7TXuTQSvsIzpHwZlSQfy4b0c,5203
-mcp_agent/logging/transport.py,sha256=
+mcp_agent/logging/transport.py,sha256=MFgiCQ-YFP0tSMhDMpZCj585vflWcMydM4oyCFduVf0,17203
 mcp_agent/mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/mcp/gen_client.py,sha256=u0HwdJiw9YCerS5JC7JDuGgBh9oTcLd7vv9vPjwibXc,3025
 mcp_agent/mcp/mcp_activity.py,sha256=CajXCFWZ2cKEX9s4-HfNVAj471ePTVs4NOkvmIh65tE,592
 mcp_agent/mcp/mcp_agent_client_session.py,sha256=NtWcQhjmnnaR3yYcYj2d2lh-m563NexZUa57K1tAjeM,9477
 mcp_agent/mcp/mcp_agent_server.py,sha256=xP09HZTeguJi4Fq0p3fjLBP55uSYe5AdqM90xCgn9Ho,1639
-mcp_agent/mcp/mcp_aggregator.py,sha256=
+mcp_agent/mcp/mcp_aggregator.py,sha256=X_SnSX-_CPye87Xst_h0XyO4Cd3EuxBCEvLhU1SlRkU,36045
 mcp_agent/mcp/mcp_connection_manager.py,sha256=EPJTKiEMKnFYpC37SOXiLriQL2YyhH0s6vvZWQRb_Mo,13663
+mcp_agent/mcp/mime_utils.py,sha256=difepNR_gpb4MpMLkBRAoyhDk-AjXUHTiqKvT_VwS1o,1805
+mcp_agent/mcp/prompt_message_multipart.py,sha256=U7IN0JStmy26akTXcqE4x90oWzm8xs1qa0VeKIyPKmE,1962
+mcp_agent/mcp/prompt_serialization.py,sha256=StcXV7V4fqqtCmOCXGCyYXx5vpwNhL2xr3RG_awwdqI,16056
+mcp_agent/mcp/resource_utils.py,sha256=x-hMxVH7moVY0PLV1LHkpzk8cihL8AJINtzRcaGsiSE,6358
 mcp_agent/mcp/stdio.py,sha256=tW075R5rQ-UlflXWFKIFDgCbWbuhKqxhiYolWvyEkFs,3985
+mcp_agent/mcp/prompts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mcp_agent/mcp/prompts/__main__.py,sha256=gr1Tdz9fcK0EXjEuZg_BOnKUmvhYq5AH2lFZicVyNb0,237
+mcp_agent/mcp/prompts/prompt_server.py,sha256=lGa-H2rXarVcbwrcjfhdZ0rGwggE9eytUko6soiJVXo,17375
+mcp_agent/mcp/prompts/prompt_template.py,sha256=NDnSVA0W1wayZHCVx27lfuVPoxlAz-FfBwiCEQG9Ixk,16324
 mcp_agent/mcp_server/__init__.py,sha256=SEWyU7aSFzdSk6iTYnrQu-llji5_P5dp3TaztCt_rzo,154
 mcp_agent/mcp_server/agent_server.py,sha256=SUBggPyrzWtBRUC5xIMpCxu6ei-6Vah3q9Si12BQ-zY,4444
 mcp_agent/resources/examples/data-analysis/analysis-campaign.py,sha256=EG-HhaDHltZ4hHAqhgfX_pHM2wem48aYhSIKJxyWHKc,7269
 mcp_agent/resources/examples/data-analysis/analysis.py,sha256=5zLoioZQNKUfXt1EXLrGX3TU06-0N06-L9Gtp9BIr6k,2611
 mcp_agent/resources/examples/data-analysis/fastagent.config.yaml,sha256=ini94PHyJCfgpjcjHKMMbGuHs6LIj46F1NwY0ll5HVk,1609
 mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv,sha256=pcMeOL1_r8m8MziE6xgbBrQbjl5Ijo98yycZn7O-dlk,227977
-mcp_agent/resources/examples/internal/agent.py,sha256=
-mcp_agent/resources/examples/internal/fastagent.config.yaml,sha256=
+mcp_agent/resources/examples/internal/agent.py,sha256=4EXhVJcX5mw2LuDqmZL4B4SM0zxMFmMou7NCEeoVeQ0,391
+mcp_agent/resources/examples/internal/fastagent.config.yaml,sha256=NF-plJ2ZMLZL8_YfdwmfsvRyafgsNEEHzsjm_p8vNlY,1858
 mcp_agent/resources/examples/internal/job.py,sha256=WEKIAANMEAuKr13__rYf3PqJeTAsNB_kqYqbqVYQlUM,4093
 mcp_agent/resources/examples/internal/prompt_category.py,sha256=b3tjkfrVIW1EPoDjr4mG87wlZ7D0Uju9eg6asXAYYpI,551
 mcp_agent/resources/examples/internal/prompt_sizing.py,sha256=UtQ_jvwS4yMh80PHhUQXJ9WXk-fqNYlqUMNTNkZosKM,2003
-mcp_agent/resources/examples/internal/sizer.py,sha256=
+mcp_agent/resources/examples/internal/sizer.py,sha256=RBq1qhYVKF2_qtRdvpKpRI3XIFpZ4eyBzhVjnlip-P8,356
 mcp_agent/resources/examples/internal/social.py,sha256=Cot2lg3PLhLm13gPdVFvFEN28-mm6x3-jHu2YsV4N3s,1707
 mcp_agent/resources/examples/mcp_researcher/researcher-eval.py,sha256=kNPjIU-JwE0oIBQKwhv6lZsUF_SPtYVkiEEbY1ZVZxk,1807
+mcp_agent/resources/examples/prompting/__init__.py,sha256=GG1zksC76L-wmerkjplWHwamelxl5vlY0YkRzgAq_v0,49
+mcp_agent/resources/examples/prompting/agent.py,sha256=gG2jQnRibO8OmljoFQAs9xxhCyHLCkVxJxQkfF7ykfY,607
+mcp_agent/resources/examples/prompting/fastagent.config.yaml,sha256=UR6LtCpeSIzkHsCrHJW1z-wE7AgmgKozS_IYcfcSAkc,1270
+mcp_agent/resources/examples/prompting/image_server.py,sha256=-6YWtzS-K5ofHtdoOk4uC3ZBFUyVELT9Fdck1RptcWg,1711
 mcp_agent/resources/examples/researcher/fastagent.config.yaml,sha256=bNOnID9OgdSBTUEhdimKB8LjaZLa1B6igmp-nxx8nr4,2271
 mcp_agent/resources/examples/researcher/researcher-eval.py,sha256=0qDjxun7CZ1cZ8JTa6G1v1XcpwGSSL6-qAZ35yI1-K4,1818
 mcp_agent/resources/examples/researcher/researcher-imp.py,sha256=Xfw2YAyjXd47pQz-uljgG5ii5x77fVuCP2XCivRDI48,7885
@@ -104,14 +116,23 @@ mcp_agent/workflows/intent_classifier/intent_classifier_llm.py,sha256=WSLUv2Casb
 mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py,sha256=Hp4454IniWFxV4ml50Ml8ip9rS1La5FBn5pd7vm1FHA,1964
 mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py,sha256=zj76WlTYnSCYjBQ_IDi5vFBQGmNwYaoUq1rT730sY98,1940
 mcp_agent/workflows/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mcp_agent/workflows/llm/
-mcp_agent/workflows/llm/
-mcp_agent/workflows/llm/
-mcp_agent/workflows/llm/
-mcp_agent/workflows/llm/
+mcp_agent/workflows/llm/anthropic_utils.py,sha256=OFmsVmDQ22880duDWQrEeQEB47xtvujSYJ-fNw1lhi0,3712
+mcp_agent/workflows/llm/augmented_llm.py,sha256=9cWy-4yNG13w4oQgXmisgWTcm6aoJIRCYTX85Bkf-MI,30554
+mcp_agent/workflows/llm/augmented_llm_anthropic.py,sha256=opV4PTai2eoYUzJS0gCPGEy4pe-lT2Eo1Sao6Y_EIiY,20140
+mcp_agent/workflows/llm/augmented_llm_openai.py,sha256=OUSmvY2m6HU1JOK5nEzKDHpHReT0ffjoHDFHk6aYhoc,21002
+mcp_agent/workflows/llm/augmented_llm_passthrough.py,sha256=oZC9K90DdjvCQiQ-2yH1FGTTYsjPl9EMRx4n5_CihIM,3996
+mcp_agent/workflows/llm/augmented_llm_playback.py,sha256=5ypv3owJU6pscktqg9tkLQVKNgaA50e8OWmC1hAhrtE,4328
 mcp_agent/workflows/llm/llm_selector.py,sha256=G7pIybuBDwtmyxUDov_QrNYH2FoI0qFRu2JfoxWUF5Y,11045
-mcp_agent/workflows/llm/model_factory.py,sha256=
+mcp_agent/workflows/llm/model_factory.py,sha256=UHePE5Ow03kpE44kjYtFGEhVFSYp0AY2yGri58yCBKU,7688
+mcp_agent/workflows/llm/openai_utils.py,sha256=GGkJF-nazA4HWrlmMKKLf0qSfl2gbSqo-rbMDoJs5mE,1895
 mcp_agent/workflows/llm/prompt_utils.py,sha256=EY3eddqnmc_YDUQJFysPnpTH6hr4r2HneeEmX76P8TQ,4948
+mcp_agent/workflows/llm/sampling_format_converter.py,sha256=-vN927eMyo0vYg9GkuWAUzYqQR_kpz4BLmukgNfm2K8,1457
+mcp_agent/workflows/llm/providers/__init__.py,sha256=qirdqAKIbw3BY1NBdGytH9tvpjOu0QNOqKAG2deD_U4,285
+mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py,sha256=TO0zHEnqnOEVfCjM-qp-DGrJoSUPRWChjUPTRo2Gt9U,13463
+mcp_agent/workflows/llm/providers/multipart_converter_openai.py,sha256=IaHgR-bo5PJBd960kDJYnvinLmg0dtajg6ZXFYeLke0,17691
+mcp_agent/workflows/llm/providers/openai_multipart.py,sha256=RKkwssszD6jJpZ-Hj875uu5rbePrwzN7v43Ec69Ziwg,7566
+mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py,sha256=vaM0QWzYP6VFAUwLLxRGpyB0erAQAJAhQed0eFT2jPQ,8916
+mcp_agent/workflows/llm/providers/sampling_converter_openai.py,sha256=yUTSF9fmcy-aNVd-9yGT2kGV7F0VAkYCQK5S8eImeIs,8436
 mcp_agent/workflows/orchestrator/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/workflows/orchestrator/orchestrator.py,sha256=s8-_4CG4oRnvYAwUqqyevGLpy21IYtcNtsd_SbRZ8Fk,22125
 mcp_agent/workflows/orchestrator/orchestrator_models.py,sha256=1ldku1fYA_hu2F6K4l2C96mAdds05VibtSzSQrGm3yw,7321
@@ -130,8 +151,8 @@ mcp_agent/workflows/swarm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
 mcp_agent/workflows/swarm/swarm.py,sha256=-lAIeSWDqbGHGRPTvjiP9nIKWvxxy9DAojl9yQzO1Pw,11050
 mcp_agent/workflows/swarm/swarm_anthropic.py,sha256=pW8zFx5baUWGd5Vw3nIDF2oVOOGNorij4qvGJKdYPcs,1624
 mcp_agent/workflows/swarm/swarm_openai.py,sha256=wfteywvAGkT5bLmIxX_StHJq8144whYmCRnJASAjOes,1596
-fast_agent_mcp-0.1.
-fast_agent_mcp-0.1.
-fast_agent_mcp-0.1.
-fast_agent_mcp-0.1.
-fast_agent_mcp-0.1.
+fast_agent_mcp-0.1.9.dist-info/METADATA,sha256=c7JIWUS2bWR5pQQ6cVPwzBOQspzzvB8I25zo3wRUCQg,29748
+fast_agent_mcp-0.1.9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+fast_agent_mcp-0.1.9.dist-info/entry_points.txt,sha256=qPM7vwtN1_KmP3dXehxgiCxUBHtqP7yfenZigztvY-w,226
+fast_agent_mcp-0.1.9.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
+fast_agent_mcp-0.1.9.dist-info/RECORD,,
mcp_agent/agents/agent.py
CHANGED
@@ -320,18 +320,20 @@ class Agent(MCPAggregator):
             ],
         )
 
-    async def apply_prompt(
+    async def apply_prompt(
+        self, prompt_name: str, arguments: dict[str, str] = None
+    ) -> str:
         """
         Apply an MCP Server Prompt by name and return the assistant's response.
         Will search all available servers for the prompt if not namespaced.
-
+
         If the last message in the prompt is from a user, this will automatically
         generate an assistant response to ensure we always end with an assistant message.
 
         Args:
             prompt_name: The name of the prompt to apply
            arguments: Optional dictionary of string arguments to pass to the prompt template
-
+
         Returns:
             The assistant's response or error message
         """
@@ -357,11 +359,3 @@ class Agent(MCPAggregator):
         # The LLM will automatically generate a response if needed
         result = await self._llm.apply_prompt_template(prompt_result, display_name)
         return result
-
-    # For backward compatibility
-    async def load_prompt(self, prompt_name: str, arguments: dict[str, str] = None) -> str:
-        """
-        Legacy method - use apply_prompt instead.
-        This is maintained for backward compatibility.
-        """
-        return await self.apply_prompt(prompt_name, arguments)
mcp_agent/core/agent_app.py
CHANGED
@@ -2,9 +2,10 @@
 Main application wrapper for interacting with agents.
 """
 
-from typing import Optional, Dict, TYPE_CHECKING
+from typing import Optional, Dict, Union, TYPE_CHECKING
 
 from mcp_agent.app import MCPApp
+from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 from mcp_agent.progress_display import progress_display
 from mcp_agent.workflows.orchestrator.orchestrator import Orchestrator
 from mcp_agent.workflows.parallel.parallel_llm import ParallelLLM
@@ -37,16 +38,80 @@ class AgentApp:
         # Optional: set default agent for direct calls
         self._default = next(iter(agents)) if agents else None
 
-    async def
-
-
-
+    async def send_prompt(
+        self, prompt: PromptMessageMultipart, agent_name: Optional[str] = None
+    ) -> str:
+        """
+        Send a PromptMessageMultipart to an agent
+
+        Args:
+            prompt: The PromptMessageMultipart to send
+            agent_name: The name of the agent to send to (uses default if None)
+
+        Returns:
+            The agent's response as a string
+        """
+        target = agent_name or self._default
+        if not target:
+            raise ValueError("No default agent available")
+
+        if target not in self._agents:
+            raise ValueError(f"No agent named '{target}'")
+
+        proxy = self._agents[target]
+        return await proxy.send_prompt(prompt)
+
+    async def send(
+        self,
+        message: Union[str, PromptMessageMultipart] = None,
+        agent_name: Optional[str] = None,
+    ) -> str:
+        """
+        Send a message to the default agent or specified agent
+
+        Args:
+            message: Either a string message or a PromptMessageMultipart object
+            agent_name: The name of the agent to send to (uses default if None)
+
+        Returns:
+            The agent's response as a string
+        """
+        target = agent_name or self._default
+        if not target:
+            raise ValueError("No default agent available")
+
+        if target not in self._agents:
+            raise ValueError(f"No agent named '{target}'")
 
-
-
+        proxy = self._agents[target]
+        return await proxy.send(message)
+
+    async def apply_prompt(
+        self,
+        prompt_name: str,
+        arguments: Optional[dict[str, str]] = None,
+        agent_name: Optional[str] = None,
+    ) -> str:
+        """
+        Apply an MCP Server Prompt by name and return the assistant's response
 
-
-
+        Args:
+            prompt_name: The name of the prompt to apply
+            arguments: Optional dictionary of string arguments to pass to the prompt template
+            agent_name: The name of the agent to use (uses default if None)
+
+        Returns:
+            The assistant's response as a string
+        """
+        target = agent_name or self._default
+        if not target:
+            raise ValueError("No default agent available")
+
+        if target not in self._agents:
+            raise ValueError(f"No agent named '{target}'")
+
+        proxy = self._agents[target]
+        return await proxy.apply_prompt(prompt_name, arguments)
 
     async def prompt(self, agent_name: Optional[str] = None, default: str = "") -> str:
         """
@@ -506,7 +571,7 @@ class AgentApp:
                 if user_input == "":
                     continue
 
-                result = await self.send(
+                result = await self.send(user_input, agent)
 
                 # Check if current agent is a chain that should continue with final agent
                 if agent_types.get(agent) == "Chain":
@@ -532,10 +597,21 @@ class AgentApp:
         return self._agents[name]
 
     async def __call__(
-        self,
+        self,
+        message: Optional[Union[str, PromptMessageMultipart]] = None,
+        agent_name: Optional[str] = None,
     ) -> str:
-        """
+        """
+        Support: agent('message') or agent(Prompt.user('message'))
+
+        Args:
+            message: Either a string message or a PromptMessageMultipart object
+            agent_name: The name of the agent to use (uses default if None)
+
+        Returns:
+            The agent's response as a string
+        """
         target = agent_name or self._default
         if not target:
             raise ValueError("No default agent available")
-        return await self.send(
+        return await self.send(message, target)
mcp_agent/core/fastagent.py
CHANGED
@@ -70,7 +70,12 @@ class FastAgent(ContextDependent):
     Provides a simplified way to create and manage agents using decorators.
     """
 
-    def __init__(
+    def __init__(
+        self,
+        name: str,
+        config_path: Optional[str] = None,
+        ignore_unknown_args: bool = False,
+    ):
         """
         Initialize the decorator interface.
 
@@ -101,7 +106,12 @@ class FastAgent(ContextDependent):
             action="store_true",
             help="Disable progress display, tool and message logging for cleaner output",
         )
-
+
+        if ignore_unknown_args:
+            known_args, _ = parser.parse_known_args()
+            self.args = known_args
+        else:
+            self.args = parser.parse_args()
 
         # Quiet mode will be handled in _load_config()
 
@@ -372,7 +382,7 @@ class FastAgent(ContextDependent):
 
         # Create wrapper with all agents
         wrapper = AgentApp(agent_app, active_agents)
-
+
         # Store reference to AgentApp in MCPApp for proxies to access
         agent_app._agent_app = wrapper
 
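The new `ignore_unknown_args` flag makes the constructor tolerant of command-line options it does not define, switching from `parse_args()` to `parse_known_args()`. That helps when the script runs under another CLI wrapper or a test runner that adds its own flags. A minimal sketch; the application name is illustrative.

```python
from mcp_agent.core.fastagent import FastAgent

# Unrecognised CLI flags are ignored instead of aborting with a parser error.
fast = FastAgent("my-app", ignore_unknown_args=True)
```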
mcp_agent/core/mcp_content.py
ADDED
@@ -0,0 +1,222 @@
+"""
+Helper functions for creating MCP content types with minimal code.
+
+This module provides simple functions to create TextContent, ImageContent,
+EmbeddedResource, and other MCP content types with minimal boilerplate.
+"""
+
+import base64
+from pathlib import Path
+from typing import Literal, Optional, Union, List, Any
+
+from mcp.types import (
+    TextContent,
+    ImageContent,
+    EmbeddedResource,
+    TextResourceContents,
+    BlobResourceContents,
+)
+
+from mcp_agent.mcp.mime_utils import (
+    guess_mime_type,
+    is_binary_content,
+    is_image_mime_type,
+)
+
+
+def MCPText(
+    text: str,
+    role: Literal["user", "assistant"] = "user",
+    annotations: Optional[dict] = None,
+) -> dict:
+    """
+    Create a message with text content.
+
+    Args:
+        text: The text content
+        role: Role of the message, defaults to "user"
+        annotations: Optional annotations
+
+    Returns:
+        A dictionary with role and content that can be used in a prompt
+    """
+    return {
+        "role": role,
+        "content": TextContent(type="text", text=text, annotations=annotations),
+    }
+
+
+def MCPImage(
+    path: Union[str, Path] = None,
+    data: bytes = None,
+    mime_type: Optional[str] = None,
+    role: Literal["user", "assistant"] = "user",
+    annotations: Optional[dict] = None,
+) -> dict:
+    """
+    Create a message with image content.
+
+    Args:
+        path: Path to the image file
+        data: Raw image data bytes (alternative to path)
+        mime_type: Optional mime type, will be guessed from path if not provided
+        role: Role of the message, defaults to "user"
+        annotations: Optional annotations
+
+    Returns:
+        A dictionary with role and content that can be used in a prompt
+    """
+    if path is None and data is None:
+        raise ValueError("Either path or data must be provided")
+
+    if path is not None and data is not None:
+        raise ValueError("Only one of path or data can be provided")
+
+    if path is not None:
+        path = Path(path)
+        if not mime_type:
+            mime_type = guess_mime_type(str(path))
+        with open(path, "rb") as f:
+            data = f.read()
+
+    if not mime_type:
+        mime_type = "image/png"  # Default
+
+    b64_data = base64.b64encode(data).decode("ascii")
+
+    return {
+        "role": role,
+        "content": ImageContent(
+            type="image", data=b64_data, mimeType=mime_type, annotations=annotations
+        ),
+    }
+
+
+def MCPFile(
+    path: Union[str, Path],
+    mime_type: Optional[str] = None,
+    role: Literal["user", "assistant"] = "user",
+    annotations: Optional[dict] = None,
+) -> dict:
+    """
+    Create a message with an embedded resource from a file.
+
+    Args:
+        path: Path to the resource file
+        mime_type: Optional mime type, will be guessed from path if not provided
+        role: Role of the message, defaults to "user"
+        annotations: Optional annotations
+
+    Returns:
+        A dictionary with role and content that can be used in a prompt
+    """
+    path = Path(path)
+    uri = f"file://{path.absolute()}"
+
+    if not mime_type:
+        mime_type = guess_mime_type(str(path))
+
+    # Determine if this is text or binary content
+    is_binary = is_binary_content(mime_type)
+
+    if is_binary:
+        # Read as binary
+        binary_data = path.read_bytes()
+        b64_data = base64.b64encode(binary_data).decode("ascii")
+
+        resource = BlobResourceContents(uri=uri, blob=b64_data, mimeType=mime_type)
+    else:
+        # Read as text
+        try:
+            text_data = path.read_text(encoding="utf-8")
+            resource = TextResourceContents(uri=uri, text=text_data, mimeType=mime_type)
+        except UnicodeDecodeError:
+            # Fallback to binary if text read fails
+            binary_data = path.read_bytes()
+            b64_data = base64.b64encode(binary_data).decode("ascii")
+            resource = BlobResourceContents(
+                uri=uri, blob=b64_data, mimeType=mime_type or "application/octet-stream"
+            )
+
+    return {
+        "role": role,
+        "content": EmbeddedResource(
+            type="resource", resource=resource, annotations=annotations
+        ),
+    }
+
+
+
+def MCPPrompt(
+    *content_items, role: Literal["user", "assistant"] = "user"
+) -> List[dict]:
+    """
+    Create one or more prompt messages with various content types.
+
+    This function intelligently creates different content types:
+    - Strings become TextContent
+    - File paths with image mime types become ImageContent
+    - File paths with text mime types or other mime types become EmbeddedResource
+    - Dicts with role and content are passed through unchanged
+    - Raw bytes become ImageContent
+
+    Args:
+        *content_items: Content items of various types
+        role: Role for all items (user or assistant)
+
+    Returns:
+        List of messages that can be used in a prompt
+    """
+    result = []
+
+    for item in content_items:
+        if isinstance(item, dict) and "role" in item and "content" in item:
+            # Already a fully formed message
+            result.append(item)
+        elif isinstance(item, str) and not Path(item).exists():
+            # Simple text content (that's not a file path)
+            result.append(MCPText(item, role=role))
+        elif isinstance(item, Path) or isinstance(item, str):
+            # File path - determine the content type based on mime type
+            path_str = str(item)
+            mime_type = guess_mime_type(path_str)
+
+            if is_image_mime_type(mime_type):
+                # Image files (except SVG which is handled as text)
+                result.append(MCPImage(path=item, role=role))
+            else:
+                # All other file types (text documents, PDFs, SVGs, etc.)
+                result.append(MCPFile(path=item, role=role))
+        elif isinstance(item, bytes):
+            # Raw binary data, assume image
+            result.append(MCPImage(data=item, role=role))
+        else:
+            # Try to convert to string
+            result.append(MCPText(str(item), role=role))
+
+    return result
+
+
+def User(*content_items) -> List[dict]:
+    """Create user message(s) with various content types."""
+    return MCPPrompt(*content_items, role="user")
+
+
+def Assistant(*content_items) -> List[dict]:
+    """Create assistant message(s) with various content types."""
+    return MCPPrompt(*content_items, role="assistant")
+
+
+def create_message(content: Any, role: Literal["user", "assistant"] = "user") -> dict:
+    """
+    Create a single prompt message from content of various types.
+
+    Args:
+        content: Content of various types (str, Path, bytes, etc.)
+        role: Role of the message
+
+    Returns:
+        A dictionary with role and content that can be used in a prompt
+    """
+    messages = MCPPrompt(content, role=role)
+    return messages[0] if messages else {}