fast-agent-mcp 0.1.7__py3-none-any.whl → 0.1.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.8.dist-info}/METADATA +12 -6
- {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.8.dist-info}/RECORD +22 -21
- mcp_agent/core/agent_app.py +36 -31
- mcp_agent/core/decorators.py +3 -2
- mcp_agent/core/enhanced_prompt.py +106 -20
- mcp_agent/core/factory.py +28 -66
- mcp_agent/human_input/handler.py +4 -1
- mcp_agent/mcp/mcp_aggregator.py +16 -12
- mcp_agent/resources/examples/researcher/researcher-eval.py +1 -1
- mcp_agent/resources/examples/workflows/orchestrator.py +5 -4
- mcp_agent/resources/examples/workflows/router.py +0 -2
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +57 -87
- mcp_agent/workflows/llm/augmented_llm.py +25 -84
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +8 -30
- mcp_agent/workflows/llm/augmented_llm_openai.py +34 -40
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +61 -0
- mcp_agent/workflows/llm/model_factory.py +5 -3
- mcp_agent/workflows/orchestrator/orchestrator.py +62 -153
- mcp_agent/workflows/router/router_llm.py +18 -24
- {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.8.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.8.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.8.dist-info}/licenses/LICENSE +0 -0
{fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.8.dist-info}/METADATA
CHANGED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fast-agent-mcp
-Version: 0.1.7
+Version: 0.1.8
 Summary: Define, Prompt and Test MCP enabled Agents and Workflows
 Author-email: Shaun Smith <fastagent@llmindset.co.uk>, Sarmad Qadri <sarmad@lastmileai.dev>
 License: Apache License
@@ -209,9 +209,9 @@ Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python :: 3
 Requires-Python: >=3.10
-Requires-Dist:
+Requires-Dist: aiohttp>=3.11.13
+Requires-Dist: anthropic>=0.49.0
 Requires-Dist: fastapi>=0.115.6
-Requires-Dist: instructor>=1.7.2
 Requires-Dist: mcp==1.2.1
 Requires-Dist: numpy>=2.2.1
 Requires-Dist: openai>=1.63.2
@@ -224,11 +224,17 @@ Requires-Dist: pyyaml>=6.0.2
 Requires-Dist: rich>=13.9.4
 Requires-Dist: scikit-learn>=1.6.0
 Requires-Dist: typer>=0.15.1
-Provides-Extra: anthropic
-Requires-Dist: anthropic>=0.42.0; extra == 'anthropic'
-Requires-Dist: instructor[anthropic]>=1.7.2; extra == 'anthropic'
 Provides-Extra: cohere
 Requires-Dist: cohere>=5.13.4; extra == 'cohere'
+Provides-Extra: dev
+Requires-Dist: anthropic>=0.42.0; extra == 'dev'
+Requires-Dist: pre-commit>=4.0.1; extra == 'dev'
+Requires-Dist: pydantic>=2.10.4; extra == 'dev'
+Requires-Dist: pytest-asyncio>=0.21.1; extra == 'dev'
+Requires-Dist: pytest>=7.4.0; extra == 'dev'
+Requires-Dist: pyyaml>=6.0.2; extra == 'dev'
+Requires-Dist: ruff>=0.8.4; extra == 'dev'
+Requires-Dist: tomli>=2.2.1; extra == 'dev'
 Provides-Extra: openai
 Requires-Dist: openai>=1.58.1; extra == 'openai'
 Provides-Extra: temporal
```
{fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.8.dist-info}/RECORD
CHANGED

```diff
@@ -17,14 +17,14 @@ mcp_agent/cli/commands/bootstrap.py,sha256=Rmwbuwl52eHfnya7fnwKk2J7nCsHpSh6irka4
 mcp_agent/cli/commands/config.py,sha256=32YTS5jmsYAs9QzAhjkG70_daAHqOemf4XbZBBSMz6g,204
 mcp_agent/cli/commands/setup.py,sha256=_SCpd6_PrixqbSaE72JQ7erIRkZnJGmh_3TvvwSzEiE,6392
 mcp_agent/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mcp_agent/core/agent_app.py,sha256=
+mcp_agent/core/agent_app.py,sha256=ytZFkiqprFApi1sgYsIvojDMddX9G9M_zVkr5v8PIaE,27300
 mcp_agent/core/agent_types.py,sha256=yKiMbv9QO2dduq4zXmoMZlOZpXJZhM4oNwIq1-134FE,318
 mcp_agent/core/agent_utils.py,sha256=QMvwmxZyCqYhBzSyL9xARsxTuwdmlyjQvrPpsH36HnQ,1888
-mcp_agent/core/decorators.py,sha256=
-mcp_agent/core/enhanced_prompt.py,sha256=
+mcp_agent/core/decorators.py,sha256=dkAah1eIuYsEfQISDryG0u2GrzNnsO_jyN7lhpQfNlM,16191
+mcp_agent/core/enhanced_prompt.py,sha256=bykUEnnc1CEWODJwXvl4VGfCtrJPtVXU0D4mUglJK7A,18827
 mcp_agent/core/error_handling.py,sha256=D3HMW5odrbJvaKqcpCGj6eDXrbFcuqYaCZz7fyYiTu4,623
 mcp_agent/core/exceptions.py,sha256=a2-JGRwFFRoQEPuAq0JC5PhAJ5TO3xVJfdS4-VN29cw,2225
-mcp_agent/core/factory.py,sha256=
+mcp_agent/core/factory.py,sha256=TYtGtUKEVQi96uXQu3RddrpYGiUGolHMEATS57e4hgw,19074
 mcp_agent/core/fastagent.py,sha256=v8LN-Oux3X0zSnLeE_vtHYN8JR1nZakhO2CBdYbCGQI,19461
 mcp_agent/core/proxies.py,sha256=fUhuB3GoIdDIHg7rXn24O3C_tPiAzkZ9sTGuaQl4gxs,8827
 mcp_agent/core/server_validation.py,sha256=_59cn16nNT4HGPwg19HgxMtHK4MsdWYDUw_CuL-5xek,1696
@@ -40,7 +40,7 @@ mcp_agent/executor/temporal.py,sha256=U-wyltgWlVmzJoyivT6rR0Z1U3S6TbMXpeCxyuXako
 mcp_agent/executor/workflow.py,sha256=lA6r7PNEvxCVFHp4XkEJkaR0QCTf-J6iw9JwNx-tzNY,6727
 mcp_agent/executor/workflow_signal.py,sha256=3PWwSgXhz3PhkA8SRX3u0BDVoSlQqRGqC9d1qLC25vE,11210
 mcp_agent/human_input/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mcp_agent/human_input/handler.py,sha256=
+mcp_agent/human_input/handler.py,sha256=BTZroDkHRvknSfYcTs60NerE4lJY6ROpoIMjAmfSqYY,3197
 mcp_agent/human_input/types.py,sha256=ZvuDHvI0-wO2tFoS0bzrv8U5B83zYdxAG7g9G9jCxug,1489
 mcp_agent/logging/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/logging/events.py,sha256=qfYJnrqgXdujV-nl-iOwBEBh6HMraowBI4zeAWPPU4A,3461
@@ -55,7 +55,7 @@ mcp_agent/mcp/gen_client.py,sha256=u0HwdJiw9YCerS5JC7JDuGgBh9oTcLd7vv9vPjwibXc,3
 mcp_agent/mcp/mcp_activity.py,sha256=CajXCFWZ2cKEX9s4-HfNVAj471ePTVs4NOkvmIh65tE,592
 mcp_agent/mcp/mcp_agent_client_session.py,sha256=NtWcQhjmnnaR3yYcYj2d2lh-m563NexZUa57K1tAjeM,9477
 mcp_agent/mcp/mcp_agent_server.py,sha256=xP09HZTeguJi4Fq0p3fjLBP55uSYe5AdqM90xCgn9Ho,1639
-mcp_agent/mcp/mcp_aggregator.py,sha256=
+mcp_agent/mcp/mcp_aggregator.py,sha256=9NYawRUf0xFjE1v-7_7VWN6TwVgqhsnvIuTyAE0V-4I,36095
 mcp_agent/mcp/mcp_connection_manager.py,sha256=EPJTKiEMKnFYpC37SOXiLriQL2YyhH0s6vvZWQRb_Mo,13663
 mcp_agent/mcp/stdio.py,sha256=tW075R5rQ-UlflXWFKIFDgCbWbuhKqxhiYolWvyEkFs,3985
 mcp_agent/mcp_server/__init__.py,sha256=SEWyU7aSFzdSk6iTYnrQu-llji5_P5dp3TaztCt_rzo,154
@@ -73,7 +73,7 @@ mcp_agent/resources/examples/internal/sizer.py,sha256=FC9zTscPRStlaaeDFVUODnrD5y
 mcp_agent/resources/examples/internal/social.py,sha256=Cot2lg3PLhLm13gPdVFvFEN28-mm6x3-jHu2YsV4N3s,1707
 mcp_agent/resources/examples/mcp_researcher/researcher-eval.py,sha256=kNPjIU-JwE0oIBQKwhv6lZsUF_SPtYVkiEEbY1ZVZxk,1807
 mcp_agent/resources/examples/researcher/fastagent.config.yaml,sha256=bNOnID9OgdSBTUEhdimKB8LjaZLa1B6igmp-nxx8nr4,2271
-mcp_agent/resources/examples/researcher/researcher-eval.py,sha256=
+mcp_agent/resources/examples/researcher/researcher-eval.py,sha256=0qDjxun7CZ1cZ8JTa6G1v1XcpwGSSL6-qAZ35yI1-K4,1818
 mcp_agent/resources/examples/researcher/researcher-imp.py,sha256=Xfw2YAyjXd47pQz-uljgG5ii5x77fVuCP2XCivRDI48,7885
 mcp_agent/resources/examples/researcher/researcher.py,sha256=iE6hlwoJVCgCvJfSPYrBvoXKFqkvXah8NHSHX5EgTxA,1431
 mcp_agent/resources/examples/workflows/agent_build.py,sha256=ioG4X8IbR8wwja8Zdncsk8YAu0VD2Xt1Vhr7saNJCZQ,2855
@@ -81,9 +81,9 @@ mcp_agent/resources/examples/workflows/chaining.py,sha256=1G_0XBcFkSJCOXb6N_iXWl
 mcp_agent/resources/examples/workflows/evaluator.py,sha256=3XmW1mjImlaWb0c5FWHYS9yP8nVGTbEdJySAoWXwrDg,3109
 mcp_agent/resources/examples/workflows/fastagent.config.yaml,sha256=k2AiapOcK42uqG2nWDVvnSLqN4okQIQZK0FTbZufBpY,809
 mcp_agent/resources/examples/workflows/human_input.py,sha256=c8cBdLEPbaMXddFwsfN3Z7RFs5PZXsdrjANfvq1VTPM,605
-mcp_agent/resources/examples/workflows/orchestrator.py,sha256=
+mcp_agent/resources/examples/workflows/orchestrator.py,sha256=orsE4S03uk4ylkhERmTwzIyqyL7kFHR7oOzvYV3Id38,2599
 mcp_agent/resources/examples/workflows/parallel.py,sha256=pLbQrtXfbdYqMVddxtg5dZnBnm5Wo2mXlIa1Vf2F1FQ,3096
-mcp_agent/resources/examples/workflows/router.py,sha256=
+mcp_agent/resources/examples/workflows/router.py,sha256=J1yTAimFY53jcyd21cq1XAZvtOxnNsmtSjSp13M5EgE,1668
 mcp_agent/resources/examples/workflows/sse.py,sha256=tdmmh7p87YNfcF_fCq3evAmc1Nek0oY0YOqLRKBLqKg,570
 mcp_agent/telemetry/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/telemetry/usage_tracking.py,sha256=ePujKMSjPxB7k6X34DGaVlnsV1728mcWZq38OqahiCU,501
@@ -94,7 +94,7 @@ mcp_agent/workflows/embedding/embedding_base.py,sha256=-c20ggQ8s7XhMxRX-WEhOgHE7
 mcp_agent/workflows/embedding/embedding_cohere.py,sha256=OKTJvKD_uEafd4c2uhR5tBjprea1nyvlJOO-3FDqOnk,1540
 mcp_agent/workflows/embedding/embedding_openai.py,sha256=dntjJ5P-FSMGYuyPZC8MuCU_ehwjXw9wDfzZZuSQN1E,1480
 mcp_agent/workflows/evaluator_optimizer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py,sha256=
+mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py,sha256=cJpts0w6jffJCHOjBdAa18E8cw7qteoAbrGvm9Rrh6U,18144
 mcp_agent/workflows/intent_classifier/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/workflows/intent_classifier/intent_classifier_base.py,sha256=zTbOmq6EY_abOlme4zl28HM4RWNNS6bbHl3tF7SshJ0,4004
 mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py,sha256=_bWZGukc_q9LdA_Q18UoAMSzhN8tt4K_bRHNUhy7Crw,3997
@@ -104,15 +104,16 @@ mcp_agent/workflows/intent_classifier/intent_classifier_llm.py,sha256=WSLUv2Casb
 mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py,sha256=Hp4454IniWFxV4ml50Ml8ip9rS1La5FBn5pd7vm1FHA,1964
 mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py,sha256=zj76WlTYnSCYjBQ_IDi5vFBQGmNwYaoUq1rT730sY98,1940
 mcp_agent/workflows/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mcp_agent/workflows/llm/augmented_llm.py,sha256=
-mcp_agent/workflows/llm/augmented_llm_anthropic.py,sha256=
-mcp_agent/workflows/llm/augmented_llm_openai.py,sha256=
+mcp_agent/workflows/llm/augmented_llm.py,sha256=5HVa2xGzKyBzlk1IePzGsgshcgT1vslpF00ug-Id15M,26884
+mcp_agent/workflows/llm/augmented_llm_anthropic.py,sha256=cUcEq5S9hoz2NBdo4jQBs-rBHFd71yOV9TEE7w-mzTw,24033
+mcp_agent/workflows/llm/augmented_llm_openai.py,sha256=dhlGasCbTOXiLrAutDNxWXEyZermPToZ5yZc_PWzxlc,26798
+mcp_agent/workflows/llm/augmented_llm_passthrough.py,sha256=YiP_SnYIGT5ObQros48i_TNywaIdN1Of9d4q9EdrJbI,2272
 mcp_agent/workflows/llm/enhanced_passthrough.py,sha256=rHNbb6pYllIuVMOhuzUbt63_6WlUnjm57Y7r59N1pnk,2388
 mcp_agent/workflows/llm/llm_selector.py,sha256=G7pIybuBDwtmyxUDov_QrNYH2FoI0qFRu2JfoxWUF5Y,11045
-mcp_agent/workflows/llm/model_factory.py,sha256=
+mcp_agent/workflows/llm/model_factory.py,sha256=b0monjiedzYvYZaKPgK44tppVDk14kBoVJHEzmjVl28,7153
 mcp_agent/workflows/llm/prompt_utils.py,sha256=EY3eddqnmc_YDUQJFysPnpTH6hr4r2HneeEmX76P8TQ,4948
 mcp_agent/workflows/orchestrator/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mcp_agent/workflows/orchestrator/orchestrator.py,sha256=
+mcp_agent/workflows/orchestrator/orchestrator.py,sha256=s8-_4CG4oRnvYAwUqqyevGLpy21IYtcNtsd_SbRZ8Fk,22125
 mcp_agent/workflows/orchestrator/orchestrator_models.py,sha256=1ldku1fYA_hu2F6K4l2C96mAdds05VibtSzSQrGm3yw,7321
 mcp_agent/workflows/orchestrator/orchestrator_prompts.py,sha256=EXKEI174sshkZyPPEnWbwwNafzSPuA39MXL7iqG9cWc,9106
 mcp_agent/workflows/parallel/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -124,13 +125,13 @@ mcp_agent/workflows/router/router_base.py,sha256=S-UxofpdW9e7ZQXaZcSE8zBY--6W0m5
 mcp_agent/workflows/router/router_embedding.py,sha256=wEU49li9OqTX-Xucm0HDUFLZjlND1WuewOcQVAo0s2E,7944
 mcp_agent/workflows/router/router_embedding_cohere.py,sha256=aKZVzzQfBuz0by9k0zWLAA0Db_unDIMYL4ynVzzx8C4,1975
 mcp_agent/workflows/router/router_embedding_openai.py,sha256=KqW2IFLdQoAJ2lIz1X18WQJFjXF-YSFSTtsqVnp1JeI,1975
-mcp_agent/workflows/router/router_llm.py,sha256=
+mcp_agent/workflows/router/router_llm.py,sha256=msXmp_PPPX-2fZF8F_bYjGId2CmmaBe4DSHKNNggcnU,10942
 mcp_agent/workflows/swarm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/workflows/swarm/swarm.py,sha256=-lAIeSWDqbGHGRPTvjiP9nIKWvxxy9DAojl9yQzO1Pw,11050
 mcp_agent/workflows/swarm/swarm_anthropic.py,sha256=pW8zFx5baUWGd5Vw3nIDF2oVOOGNorij4qvGJKdYPcs,1624
 mcp_agent/workflows/swarm/swarm_openai.py,sha256=wfteywvAGkT5bLmIxX_StHJq8144whYmCRnJASAjOes,1596
-fast_agent_mcp-0.1.
-fast_agent_mcp-0.1.
-fast_agent_mcp-0.1.
-fast_agent_mcp-0.1.
-fast_agent_mcp-0.1.
+fast_agent_mcp-0.1.8.dist-info/METADATA,sha256=OsJut48Sg0EH8_PcwoKXZMkbeqAOPpFbJwrU5BLqSzk,28637
+fast_agent_mcp-0.1.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+fast_agent_mcp-0.1.8.dist-info/entry_points.txt,sha256=2IXtSmDK9XjWN__RWuRIJTgWyW17wJnJ_h-pb0pZAxo,174
+fast_agent_mcp-0.1.8.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
+fast_agent_mcp-0.1.8.dist-info/RECORD,,
```
mcp_agent/core/agent_app.py
CHANGED
```diff
@@ -177,9 +177,6 @@ class AgentApp:
         from rich import print as rich_print
         from rich.table import Table
         from rich.console import Console
-        from prompt_toolkit import PromptSession
-        from prompt_toolkit.formatted_text import HTML
-        from prompt_toolkit.completion import WordCompleter

         console = Console()

@@ -325,8 +322,11 @@
         )

         # Ask user to select one
-
-
+        from mcp_agent.core.enhanced_prompt import (
+            get_selection_input,
+        )
+
+        selection = await get_selection_input(
             "Enter prompt number to select: ", default="1"
         )

@@ -381,12 +381,16 @@
         prompt_names = [
             str(i + 1) for i in range(len(all_prompts))
         ]
-        completer = WordCompleter(prompt_names)

         # Ask user to select a prompt
-
-
-
+        from mcp_agent.core.enhanced_prompt import (
+            get_selection_input,
+        )
+
+        selection = await get_selection_input(
+            "Enter prompt number to select (or press Enter to cancel): ",
+            options=prompt_names,
+            allow_cancel=True,
         )

         # Make cancellation easier
@@ -437,37 +441,38 @@

         # Collect required arguments
         for arg_name in required_args:
-            #
+            # Get description if available
             description = arg_descriptions.get(arg_name, "")
-
-            rich_print(
-                f" [dim]{arg_name}: {description}[/dim]"
-            )
-
+
             # Collect required argument value
-
-
-
-
+            from mcp_agent.core.enhanced_prompt import (
+                get_argument_input,
+            )
+
+            arg_value = await get_argument_input(
+                arg_name=arg_name,
+                description=description,
+                required=True,
             )
-            # Add to arg_values
-
+            # Add to arg_values if a value was provided
+            if arg_value is not None:
+                arg_values[arg_name] = arg_value

         # Only include non-empty values for optional arguments
         if optional_args:
             # Collect optional arguments
             for arg_name in optional_args:
-                #
+                # Get description if available
                 description = arg_descriptions.get(arg_name, "")
-
-
-
-
-
-                arg_value = await
-
-
-
+
+                from mcp_agent.core.enhanced_prompt import (
+                    get_argument_input,
+                )
+
+                arg_value = await get_argument_input(
+                    arg_name=arg_name,
+                    description=description,
+                    required=False,
                 )
                 # Only include non-empty values for optional arguments
                 if arg_value:
```
mcp_agent/core/decorators.py
CHANGED
```diff
@@ -256,7 +256,8 @@ def parallel(
         self.agents[passthrough_name] = {
             "config": AgentConfig(
                 name=passthrough_name,
-
+                model="passthrough",
+                instruction=f"This agent combines the results from the fan-out agents verbatim. {name}",
                 servers=[],
                 use_history=use_history,
             ),
@@ -452,4 +453,4 @@ def passthrough(
         name=name,
         use_history=use_history,
     )
-    return decorator
+    return decorator
```
mcp_agent/core/enhanced_prompt.py
CHANGED
```diff
@@ -2,13 +2,13 @@
 Enhanced prompt functionality with advanced prompt_toolkit features.
 """

-from typing import List
+from typing import List, Optional
 from importlib.metadata import version
 from prompt_toolkit import PromptSession
 from prompt_toolkit.formatted_text import HTML
 from prompt_toolkit.history import InMemoryHistory
 from prompt_toolkit.key_binding import KeyBindings
-from prompt_toolkit.completion import Completer, Completion
+from prompt_toolkit.completion import Completer, Completion, WordCompleter
 from prompt_toolkit.lexers import PygmentsLexer
 from prompt_toolkit.filters import Condition
 from prompt_toolkit.styles import Style
@@ -330,6 +330,110 @@ async def get_enhanced_input(
         # Log and gracefully handle other exceptions
         print(f"\nInput error: {type(e).__name__}: {e}")
         return "STOP"
+    finally:
+        # Ensure the prompt session is properly cleaned up
+        # This is especially important on Windows to prevent resource leaks
+        if session.app.is_running:
+            session.app.exit()
+
+
+async def get_selection_input(
+    prompt_text: str,
+    options: List[str] = None,
+    default: str = None,
+    allow_cancel: bool = True,
+    complete_options: bool = True,
+) -> Optional[str]:
+    """
+    Display a selection prompt and return the user's selection.
+
+    Args:
+        prompt_text: Text to display as the prompt
+        options: List of valid options (for auto-completion)
+        default: Default value if user presses enter
+        allow_cancel: Whether to allow cancellation with empty input
+        complete_options: Whether to use the options for auto-completion
+
+    Returns:
+        Selected value, or None if cancelled
+    """
+    try:
+        # Initialize completer if options provided and completion requested
+        completer = WordCompleter(options) if options and complete_options else None
+
+        # Create prompt session
+        prompt_session = PromptSession(completer=completer)
+
+        try:
+            # Get user input
+            selection = await prompt_session.prompt_async(
+                prompt_text, default=default or ""
+            )
+
+            # Handle cancellation
+            if allow_cancel and not selection.strip():
+                return None
+
+            return selection
+        finally:
+            # Ensure prompt session cleanup
+            if prompt_session.app.is_running:
+                prompt_session.app.exit()
+    except (KeyboardInterrupt, EOFError):
+        return None
+    except Exception as e:
+        rich_print(f"\n[red]Error getting selection: {e}[/red]")
+        return None
+
+
+async def get_argument_input(
+    arg_name: str,
+    description: str = None,
+    required: bool = True,
+) -> Optional[str]:
+    """
+    Prompt for an argument value with formatting and help text.
+
+    Args:
+        arg_name: Name of the argument
+        description: Optional description of the argument
+        required: Whether this argument is required
+
+    Returns:
+        Input value, or None if cancelled/skipped
+    """
+    # Format the prompt differently based on whether it's required
+    required_text = "(required)" if required else "(optional, press Enter to skip)"
+
+    # Show description if available
+    if description:
+        rich_print(f" [dim]{arg_name}: {description}[/dim]")
+
+    prompt_text = HTML(
+        f"Enter value for <ansibrightcyan>{arg_name}</ansibrightcyan> {required_text}: "
+    )
+
+    # Create prompt session
+    prompt_session = PromptSession()
+
+    try:
+        # Get user input
+        arg_value = await prompt_session.prompt_async(prompt_text)
+
+        # For optional arguments, empty input means skip
+        if not required and not arg_value:
+            return None
+
+        return arg_value
+    except (KeyboardInterrupt, EOFError):
+        return None
+    except Exception as e:
+        rich_print(f"\n[red]Error getting input: {e}[/red]")
+        return None
+    finally:
+        # Ensure prompt session cleanup
+        if prompt_session.app.is_running:
+            prompt_session.app.exit()


 async def handle_special_commands(command, agent_app=None):
@@ -408,24 +512,6 @@ async def handle_special_commands(command, agent_app=None):
         )
         return True

-    elif command == "SELECT_PROMPT" or (
-        isinstance(command, str) and command.startswith("SELECT_PROMPT:")
-    ):
-        # Handle prompt selection UI (previously named "list_prompts" action)
-        if agent_app:
-            # If it's a specific prompt, extract the name
-            prompt_name = None
-            if isinstance(command, str) and command.startswith("SELECT_PROMPT:"):
-                prompt_name = command.split(":", 1)[1].strip()
-
-            # Return a dictionary with a select_prompt action to be handled by the caller
-            return {"select_prompt": True, "prompt_name": prompt_name}
-        else:
-            rich_print(
-                "[yellow]Prompt selection is not available outside of an agent context[/yellow]"
-            )
-            return True
-
     elif isinstance(command, str) and command.startswith("SWITCH:"):
         agent_name = command.split(":", 1)[1]
         if agent_name in available_agents:
```
mcp_agent/core/factory.py
CHANGED
```diff
@@ -34,10 +34,7 @@ T = TypeVar("T") # For the wrapper classes


 def create_proxy(
-    app: MCPApp,
-    name: str,
-    instance: AgentOrWorkflow,
-    agent_type: str
+    app: MCPApp, name: str, instance: AgentOrWorkflow, agent_type: str
 ) -> BaseAgentProxy:
     """Create appropriate proxy type based on agent type and validate instance type

@@ -61,9 +58,7 @@
     log_agent_load(app, name)
     if agent_type == AgentType.BASIC.value:
         if not isinstance(instance, Agent):
-            raise TypeError(
-                f"Expected Agent instance for {name}, got {type(instance)}"
-            )
+            raise TypeError(f"Expected Agent instance for {name}, got {type(instance)}")
         return LLMAgentProxy(app, name, instance)
     elif agent_type == AgentType.ORCHESTRATOR.value:
         if not isinstance(instance, Orchestrator):
@@ -177,42 +172,18 @@ async def create_agents_by_type(
         if agent_type == AgentType.BASIC:
             # Get the agent name for special handling
             agent_name = agent_data["config"].name
+            agent = Agent(config=config, context=app_instance.context)

-            #
-
-
-
-
-
-
-            # Create basic agent with configuration
-            agent = Agent(config=config, context=app_instance.context)
-
-            # Set up a PassthroughLLM directly
-            async with agent:
-                agent._llm = PassthroughLLM(
-                    name=f"{config.name}_llm",
-                    context=app_instance.context,
-                    agent=agent,
-                    default_request_params=config.default_request_params,
-                )
-
-            # Store the agent
-            instance = agent
-        else:
-            # Standard basic agent with LLM
-            agent = Agent(config=config, context=app_instance.context)
-
-            # Set up LLM with proper configuration
-            async with agent:
-                llm_factory = model_factory_func(
-                    model=config.model,
-                    request_params=config.default_request_params,
-                )
-                agent._llm = await agent.attach_llm(llm_factory)
+            # Set up LLM with proper configuration
+            async with agent:
+                llm_factory = model_factory_func(
+                    model=config.model,
+                    request_params=config.default_request_params,
+                )
+                agent._llm = await agent.attach_llm(llm_factory)

-
-
+            # Store the agent
+            instance = agent

         elif agent_type == AgentType.ORCHESTRATOR:
             # Get base params configured with model settings
@@ -276,12 +247,8 @@

         elif agent_type == AgentType.EVALUATOR_OPTIMIZER:
             # Get the referenced agents - unwrap from proxies
-            generator = unwrap_proxy(
-
-            )
-            evaluator = unwrap_proxy(
-                active_agents[agent_data["evaluator"]]
-            )
+            generator = unwrap_proxy(active_agents[agent_data["generator"]])
+            evaluator = unwrap_proxy(active_agents[agent_data["evaluator"]])

             if not generator or not evaluator:
                 raise ValueError(
@@ -294,7 +261,9 @@
             optimizer_model = None
             if isinstance(generator, Agent):
                 optimizer_model = generator.config.model
-            elif hasattr(generator,
+            elif hasattr(generator, "_sequence") and hasattr(
+                generator, "_agent_proxies"
+            ):
                 # For ChainProxy, use the config model directly
                 optimizer_model = config.model

@@ -311,9 +280,7 @@

         elif agent_type == AgentType.ROUTER:
             # Get the router's agents - unwrap proxies
-            router_agents = get_agent_instances(
-                agent_data["agents"], active_agents
-            )
+            router_agents = get_agent_instances(agent_data["agents"], active_agents)

             # Create the router with proper configuration
             llm_factory = model_factory_func(
@@ -376,20 +343,15 @@
                 "continue_with_final", True
             )
             # Set cumulative behavior from configuration
-            instance._cumulative = agent_data.get(
-                "cumulative", False
-            )
+            instance._cumulative = agent_data.get("cumulative", False)

         elif agent_type == AgentType.PARALLEL:
-            # Get fan-out agents (could be basic agents or other parallels)
             fan_out_agents = get_agent_instances(
                 agent_data["fan_out"], active_agents
             )

             # Get fan-in agent - unwrap proxy
-            fan_in_agent = unwrap_proxy(
-                active_agents[agent_data["fan_in"]]
-            )
+            fan_in_agent = unwrap_proxy(active_agents[agent_data["fan_in"]])

             # Create the parallel workflow
             llm_factory = model_factory_func(config.model)
@@ -416,7 +378,7 @@


 async def create_basic_agents(
-    app_instance: MCPApp,
+    app_instance: MCPApp,
     agents_dict: Dict[str, Dict[str, Any]],
     model_factory_func: Callable,
 ) -> ProxyDict:
@@ -432,17 +394,17 @@
         Dictionary of initialized basic agents wrapped in appropriate proxies
     """
     return await create_agents_by_type(
-        app_instance,
-        agents_dict,
-        AgentType.BASIC,
-        model_factory_func=model_factory_func
+        app_instance,
+        agents_dict,
+        AgentType.BASIC,
+        model_factory_func=model_factory_func,
     )


 async def create_agents_in_dependency_order(
-    app_instance: MCPApp,
+    app_instance: MCPApp,
     agents_dict: Dict[str, Dict[str, Any]],
-    active_agents: ProxyDict,
+    active_agents: ProxyDict,
     agent_type: AgentType,
     model_factory_func: Callable,
 ) -> ProxyDict:
@@ -498,4 +460,4 @@
         if agent_name in agent_result:
             result_agents[agent_name] = agent_result[agent_name]

-    return result_agents
+    return result_agents
```
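Note (illustrative, not from the diff): with the PassthroughLLM special case removed, every basic agent in `factory.py` now goes through the same attach path. A reduced sketch of that path is below, with `agent`, `config`, and `model_factory_func` assumed to be supplied by the caller exactly as factory.py already has them in scope.

```python
async def attach_llm_like_factory(agent, config, model_factory_func):
    """Sketch of the unified LLM attachment step now used for basic agents."""
    async with agent:
        # "passthrough" and real provider models take the same route here:
        # the factory callable picks the implementation from config.model.
        llm_factory = model_factory_func(
            model=config.model,
            request_params=config.default_request_params,
        )
        agent._llm = await agent.attach_llm(llm_factory)
    return agent
```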
mcp_agent/human_input/handler.py
CHANGED
```diff
@@ -69,7 +69,10 @@ async def console_input_callback(request: HumanInputRequest) -> HumanInputRespon
         command_result = await handle_special_commands(response)
         if isinstance(command_result, dict) and "list_prompts" in command_result:
             from rich import print as rich_print
-
+
+            rich_print(
+                "[yellow]Prompt listing not available in human input context[/yellow]"
+            )

     except KeyboardInterrupt:
         console.print("\n[yellow]Input interrupted[/yellow]")
```
mcp_agent/mcp/mcp_aggregator.py
CHANGED
```diff
@@ -788,19 +788,23 @@ class MCPAggregator(ContextDependent):
                 # Add empty list to results for this server
                 results[s_name] = []

-        #
+        # Process servers sequentially to ensure proper resource cleanup
+        # This helps prevent resource leaks especially on Windows
        if supported_servers:
-
-
-
-
-
-
-
-
-
-
-
+            server_results = []
+            for s_name in supported_servers:
+                try:
+                    result = await self._execute_on_server(
+                        server_name=s_name,
+                        operation_type="prompts-list",
+                        operation_name="",
+                        method_name="list_prompts",
+                        error_factory=lambda _: [],
+                    )
+                    server_results.append(result)
+                except Exception as e:
+                    logger.debug(f"Error fetching prompts from {s_name}: {e}")
+                    server_results.append(e)

             for i, result in enumerate(server_results):
                 if isinstance(result, BaseException):
```
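Note (illustrative, not from the diff): the aggregator now lists prompts server by server and records either the result or the raised exception, which, per the added comment, keeps resource cleanup orderly (especially on Windows). A self-contained sketch of that collection pattern, with `fetch` standing in for `_execute_on_server`:

```python
# Generic sketch of the sequential collect-or-record-error pattern.
import asyncio
from typing import Any


async def collect_sequentially(servers: list[str], fetch) -> list[Any]:
    results: list[Any] = []
    for name in servers:
        try:
            results.append(await fetch(name))
        except Exception as exc:  # keep the error in place of the result
            results.append(exc)
    return results


async def demo() -> None:
    async def fetch(name: str) -> str:
        if name == "bad":
            raise RuntimeError("boom")
        return f"prompts from {name}"

    print(await collect_sequentially(["a", "bad", "b"], fetch))


if __name__ == "__main__":
    asyncio.run(demo())
```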
|