fast-agent-mcp 0.0.14__py3-none-any.whl → 0.0.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.0.14.dist-info → fast_agent_mcp-0.0.15.dist-info}/METADATA +51 -30
- {fast_agent_mcp-0.0.14.dist-info → fast_agent_mcp-0.0.15.dist-info}/RECORD +17 -10
- mcp_agent/core/agent_app.py +163 -0
- mcp_agent/core/agent_types.py +16 -0
- mcp_agent/core/agent_utils.py +65 -0
- mcp_agent/core/error_handling.py +23 -0
- mcp_agent/core/fastagent.py +220 -306
- mcp_agent/core/proxies.py +127 -0
- mcp_agent/core/types.py +22 -0
- mcp_agent/resources/examples/internal/social.py +66 -0
- mcp_agent/resources/examples/workflows/parallel.py +4 -0
- mcp_agent/workflows/llm/augmented_llm.py +75 -1
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +49 -18
- mcp_agent/workflows/parallel/parallel_llm.py +32 -7
- {fast_agent_mcp-0.0.14.dist-info → fast_agent_mcp-0.0.15.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.0.14.dist-info → fast_agent_mcp-0.0.15.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.0.14.dist-info → fast_agent_mcp-0.0.15.dist-info}/licenses/LICENSE +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fast-agent-mcp
-Version: 0.0.14
+Version: 0.0.15
 Summary: Define, Prompt and Test MCP enabled Agents and Workflows
 Author-email: Shaun Smith <fastagent@llmindset.co.uk>, Sarmad Qadri <sarmad@lastmileai.dev>
 License: Apache License
@@ -247,7 +247,7 @@ Description-Content-Type: text/markdown
 
 ## Overview
 
-**`fast-agent`**
+**`fast-agent`** enables you to create and interact with sophisticated Agents and Workflows in minutes.
 
 The simple declarative syntax lets you concentrate on composing your Prompts and MCP Servers to [build effective agents](https://www.anthropic.com/research/building-effective-agents).
 
@@ -257,7 +257,9 @@ Evaluate how different models handle Agent and MCP Server calling tasks, then bu
 
 Prompts and configurations that define your Agent Applications are stored in simple files, with minimal boilerplate, enabling simple management and version control.
 
-Chat with individual Agents and Components before, during and after workflow execution to tune and diagnose your
+Chat with individual Agents and Components before, during and after workflow execution to tune and diagnose your application.
+
+Simple model selection makes testing Model <-> MCP Server interaction painless.
 
 ## Get started:
 
@@ -294,12 +296,14 @@ async with fast.run() as agent:
 ```
 
 Or start an interactive chat with the Agent:
+
 ```python
-async with fast.run() as agent:
+async with fast.run() as agent:
     await agent()
 ```
 
 Here is the complete `sizer.py` Agent application, with boilerplate code:
+
 ```python
 import asyncio
 from mcp_agent.core.fastagent import FastAgent
@@ -319,22 +323,25 @@ if __name__ == "__main__":
     asyncio.run(main())
 ```
 
-The Agent can be run with `uv run sizer.py
+The Agent can then be run with `uv run sizer.py`.
+
+Specify a model with the `--model` switch - for example `uv run sizer.py --model sonnet`.
 
 ### Combining Agents and using MCP Servers
 
 _To generate examples use `fast-agent bootstrap workflow`. This example can be run with `uv run chaining.py`. fast-agent looks for configuration files in the current directory before checking parent directories recursively._
 
-Agents can be chained to build a workflow:
+Agents can be chained to build a workflow, using MCP Servers defined in the `fastagent.config.yaml` file:
+
 ```python
 @fast.agent(
     "url_fetcher",
-
+    "Given a URL, provide a complete and comprehensive summary",
     servers=["fetch"], # Name of an MCP Server defined in fastagent.config.yaml
 )
 @fast.agent(
     "social_media",
-
+    """
     Write a 280 character social media post for any given text.
     Respond only with the post, never use hashtags.
     """,
@@ -347,53 +354,66 @@ async def main():
 )
 ```
 
-All Agents and Workflows respond to `.send("message")`
+All Agents and Workflows respond to `.send("message")` and `.prompt()` to begin a chat session.
+
+Saved as `social.py` we can now run this workflow from the command line with:
+
+```bash
+uv run social.py --agent social_media --message "<url>"
+```
+
+Add the `--quiet` switch to only return the final response.
 
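The same calls are also available programmatically; a minimal sketch, assuming the `fast` application and the `url_fetcher` / `social_media` agents defined above, in the fragment style of the README examples:

```python
async with fast.run() as agent:
    # one-shot messages to named agents
    summary = await agent.url_fetcher.send("https://example.com")
    post = await agent.social_media.send(summary)
    # or open an interactive chat session with any agent or workflow
    await agent.social_media.prompt()
```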
 ## Workflows
 
 ### Chain
 
-
+The `chain` workflow offers a more declarative approach to calling Agents in sequence:
+
 ```python
 
 @fast.chain(
     "post_writer",
-
+    sequence=["url_fetcher","social_media"]
 )
 
 # we can them prompt it directly:
 async with fast.run() as agent:
-    await agent.post_writer
+    await agent.post_writer()
 
 ```
+
+This starts an interactive session, which produces a short social media post for a given URL. If a _chain_ is prompted, by default it returns to a chat with last Agent in the chain.
+
 Chains can be incorporated in other workflows, or contain other workflow elements (including other Chains). You can set an `instruction` to precisely describe it's capabilities to other workflow steps if needed.
 
 ### Parallel
 
-The Parallel Workflow sends the same message to multiple Agents simultaneously (`fan-out`), then uses the `fan-in`
+The Parallel Workflow sends the same message to multiple Agents simultaneously (`fan-out`), then uses the `fan-in` Agent to process the combined content.
 
 ```python
-
-@fast.agent(
-
-    instruction="combine the lists, remove duplicates"
-)
+@fast.agent("translate_fr", "Translate the text to French")
+@fast.agent("translate_de", "Translate the text to German")
+@fast.agent("translate_es", "Translate the text to Spanish")
 
 @fast.parallel(
-    name="
-    fan_out=["
-    fan_in="consolidator"
+    name="translate",
+    fan_out=["translate_fr","translate_de","translate_es"]
 )
 
-
-
+@fast.chain(
+    "post_writer",
+    sequence=["url_fetcher","social_media","translate"]
+)
 ```
 
-Look at the `parallel.py` workflow example for more
+Look at the `parallel.py` workflow example for more examples. If you don't specify a `fan-in` agent, the `parallel` returns Agent results verbatim.
+
+The Parallel is also useful to ensemble ideas from different LLMs.
 
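The 0.0.14 example removed above routed the fan-out results through a `fan_in` agent named `consolidator`; a sketch of adding one back to the new `translate` parallel, keeping the parameter names shown in the diff (the consolidator's instruction text is illustrative):

```python
@fast.agent("consolidator", "Combine the translations into one reply, labelled by language")

@fast.parallel(
    name="translate",
    fan_out=["translate_fr", "translate_de", "translate_es"],
    fan_in="consolidator",  # omit fan_in to get each agent's result back verbatim
)
```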
 ### Evaluator-Optimizer
 
-Evaluator-Optimizers
+Evaluator-Optimizers combine 2 agents: one to generate content (the `generator`), and the other to judge that content and provide actionable feedback (the `evaluator`). Messages are sent to the generator first, then the pair run in a loop until either the evaluator is satisfied with the quality, or the maximum number of refinements is reached.
 
 ```python
 @fast.evaluator_optimizer(
@@ -408,11 +428,11 @@ async with fast.run() as agent:
     await agent.researcher.send("produce a report on how to make the perfect espresso")
 ```
 
-See the `evaluator.py` workflow example, or `fast-agent bootstrap researcher` for a more complete example.
+See the `evaluator.py` workflow example, or `fast-agent bootstrap researcher` for a more complete example.
 
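The decorator arguments themselves fall outside the hunks shown here; a hypothetical sketch of the shape the description implies (the parameter names `generator`, `evaluator` and `max_refinements` are assumptions, not taken from this diff):

```python
@fast.evaluator_optimizer(
    name="researcher",          # the workflow addressed above as agent.researcher
    generator="web_searcher",   # assumed: agent that drafts the content
    evaluator="quality_check",  # assumed: agent that critiques it and scores quality
    max_refinements=3,          # assumed: cap on generate/evaluate iterations
)
```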
 ### Router
 
-Routers use an LLM to assess a message, and route it to the most appropriate Agent
+Routers use an LLM to assess a message, and route it to the most appropriate Agent. The routing prompt is automatically generated based on the Agent instructions and available Servers.
 
 ```python
 @fast.router(
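Again the argument list is elided in the diff; a hypothetical sketch (the `agents` parameter name is an assumption):

```python
@fast.router(
    name="route",
    agents=["url_fetcher", "social_media"],  # assumed: candidate agents the router can choose between
)
```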
@@ -425,7 +445,7 @@ Look at the `router.py` workflow for an example.
 
 ### Orchestrator
 
-Given a task,
+Given a complex task, the Orchestrator uses an LLM to generate a plan to divide the task amongst the available Agents. The planning and aggregation prompts are generated by the Orchestrator, which benefits from using more capable models. Plans can either be built once at the beginning (`plantype="full"`) or iteratively (`plantype="iterative"`).
 
 ```python
 @fast.orchestrator(
@@ -434,6 +454,8 @@ Given a task, an Orchestrator uses an LLM to generate a plan to divide the task
 )
 ```
 
+See `orchestrator.py` in the workflow examples.
+
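As with the router, the orchestrator's arguments fall between the hunks shown; a hypothetical sketch (the `agents` parameter name is an assumption, and the plan-type keyword is described above only as `plantype`):

```python
@fast.orchestrator(
    name="orchestrate",
    agents=["url_fetcher", "social_media"],  # assumed: agents the planner may delegate to
)
```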
 ## Agent Features
 
 ```python
@@ -474,5 +496,4 @@ When `human_input` is set to true for an Agent, it is presented with the option
 
 ### Features to add.
 
-
-
+- Chat History Clear.
@@ -17,10 +17,16 @@ mcp_agent/cli/commands/bootstrap.py,sha256=z1wZSy8vO_GZPGLrFGzG3EKFQgAHC08jiIdVy
 mcp_agent/cli/commands/config.py,sha256=32YTS5jmsYAs9QzAhjkG70_daAHqOemf4XbZBBSMz6g,204
 mcp_agent/cli/commands/setup.py,sha256=8ofxUAF2nUSu1IarDZSAsTt6_6PoEht3TGbz9N6WSbs,6239
 mcp_agent/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mcp_agent/core/agent_app.py,sha256=2gnORb52cpWYGjRDNTJ9lVCEEc11c7Xi874ho7bbYVQ,6097
+mcp_agent/core/agent_types.py,sha256=yKiMbv9QO2dduq4zXmoMZlOZpXJZhM4oNwIq1-134FE,318
+mcp_agent/core/agent_utils.py,sha256=yUJ-qvw5TblqqOsB1vj0Qvcz9mass9awPA6UNNvuw0A,1738
 mcp_agent/core/enhanced_prompt.py,sha256=0V5q0xcCk8PBwtc0p62B8JJ1VvqxN_wuJiXC2QPqv1M,12750
+mcp_agent/core/error_handling.py,sha256=D3HMW5odrbJvaKqcpCGj6eDXrbFcuqYaCZz7fyYiTu4,623
 mcp_agent/core/exceptions.py,sha256=a2-JGRwFFRoQEPuAq0JC5PhAJ5TO3xVJfdS4-VN29cw,2225
-mcp_agent/core/fastagent.py,sha256=
+mcp_agent/core/fastagent.py,sha256=MLMdQ4_Cjb2svWyGVWLGf8EzI3dSFCl4BS5qq8UrKgg,56966
+mcp_agent/core/proxies.py,sha256=hXDUpsgGO4xBTIjdUeXj6vULPb8sf55vAFVQh6Ybn60,4411
 mcp_agent/core/server_validation.py,sha256=_59cn16nNT4HGPwg19HgxMtHK4MsdWYDUw_CuL-5xek,1696
+mcp_agent/core/types.py,sha256=Zhi9iW7uiOfdpSt9NC0FCtGRFtJPg4mpZPK2aYi7a7M,817
 mcp_agent/eval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/executor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/executor/decorator_registry.py,sha256=eONv5WvIcjKd43jVqeP7iB2EkAK-ErhdmXt6ogN0K_w,3848
@@ -53,6 +59,7 @@ mcp_agent/resources/examples/data-analysis/fastagent.config.yaml,sha256=eTKGbjnT
 mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv,sha256=pcMeOL1_r8m8MziE6xgbBrQbjl5Ijo98yycZn7O-dlk,227977
 mcp_agent/resources/examples/internal/agent.py,sha256=f-jTgYabV3nWCQm0ZP9NtSEWjx3nQbRngzArRufcELg,384
 mcp_agent/resources/examples/internal/job.py,sha256=WEKIAANMEAuKr13__rYf3PqJeTAsNB_kqYqbqVYQlUM,4093
+mcp_agent/resources/examples/internal/social.py,sha256=Cot2lg3PLhLm13gPdVFvFEN28-mm6x3-jHu2YsV4N3s,1707
 mcp_agent/resources/examples/mcp_researcher/researcher-eval.py,sha256=kNPjIU-JwE0oIBQKwhv6lZsUF_SPtYVkiEEbY1ZVZxk,1807
 mcp_agent/resources/examples/researcher/fastagent.config.yaml,sha256=2_VXZneckR6zk6RWzzL-smV_oWmgg4uSkLWqZv8jF0I,1995
 mcp_agent/resources/examples/researcher/researcher-eval.py,sha256=kNPjIU-JwE0oIBQKwhv6lZsUF_SPtYVkiEEbY1ZVZxk,1807
@@ -63,7 +70,7 @@ mcp_agent/resources/examples/workflows/evaluator.py,sha256=FZy-ciZafdqSHUW67LKdH
 mcp_agent/resources/examples/workflows/fastagent.config.yaml,sha256=k2AiapOcK42uqG2nWDVvnSLqN4okQIQZK0FTbZufBpY,809
 mcp_agent/resources/examples/workflows/human_input.py,sha256=c8cBdLEPbaMXddFwsfN3Z7RFs5PZXsdrjANfvq1VTPM,605
 mcp_agent/resources/examples/workflows/orchestrator.py,sha256=pRJqB-ok79_iEj8aG4FysHyXz6wAHLUX-5tS8khUI7k,2574
-mcp_agent/resources/examples/workflows/parallel.py,sha256=
+mcp_agent/resources/examples/workflows/parallel.py,sha256=dowCw6i8mVaWFWqLvyEEJz1kZDQqdeiltcM-hEz38iY,3222
 mcp_agent/resources/examples/workflows/router.py,sha256=XT_ewCrxPxdUTMCYQGw34qZQ3GGu8TYY_v5Lige8By4,1707
 mcp_agent/telemetry/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/telemetry/usage_tracking.py,sha256=ePujKMSjPxB7k6X34DGaVlnsV1728mcWZq38OqahiCU,501
@@ -83,8 +90,8 @@ mcp_agent/workflows/intent_classifier/intent_classifier_llm.py,sha256=WSLUv2Casb
 mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py,sha256=Hp4454IniWFxV4ml50Ml8ip9rS1La5FBn5pd7vm1FHA,1964
 mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py,sha256=zj76WlTYnSCYjBQ_IDi5vFBQGmNwYaoUq1rT730sY98,1940
 mcp_agent/workflows/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mcp_agent/workflows/llm/augmented_llm.py,sha256=
-mcp_agent/workflows/llm/augmented_llm_anthropic.py,sha256=
+mcp_agent/workflows/llm/augmented_llm.py,sha256=Hyx-jwgbMjE_WQ--YjIUvdj6HAgX36IvXBesGy6uic0,25884
+mcp_agent/workflows/llm/augmented_llm_anthropic.py,sha256=4hyC8xI8zTLm7NgbyQRoWRu2ycoTP4Qu6bnPXR1p9ow,22473
 mcp_agent/workflows/llm/augmented_llm_openai.py,sha256=5PwTh0QJSQ29EtK0UuiltgX6snRSBoau75C35S4xQcQ,24477
 mcp_agent/workflows/llm/llm_selector.py,sha256=G7pIybuBDwtmyxUDov_QrNYH2FoI0qFRu2JfoxWUF5Y,11045
 mcp_agent/workflows/llm/model_factory.py,sha256=7zTJrO2ReHa_6dfh_gY6xO8dTySqGFCKlOG9-AMJ-i8,6920
@@ -95,7 +102,7 @@ mcp_agent/workflows/orchestrator/orchestrator_prompts.py,sha256=-ogkjDoCXBDOyYE9
 mcp_agent/workflows/parallel/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/workflows/parallel/fan_in.py,sha256=EivpUL5-qftctws-tlfwmYS1QeSwr07POIbBUbwvwOk,13184
 mcp_agent/workflows/parallel/fan_out.py,sha256=J-yezgjzAWxfueW_Qcgwoet4PFDRIh0h4m48lIbFA4c,7023
-mcp_agent/workflows/parallel/parallel_llm.py,sha256=
+mcp_agent/workflows/parallel/parallel_llm.py,sha256=fk88DhBRAI41Ph0spe_yBtrMTSj0g47yoA-ozuOxZhE,5807
 mcp_agent/workflows/router/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/workflows/router/router_base.py,sha256=1Qr3Fx9_KxpotMV-eaNT79etayAxWuQOmanDfk1qjtI,10250
 mcp_agent/workflows/router/router_embedding.py,sha256=wEU49li9OqTX-Xucm0HDUFLZjlND1WuewOcQVAo0s2E,7944
@@ -106,8 +113,8 @@ mcp_agent/workflows/swarm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
 mcp_agent/workflows/swarm/swarm.py,sha256=-lAIeSWDqbGHGRPTvjiP9nIKWvxxy9DAojl9yQzO1Pw,11050
 mcp_agent/workflows/swarm/swarm_anthropic.py,sha256=pW8zFx5baUWGd5Vw3nIDF2oVOOGNorij4qvGJKdYPcs,1624
 mcp_agent/workflows/swarm/swarm_openai.py,sha256=wfteywvAGkT5bLmIxX_StHJq8144whYmCRnJASAjOes,1596
-fast_agent_mcp-0.0.
-fast_agent_mcp-0.0.
-fast_agent_mcp-0.0.
-fast_agent_mcp-0.0.
-fast_agent_mcp-0.0.
+fast_agent_mcp-0.0.15.dist-info/METADATA,sha256=KL03PExGMGz9tQ9E72MKAnDS6Fi6rSGjoqyYAV3JLd4,22779
+fast_agent_mcp-0.0.15.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+fast_agent_mcp-0.0.15.dist-info/entry_points.txt,sha256=2IXtSmDK9XjWN__RWuRIJTgWyW17wJnJ_h-pb0pZAxo,174
+fast_agent_mcp-0.0.15.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
+fast_agent_mcp-0.0.15.dist-info/RECORD,,
@@ -0,0 +1,163 @@
+"""
+Main application wrapper for interacting with agents.
+"""
+
+from typing import Optional, Dict, TYPE_CHECKING
+
+from mcp_agent.app import MCPApp
+from mcp_agent.progress_display import progress_display
+from mcp_agent.workflows.orchestrator.orchestrator import Orchestrator
+from mcp_agent.workflows.parallel.parallel_llm import ParallelLLM
+from mcp_agent.workflows.evaluator_optimizer.evaluator_optimizer import (
+    EvaluatorOptimizerLLM,
+)
+
+# Import proxies directly - they handle their own circular imports
+from mcp_agent.core.proxies import (
+    BaseAgentProxy,
+    AgentProxy,
+    LLMAgentProxy,
+    RouterProxy,
+    ChainProxy,
+    WorkflowProxy,
+)
+
+# Handle possible circular imports with types
+if TYPE_CHECKING:
+    from mcp_agent.core.types import ProxyDict
+else:
+    ProxyDict = Dict[str, BaseAgentProxy]
+
+
+class AgentApp:
+    """Main application wrapper"""
+
+    def __init__(self, app: MCPApp, agents: ProxyDict):
+        self._app = app
+        self._agents = agents
+        # Optional: set default agent for direct calls
+        self._default = next(iter(agents)) if agents else None
+
+    async def send(self, agent_name: str, message: Optional[str]) -> str:
+        """Core message sending"""
+        if agent_name not in self._agents:
+            raise ValueError(f"No agent named '{agent_name}'")
+
+        if not message or "" == message:
+            return await self.prompt(agent_name)
+
+        proxy = self._agents[agent_name]
+        return await proxy.generate_str(message)
+
+    async def prompt(self, agent_name: Optional[str] = None, default: str = "") -> str:
+        """
+        Interactive prompt for sending messages with advanced features.
+
+        Args:
+            agent_name: Optional target agent name (uses default if not specified)
+            default: Default message to use when user presses enter
+        """
+        from mcp_agent.core.enhanced_prompt import (
+            get_enhanced_input,
+            handle_special_commands,
+        )
+
+        agent = agent_name or self._default
+
+        if agent not in self._agents:
+            raise ValueError(f"No agent named '{agent}'")
+
+        # Pass all available agent names for auto-completion
+        available_agents = list(self._agents.keys())
+
+        # Create agent_types dictionary mapping agent names to their types
+        agent_types = {}
+        for name, proxy in self._agents.items():
+            # Determine agent type based on the proxy type
+            if isinstance(proxy, LLMAgentProxy):
+                # Convert AgentType.BASIC.value ("agent") to "Agent"
+                agent_types[name] = "Agent"
+            elif isinstance(proxy, RouterProxy):
+                agent_types[name] = "Router"
+            elif isinstance(proxy, ChainProxy):
+                agent_types[name] = "Chain"
+            elif isinstance(proxy, WorkflowProxy):
+                # For workflow proxies, check the workflow type
+                workflow = proxy._workflow
+                if isinstance(workflow, Orchestrator):
+                    agent_types[name] = "Orchestrator"
+                elif isinstance(workflow, ParallelLLM):
+                    agent_types[name] = "Parallel"
+                elif isinstance(workflow, EvaluatorOptimizerLLM):
+                    agent_types[name] = "Evaluator"
+                else:
+                    agent_types[name] = "Workflow"
+
+        result = ""
+        while True:
+            with progress_display.paused():
+                # Use the enhanced input method with advanced features
+                user_input = await get_enhanced_input(
+                    agent_name=agent,
+                    default=default,
+                    show_default=(default != ""),
+                    show_stop_hint=True,
+                    multiline=False,  # Default to single-line mode
+                    available_agent_names=available_agents,
+                    syntax=None,  # Can enable syntax highlighting for code input
+                    agent_types=agent_types,  # Pass agent types for display
+                )
+
+            # Handle special commands
+            command_result = await handle_special_commands(user_input, self)
+
+            # Check if we should switch agents
+            if (
+                isinstance(command_result, dict)
+                and "switch_agent" in command_result
+            ):
+                agent = command_result["switch_agent"]
+                continue
+
+            # Skip further processing if command was handled
+            if command_result:
+                continue
+
+            if user_input.upper() == "STOP":
+                return result
+            if user_input == "":
+                continue
+
+            result = await self.send(agent, user_input)
+
+            # Check if current agent is a chain that should continue with final agent
+            if agent_types.get(agent) == "Chain":
+                proxy = self._agents[agent]
+                if isinstance(proxy, ChainProxy) and proxy._continue_with_final:
+                    # Get the last agent in the sequence
+                    last_agent = proxy._sequence[-1]
+                    # Switch to that agent for the next iteration
+                    agent = last_agent
+
+        return result
+
+    def __getattr__(self, name: str) -> BaseAgentProxy:
+        """Support: agent.researcher"""
+        if name not in self._agents:
+            raise AttributeError(f"No agent named '{name}'")
+        return AgentProxy(self, name)
+
+    def __getitem__(self, name: str) -> BaseAgentProxy:
+        """Support: agent['researcher']"""
+        if name not in self._agents:
+            raise KeyError(f"No agent named '{name}'")
+        return AgentProxy(self, name)
+
+    async def __call__(
+        self, message: Optional[str] = "", agent_name: Optional[str] = None
+    ) -> str:
+        """Support: agent('message')"""
+        target = agent_name or self._default
+        if not target:
+            raise ValueError("No default agent available")
+        return await self.send(target, message)
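A short usage sketch of the access patterns this class exposes (the agent name `helper` is illustrative; `agent_app` stands in for the instance normally yielded by `fast.run()`):

```python
from mcp_agent.core.agent_app import AgentApp


async def demo(agent_app: AgentApp) -> None:
    # Explicit target and message, per AgentApp.send()
    reply = await agent_app.send("helper", "hello")
    # Callable form, per __call__; defaults to the first registered agent
    reply = await agent_app("hello", agent_name="helper")
    # Attribute and index access both return an AgentProxy, per __getattr__/__getitem__
    proxy = agent_app.helper
    proxy = agent_app["helper"]
    # Interactive loop, per prompt(); typing STOP returns the last result
    await agent_app.prompt("helper")
```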
@@ -0,0 +1,16 @@
+"""
+Enum definitions for supported agent types.
+"""
+
+from enum import Enum
+
+
+class AgentType(Enum):
+    """Enumeration of supported agent types."""
+
+    BASIC = "agent"
+    ORCHESTRATOR = "orchestrator"
+    PARALLEL = "parallel"
+    EVALUATOR_OPTIMIZER = "evaluator_optimizer"
+    ROUTER = "router"
+    CHAIN = "chain"
@@ -0,0 +1,65 @@
+"""
+Utility functions for agent operations.
+"""
+
+from typing import List, TYPE_CHECKING
+
+from mcp_agent.event_progress import ProgressAction
+
+# Handle circular imports
+if TYPE_CHECKING:
+    from mcp_agent.core.proxies import BaseAgentProxy, LLMAgentProxy
+    from mcp_agent.core.types import AgentOrWorkflow, ProxyDict
+else:
+    from mcp_agent.core.proxies import BaseAgentProxy, LLMAgentProxy
+    # Define minimal types for runtime
+    AgentOrWorkflow = object  # Simple placeholder
+    ProxyDict = dict  # Simple placeholder
+
+
+def unwrap_proxy(proxy: BaseAgentProxy) -> AgentOrWorkflow:
+    """
+    Unwrap a proxy to get the underlying agent or workflow instance.
+
+    Args:
+        proxy: The proxy object to unwrap
+
+    Returns:
+        The underlying Agent or workflow instance
+    """
+    if isinstance(proxy, LLMAgentProxy):
+        return proxy._agent
+    return proxy._workflow
+
+
+def get_agent_instances(
+    agent_names: List[str], active_agents: ProxyDict
+) -> List[AgentOrWorkflow]:
+    """
+    Get list of actual agent/workflow instances from a list of names.
+
+    Args:
+        agent_names: List of agent names to look up
+        active_agents: Dictionary of active agent proxies
+
+    Returns:
+        List of unwrapped agent/workflow instances
+    """
+    return [unwrap_proxy(active_agents[name]) for name in agent_names]
+
+
+def log_agent_load(app, agent_name: str) -> None:
+    """
+    Log agent loading event to application logger.
+
+    Args:
+        app: The application instance
+        agent_name: Name of the agent being loaded
+    """
+    app._logger.info(
+        f"Loaded {agent_name}",
+        data={
+            "progress_action": ProgressAction.LOADED,
+            "agent_name": agent_name,
+        },
+    )
@@ -0,0 +1,23 @@
+"""
+Error handling utilities for agent operations.
+"""
+
+from rich import print
+
+
+def handle_error(e: Exception, error_type: str, suggestion: str = None) -> None:
+    """
+    Handle errors with consistent formatting and messaging.
+
+    Args:
+        e: The exception that was raised
+        error_type: Type of error to display
+        suggestion: Optional suggestion message to display
+    """
+    print(f"\n[bold red]{error_type}:")
+    print(getattr(e, "message", str(e)))
+    if hasattr(e, "details") and e.details:
+        print("\nDetails:")
+        print(e.details)
+    if suggestion:
+        print(f"\n{suggestion}")
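A minimal usage sketch for `handle_error` (the exception and messages here are illustrative, not taken from the package):

```python
from mcp_agent.core.error_handling import handle_error

try:
    raise RuntimeError("server 'fetch' is not configured")
except RuntimeError as exc:
    handle_error(
        exc,
        "Server Configuration Error",
        suggestion="Check the server names in fastagent.config.yaml",
    )
```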