fast-agent-mcp 0.0.13__py3-none-any.whl → 0.0.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.0.13.dist-info → fast_agent_mcp-0.0.15.dist-info}/METADATA +213 -15
- {fast_agent_mcp-0.0.13.dist-info → fast_agent_mcp-0.0.15.dist-info}/RECORD +20 -13
- mcp_agent/cli/commands/bootstrap.py +1 -1
- mcp_agent/core/agent_app.py +163 -0
- mcp_agent/core/agent_types.py +16 -0
- mcp_agent/core/agent_utils.py +65 -0
- mcp_agent/core/error_handling.py +23 -0
- mcp_agent/core/exceptions.py +7 -0
- mcp_agent/core/fastagent.py +407 -301
- mcp_agent/core/proxies.py +127 -0
- mcp_agent/core/types.py +22 -0
- mcp_agent/resources/examples/internal/social.py +66 -0
- mcp_agent/resources/examples/workflows/chaining.py +16 -6
- mcp_agent/resources/examples/workflows/parallel.py +4 -0
- mcp_agent/workflows/llm/augmented_llm.py +75 -1
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +49 -18
- mcp_agent/workflows/parallel/parallel_llm.py +32 -7
- {fast_agent_mcp-0.0.13.dist-info → fast_agent_mcp-0.0.15.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.0.13.dist-info → fast_agent_mcp-0.0.15.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.0.13.dist-info → fast_agent_mcp-0.0.15.dist-info}/licenses/LICENSE +0 -0
{fast_agent_mcp-0.0.13.dist-info → fast_agent_mcp-0.0.15.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fast-agent-mcp
-Version: 0.0.13
+Version: 0.0.15
 Summary: Define, Prompt and Test MCP enabled Agents and Workflows
 Author-email: Shaun Smith <fastagent@llmindset.co.uk>, Sarmad Qadri <sarmad@lastmileai.dev>
 License: Apache License
@@ -247,13 +247,21 @@ Description-Content-Type: text/markdown
 
 ## Overview
 
-**`fast-agent`**
+**`fast-agent`** enables you to create and interact with sophisticated Agents and Workflows in minutes.
 
-The simple declarative syntax lets you concentrate on
+The simple declarative syntax lets you concentrate on composing your Prompts and MCP Servers to [build effective agents](https://www.anthropic.com/research/building-effective-agents).
 
-
+Evaluate how different models handle Agent and MCP Server calling tasks, then build multi-model workflows using the best provider for each task.
 
-###
+### Agent Application Development
+
+Prompts and configurations that define your Agent Applications are stored in simple files, with minimal boilerplate, enabling simple management and version control.
+
+Chat with individual Agents and Components before, during and after workflow execution to tune and diagnose your application.
+
+Simple model selection makes testing Model <-> MCP Server interaction painless.
+
+## Get started:
 
 Start by installing the [uv package manager](https://docs.astral.sh/uv/) for Python. Then:
 
@@ -265,15 +273,209 @@ uv run agent.py --model=o3-mini.low # specify a model
 fast-agent bootstrap workflow # create "building effective agents" examples
 ```
 
-Other bootstrap examples include a Researcher (with Evaluator-Optimizer workflow) and Data Analysis (similar to ChatGPT experience), demonstrating MCP Roots support.
+Other bootstrap examples include a Researcher Agent (with Evaluator-Optimizer workflow) and Data Analysis Agent (similar to the ChatGPT experience), demonstrating MCP Roots support.
 
 > Windows Users - there are a couple of configuration changes needed for the Filesystem and Docker MCP Servers - necessary changes are detailed within the configuration files.
 
-
+### Basic Agents
+
+Defining an agent is as simple as:
+
+```python
+@fast.agent(
+    instruction="Given an object, respond only with an estimate of its size."
+)
+```
+
+We can then send messages to the Agent:
+
+```python
+async with fast.run() as agent:
+    moon_size = await agent("the moon")
+    print(moon_size)
+```
+
+Or start an interactive chat with the Agent:
+
+```python
+async with fast.run() as agent:
+    await agent()
+```
+
+Here is the complete `sizer.py` Agent application, with boilerplate code:
+
+```python
+import asyncio
+from mcp_agent.core.fastagent import FastAgent
+
+# Create the application
+fast = FastAgent("Agent Example")
+
+@fast.agent(
+    instruction="Given an object, respond only with an estimate of its size."
+)
+
+async def main():
+    async with fast.run() as agent:
+        await agent()
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+The Agent can then be run with `uv run sizer.py`.
+
+Specify a model with the `--model` switch - for example `uv run sizer.py --model sonnet`.
+
+### Combining Agents and using MCP Servers
+
+_To generate examples use `fast-agent bootstrap workflow`. This example can be run with `uv run chaining.py`. fast-agent looks for configuration files in the current directory before checking parent directories recursively._
+
+Agents can be chained to build a workflow, using MCP Servers defined in the `fastagent.config.yaml` file:
+
+```python
+@fast.agent(
+    "url_fetcher",
+    "Given a URL, provide a complete and comprehensive summary",
+    servers=["fetch"], # Name of an MCP Server defined in fastagent.config.yaml
+)
+@fast.agent(
+    "social_media",
+    """
+    Write a 280 character social media post for any given text.
+    Respond only with the post, never use hashtags.
+    """,
+)
+
+async def main():
+    async with fast.run() as agent:
+        await agent.social_media(
+            await agent.url_fetcher("http://llmindset.co.uk/resources/mcp-hfspace/")
+        )
+```
+
+All Agents and Workflows respond to `.send("message")` and `.prompt()` to begin a chat session.
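
For example, building on the `social.py` agents above, a minimal sketch of both calls; the exact return values are inferred from the sentence above, so treat it as illustrative:

```python
async with fast.run() as agent:
    # One-shot: send a message to a named Agent and receive its reply as a string
    summary = await agent.url_fetcher.send("http://llmindset.co.uk/resources/mcp-hfspace/")
    print(summary)

    # Interactive: open a chat session with a specific Agent
    await agent.social_media.prompt()
```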
+
+Saved as `social.py` we can now run this workflow from the command line with:
+
+```bash
+uv run social.py --agent social_media --message "<url>"
+```
+
+Add the `--quiet` switch to only return the final response.
+
+## Workflows
+
+### Chain
+
+The `chain` workflow offers a more declarative approach to calling Agents in sequence:
+
+```python
 
-
+@fast.chain(
+    "post_writer",
+    sequence=["url_fetcher","social_media"]
+)
 
-
+# we can then prompt it directly:
+async with fast.run() as agent:
+    await agent.post_writer()
+
+```
+
+This starts an interactive session, which produces a short social media post for a given URL. If a _chain_ is prompted, by default it returns to a chat with the last Agent in the chain.
+
+Chains can be incorporated in other workflows, or contain other workflow elements (including other Chains). You can set an `instruction` to precisely describe its capabilities to other workflow steps if needed.
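
A sketch of what that might look like for the chain above; the `instruction=` keyword is taken from the sentence above rather than a documented signature, so treat it as an assumption:

```python
@fast.chain(
    "post_writer",
    sequence=["url_fetcher", "social_media"],
    # Assumed keyword, per the note above: lets other workflow steps know what this chain does
    instruction="Fetch a URL and produce a short social media post summarising it.",
)
```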
+
+### Parallel
+
+The Parallel Workflow sends the same message to multiple Agents simultaneously (`fan-out`), then uses the `fan-in` Agent to process the combined content.
+
+```python
+@fast.agent("translate_fr", "Translate the text to French")
+@fast.agent("translate_de", "Translate the text to German")
+@fast.agent("translate_es", "Translate the text to Spanish")
+
+@fast.parallel(
+    name="translate",
+    fan_out=["translate_fr","translate_de","translate_es"]
+)
+
+@fast.chain(
+    "post_writer",
+    sequence=["url_fetcher","social_media","translate"]
+)
+```
+
+Look at the `parallel.py` workflow for more examples. If you don't specify a `fan-in` agent, the `parallel` returns Agent results verbatim.
+
+The Parallel is also useful for ensembling ideas from different LLMs.
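
As a sketch of the fan-in side described above, an aggregating Agent could be supplied to the parallel; the `fan_in=` keyword mirrors `fan_out=` and is an assumption here, as is the aggregator's name:

```python
@fast.agent("combiner", "Combine the translations into a single multilingual summary")

@fast.parallel(
    name="translate",
    fan_out=["translate_fr", "translate_de", "translate_es"],
    fan_in="combiner",  # assumed parameter name: the Agent that processes the combined results
)
```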
+
+### Evaluator-Optimizer
+
+Evaluator-Optimizers combine two agents: one to generate content (the `generator`), and the other to judge that content and provide actionable feedback (the `evaluator`). Messages are sent to the generator first, then the pair run in a loop until either the evaluator is satisfied with the quality, or the maximum number of refinements is reached.
+
+```python
+@fast.evaluator_optimizer(
+    name="researcher",
+    generator="web_searcher",
+    evaluator="quality_assurance",
+    min_rating="EXCELLENT",
+    max_refinements=3
+)
+
+async with fast.run() as agent:
+    await agent.researcher.send("produce a report on how to make the perfect espresso")
+```
+
+See the `evaluator.py` workflow example, or `fast-agent bootstrap researcher` for a more complete example.
+
+### Router
+
+Routers use an LLM to assess a message, and route it to the most appropriate Agent. The routing prompt is automatically generated based on the Agent instructions and available Servers.
+
+```python
+@fast.router(
+    name="route",
+    agents=["agent1","agent2","agent3"]
+)
+```
+
+Look at the `router.py` workflow for an example.
+
+### Orchestrator
+
+Given a complex task, the Orchestrator uses an LLM to generate a plan to divide the task amongst the available Agents. The planning and aggregation prompts are generated by the Orchestrator, which benefits from using more capable models. Plans can either be built once at the beginning (`plantype="full"`) or iteratively (`plantype="iterative"`).
+
+```python
+@fast.orchestrator(
+    name="orchestrate",
+    agents=["task1","task2","task3"]
+)
+```
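
For the iterative planning mode mentioned above, a variant might look like the following; the keyword spelling `plantype` is taken from the paragraph above and should be treated as an assumption:

```python
@fast.orchestrator(
    name="orchestrate",
    agents=["task1", "task2", "task3"],
    plantype="iterative",  # assumed keyword, per the text above; "full" builds the plan once up front
)
```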
+
+See `orchestrator.py` in the workflow examples.
+
+## Agent Features
+
+```python
+@fast.agent(
+    name="agent",
+    instructions="instructions",
+    servers=["filesystem"], # list of MCP Servers for the agent, configured in fastagent.config.yaml
+    model="o3-mini.high", # specify a model for the agent
+    use_history=True, # agent can maintain chat history
+    human_input=True, # agent can request human input
+)
+```
+
+### Human Input
+
+When `human_input` is set to true for an Agent, it is presented with the option to prompt the User for input.
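
For instance, a minimal sketch combining the flags above; the agent name and instruction are illustrative only:

```python
@fast.agent(
    name="interviewer",
    instruction="Gather requirements for the project, asking the human for clarification when needed.",
    human_input=True,  # the Agent may pause and ask the User a question
)

async def main():
    async with fast.run() as agent:
        await agent.interviewer()
```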
+
+## Project Notes
+
+`fast-agent` builds on the [`mcp-agent`](https://github.com/lastmile-ai/mcp-agent) project by Sarmad Qadri.
 
 ### llmindset.co.uk fork:
 
@@ -292,10 +494,6 @@ FastAgent lets you interact with Agents during a workflow, enabling "warm-up" an
 - Enhanced Human Input Messaging and Handling
 - Declarative workflows
 
-
-
-We recommend using [uv](https://docs.astral.sh/uv/) to manage your Python projects:
-
-## Table of Contents
+### Features to add.
 
-
+- Chat History Clear.
{fast_agent_mcp-0.0.13.dist-info → fast_agent_mcp-0.0.15.dist-info}/RECORD
CHANGED
@@ -13,14 +13,20 @@ mcp_agent/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/cli/__main__.py,sha256=bhxe66GYqy0q78OQhi7dkuubY1Tn0bQL6hU5Nn47E34,73
 mcp_agent/cli/main.py,sha256=cqRxYTpeZ656lzf9qLR3LPnQXrFVDxlWm5gRuqyzUQg,2456
 mcp_agent/cli/terminal.py,sha256=5fqrKlJvIpKEuvpvZ653OueQSYFFktBEbosjr2ucMUc,1026
-mcp_agent/cli/commands/bootstrap.py,sha256=
+mcp_agent/cli/commands/bootstrap.py,sha256=z1wZSy8vO_GZPGLrFGzG3EKFQgAHC08jiIdVyylo-58,10778
 mcp_agent/cli/commands/config.py,sha256=32YTS5jmsYAs9QzAhjkG70_daAHqOemf4XbZBBSMz6g,204
 mcp_agent/cli/commands/setup.py,sha256=8ofxUAF2nUSu1IarDZSAsTt6_6PoEht3TGbz9N6WSbs,6239
 mcp_agent/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+mcp_agent/core/agent_app.py,sha256=2gnORb52cpWYGjRDNTJ9lVCEEc11c7Xi874ho7bbYVQ,6097
+mcp_agent/core/agent_types.py,sha256=yKiMbv9QO2dduq4zXmoMZlOZpXJZhM4oNwIq1-134FE,318
+mcp_agent/core/agent_utils.py,sha256=yUJ-qvw5TblqqOsB1vj0Qvcz9mass9awPA6UNNvuw0A,1738
 mcp_agent/core/enhanced_prompt.py,sha256=0V5q0xcCk8PBwtc0p62B8JJ1VvqxN_wuJiXC2QPqv1M,12750
-mcp_agent/core/
-mcp_agent/core/
+mcp_agent/core/error_handling.py,sha256=D3HMW5odrbJvaKqcpCGj6eDXrbFcuqYaCZz7fyYiTu4,623
+mcp_agent/core/exceptions.py,sha256=a2-JGRwFFRoQEPuAq0JC5PhAJ5TO3xVJfdS4-VN29cw,2225
+mcp_agent/core/fastagent.py,sha256=MLMdQ4_Cjb2svWyGVWLGf8EzI3dSFCl4BS5qq8UrKgg,56966
+mcp_agent/core/proxies.py,sha256=hXDUpsgGO4xBTIjdUeXj6vULPb8sf55vAFVQh6Ybn60,4411
 mcp_agent/core/server_validation.py,sha256=_59cn16nNT4HGPwg19HgxMtHK4MsdWYDUw_CuL-5xek,1696
+mcp_agent/core/types.py,sha256=Zhi9iW7uiOfdpSt9NC0FCtGRFtJPg4mpZPK2aYi7a7M,817
 mcp_agent/eval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/executor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/executor/decorator_registry.py,sha256=eONv5WvIcjKd43jVqeP7iB2EkAK-ErhdmXt6ogN0K_w,3848
@@ -53,17 +59,18 @@ mcp_agent/resources/examples/data-analysis/fastagent.config.yaml,sha256=eTKGbjnT
 mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv,sha256=pcMeOL1_r8m8MziE6xgbBrQbjl5Ijo98yycZn7O-dlk,227977
 mcp_agent/resources/examples/internal/agent.py,sha256=f-jTgYabV3nWCQm0ZP9NtSEWjx3nQbRngzArRufcELg,384
 mcp_agent/resources/examples/internal/job.py,sha256=WEKIAANMEAuKr13__rYf3PqJeTAsNB_kqYqbqVYQlUM,4093
+mcp_agent/resources/examples/internal/social.py,sha256=Cot2lg3PLhLm13gPdVFvFEN28-mm6x3-jHu2YsV4N3s,1707
 mcp_agent/resources/examples/mcp_researcher/researcher-eval.py,sha256=kNPjIU-JwE0oIBQKwhv6lZsUF_SPtYVkiEEbY1ZVZxk,1807
 mcp_agent/resources/examples/researcher/fastagent.config.yaml,sha256=2_VXZneckR6zk6RWzzL-smV_oWmgg4uSkLWqZv8jF0I,1995
 mcp_agent/resources/examples/researcher/researcher-eval.py,sha256=kNPjIU-JwE0oIBQKwhv6lZsUF_SPtYVkiEEbY1ZVZxk,1807
 mcp_agent/resources/examples/researcher/researcher.py,sha256=jPRafm7jbpHKkX_dQiYGG3Sw-e1Dm86q-JZT-WZDhM0,1425
 mcp_agent/resources/examples/workflows/agent_build.py,sha256=vdjS02rZR88RU53WYzXxPscfFNEFFe_niHYE_i49I8Q,2396
-mcp_agent/resources/examples/workflows/chaining.py,sha256=
+mcp_agent/resources/examples/workflows/chaining.py,sha256=1G_0XBcFkSJCOXb6N_iXWlSc_oGAlhENR0k_CN1vJKI,1208
 mcp_agent/resources/examples/workflows/evaluator.py,sha256=FZy-ciZafdqSHUW67LKdHw0t9rvX6X67waMOoeIN3GY,3147
 mcp_agent/resources/examples/workflows/fastagent.config.yaml,sha256=k2AiapOcK42uqG2nWDVvnSLqN4okQIQZK0FTbZufBpY,809
 mcp_agent/resources/examples/workflows/human_input.py,sha256=c8cBdLEPbaMXddFwsfN3Z7RFs5PZXsdrjANfvq1VTPM,605
 mcp_agent/resources/examples/workflows/orchestrator.py,sha256=pRJqB-ok79_iEj8aG4FysHyXz6wAHLUX-5tS8khUI7k,2574
-mcp_agent/resources/examples/workflows/parallel.py,sha256=
+mcp_agent/resources/examples/workflows/parallel.py,sha256=dowCw6i8mVaWFWqLvyEEJz1kZDQqdeiltcM-hEz38iY,3222
 mcp_agent/resources/examples/workflows/router.py,sha256=XT_ewCrxPxdUTMCYQGw34qZQ3GGu8TYY_v5Lige8By4,1707
 mcp_agent/telemetry/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/telemetry/usage_tracking.py,sha256=ePujKMSjPxB7k6X34DGaVlnsV1728mcWZq38OqahiCU,501
@@ -83,8 +90,8 @@ mcp_agent/workflows/intent_classifier/intent_classifier_llm.py,sha256=WSLUv2Casb
 mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py,sha256=Hp4454IniWFxV4ml50Ml8ip9rS1La5FBn5pd7vm1FHA,1964
 mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py,sha256=zj76WlTYnSCYjBQ_IDi5vFBQGmNwYaoUq1rT730sY98,1940
 mcp_agent/workflows/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mcp_agent/workflows/llm/augmented_llm.py,sha256=
-mcp_agent/workflows/llm/augmented_llm_anthropic.py,sha256=
+mcp_agent/workflows/llm/augmented_llm.py,sha256=Hyx-jwgbMjE_WQ--YjIUvdj6HAgX36IvXBesGy6uic0,25884
+mcp_agent/workflows/llm/augmented_llm_anthropic.py,sha256=4hyC8xI8zTLm7NgbyQRoWRu2ycoTP4Qu6bnPXR1p9ow,22473
 mcp_agent/workflows/llm/augmented_llm_openai.py,sha256=5PwTh0QJSQ29EtK0UuiltgX6snRSBoau75C35S4xQcQ,24477
 mcp_agent/workflows/llm/llm_selector.py,sha256=G7pIybuBDwtmyxUDov_QrNYH2FoI0qFRu2JfoxWUF5Y,11045
 mcp_agent/workflows/llm/model_factory.py,sha256=7zTJrO2ReHa_6dfh_gY6xO8dTySqGFCKlOG9-AMJ-i8,6920
@@ -95,7 +102,7 @@ mcp_agent/workflows/orchestrator/orchestrator_prompts.py,sha256=-ogkjDoCXBDOyYE9
 mcp_agent/workflows/parallel/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/workflows/parallel/fan_in.py,sha256=EivpUL5-qftctws-tlfwmYS1QeSwr07POIbBUbwvwOk,13184
 mcp_agent/workflows/parallel/fan_out.py,sha256=J-yezgjzAWxfueW_Qcgwoet4PFDRIh0h4m48lIbFA4c,7023
-mcp_agent/workflows/parallel/parallel_llm.py,sha256=
+mcp_agent/workflows/parallel/parallel_llm.py,sha256=fk88DhBRAI41Ph0spe_yBtrMTSj0g47yoA-ozuOxZhE,5807
 mcp_agent/workflows/router/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/workflows/router/router_base.py,sha256=1Qr3Fx9_KxpotMV-eaNT79etayAxWuQOmanDfk1qjtI,10250
 mcp_agent/workflows/router/router_embedding.py,sha256=wEU49li9OqTX-Xucm0HDUFLZjlND1WuewOcQVAo0s2E,7944
@@ -106,8 +113,8 @@ mcp_agent/workflows/swarm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
 mcp_agent/workflows/swarm/swarm.py,sha256=-lAIeSWDqbGHGRPTvjiP9nIKWvxxy9DAojl9yQzO1Pw,11050
 mcp_agent/workflows/swarm/swarm_anthropic.py,sha256=pW8zFx5baUWGd5Vw3nIDF2oVOOGNorij4qvGJKdYPcs,1624
 mcp_agent/workflows/swarm/swarm_openai.py,sha256=wfteywvAGkT5bLmIxX_StHJq8144whYmCRnJASAjOes,1596
-fast_agent_mcp-0.0.
-fast_agent_mcp-0.0.
-fast_agent_mcp-0.0.
-fast_agent_mcp-0.0.
-fast_agent_mcp-0.0.
+fast_agent_mcp-0.0.15.dist-info/METADATA,sha256=KL03PExGMGz9tQ9E72MKAnDS6Fi6rSGjoqyYAV3JLd4,22779
+fast_agent_mcp-0.0.15.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+fast_agent_mcp-0.0.15.dist-info/entry_points.txt,sha256=2IXtSmDK9XjWN__RWuRIJTgWyW17wJnJ_h-pb0pZAxo,174
+fast_agent_mcp-0.0.15.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
+fast_agent_mcp-0.0.15.dist-info/RECORD,,
mcp_agent/core/agent_app.py
ADDED
@@ -0,0 +1,163 @@
+"""
+Main application wrapper for interacting with agents.
+"""
+
+from typing import Optional, Dict, TYPE_CHECKING
+
+from mcp_agent.app import MCPApp
+from mcp_agent.progress_display import progress_display
+from mcp_agent.workflows.orchestrator.orchestrator import Orchestrator
+from mcp_agent.workflows.parallel.parallel_llm import ParallelLLM
+from mcp_agent.workflows.evaluator_optimizer.evaluator_optimizer import (
+    EvaluatorOptimizerLLM,
+)
+
+# Import proxies directly - they handle their own circular imports
+from mcp_agent.core.proxies import (
+    BaseAgentProxy,
+    AgentProxy,
+    LLMAgentProxy,
+    RouterProxy,
+    ChainProxy,
+    WorkflowProxy,
+)
+
+# Handle possible circular imports with types
+if TYPE_CHECKING:
+    from mcp_agent.core.types import ProxyDict
+else:
+    ProxyDict = Dict[str, BaseAgentProxy]
+
+
+class AgentApp:
+    """Main application wrapper"""
+
+    def __init__(self, app: MCPApp, agents: ProxyDict):
+        self._app = app
+        self._agents = agents
+        # Optional: set default agent for direct calls
+        self._default = next(iter(agents)) if agents else None
+
+    async def send(self, agent_name: str, message: Optional[str]) -> str:
+        """Core message sending"""
+        if agent_name not in self._agents:
+            raise ValueError(f"No agent named '{agent_name}'")
+
+        if not message or "" == message:
+            return await self.prompt(agent_name)
+
+        proxy = self._agents[agent_name]
+        return await proxy.generate_str(message)
+
+    async def prompt(self, agent_name: Optional[str] = None, default: str = "") -> str:
+        """
+        Interactive prompt for sending messages with advanced features.
+
+        Args:
+            agent_name: Optional target agent name (uses default if not specified)
+            default: Default message to use when user presses enter
+        """
+        from mcp_agent.core.enhanced_prompt import (
+            get_enhanced_input,
+            handle_special_commands,
+        )
+
+        agent = agent_name or self._default
+
+        if agent not in self._agents:
+            raise ValueError(f"No agent named '{agent}'")
+
+        # Pass all available agent names for auto-completion
+        available_agents = list(self._agents.keys())
+
+        # Create agent_types dictionary mapping agent names to their types
+        agent_types = {}
+        for name, proxy in self._agents.items():
+            # Determine agent type based on the proxy type
+            if isinstance(proxy, LLMAgentProxy):
+                # Convert AgentType.BASIC.value ("agent") to "Agent"
+                agent_types[name] = "Agent"
+            elif isinstance(proxy, RouterProxy):
+                agent_types[name] = "Router"
+            elif isinstance(proxy, ChainProxy):
+                agent_types[name] = "Chain"
+            elif isinstance(proxy, WorkflowProxy):
+                # For workflow proxies, check the workflow type
+                workflow = proxy._workflow
+                if isinstance(workflow, Orchestrator):
+                    agent_types[name] = "Orchestrator"
+                elif isinstance(workflow, ParallelLLM):
+                    agent_types[name] = "Parallel"
+                elif isinstance(workflow, EvaluatorOptimizerLLM):
+                    agent_types[name] = "Evaluator"
+                else:
+                    agent_types[name] = "Workflow"
+
+        result = ""
+        while True:
+            with progress_display.paused():
+                # Use the enhanced input method with advanced features
+                user_input = await get_enhanced_input(
+                    agent_name=agent,
+                    default=default,
+                    show_default=(default != ""),
+                    show_stop_hint=True,
+                    multiline=False,  # Default to single-line mode
+                    available_agent_names=available_agents,
+                    syntax=None,  # Can enable syntax highlighting for code input
+                    agent_types=agent_types,  # Pass agent types for display
+                )
+
+            # Handle special commands
+            command_result = await handle_special_commands(user_input, self)
+
+            # Check if we should switch agents
+            if (
+                isinstance(command_result, dict)
+                and "switch_agent" in command_result
+            ):
+                agent = command_result["switch_agent"]
+                continue
+
+            # Skip further processing if command was handled
+            if command_result:
+                continue
+
+            if user_input.upper() == "STOP":
+                return result
+            if user_input == "":
+                continue
+
+            result = await self.send(agent, user_input)
+
+            # Check if current agent is a chain that should continue with final agent
+            if agent_types.get(agent) == "Chain":
+                proxy = self._agents[agent]
+                if isinstance(proxy, ChainProxy) and proxy._continue_with_final:
+                    # Get the last agent in the sequence
+                    last_agent = proxy._sequence[-1]
+                    # Switch to that agent for the next iteration
+                    agent = last_agent
+
+        return result
+
+    def __getattr__(self, name: str) -> BaseAgentProxy:
+        """Support: agent.researcher"""
+        if name not in self._agents:
+            raise AttributeError(f"No agent named '{name}'")
+        return AgentProxy(self, name)
+
+    def __getitem__(self, name: str) -> BaseAgentProxy:
+        """Support: agent['researcher']"""
+        if name not in self._agents:
+            raise KeyError(f"No agent named '{name}'")
+        return AgentProxy(self, name)
+
+    async def __call__(
+        self, message: Optional[str] = "", agent_name: Optional[str] = None
+    ) -> str:
+        """Support: agent('message')"""
+        target = agent_name or self._default
+        if not target:
+            raise ValueError("No default agent available")
+        return await self.send(target, message)
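
Taken together with the docstrings above, the wrapper supports three access styles. A hypothetical usage sketch, assuming the returned proxies expose `.send()` as described in the README:

```python
async with fast.run() as agent:  # `agent` is an AgentApp
    reply = await agent("hello")  # __call__: message goes to the default (first) agent
    report = await agent.researcher.send("summarise the findings")  # attribute access returns an AgentProxy
    same = await agent["researcher"].send("summarise the findings")  # item access is equivalent
    await agent.prompt("researcher")  # interactive session with a named agent
```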
mcp_agent/core/agent_types.py
ADDED
@@ -0,0 +1,16 @@
+"""
+Enum definitions for supported agent types.
+"""
+
+from enum import Enum
+
+
+class AgentType(Enum):
+    """Enumeration of supported agent types."""
+
+    BASIC = "agent"
+    ORCHESTRATOR = "orchestrator"
+    PARALLEL = "parallel"
+    EVALUATOR_OPTIMIZER = "evaluator_optimizer"
+    ROUTER = "router"
+    CHAIN = "chain"
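
These values line up with the decorator names used in the README above, and because `AgentType` is a standard `Enum`, a stored value can be mapped back to its member:

```python
from mcp_agent.core.agent_types import AgentType

# Look up a member by its string value, and read a value back out
assert AgentType("chain") is AgentType.CHAIN
assert AgentType.BASIC.value == "agent"
```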
mcp_agent/core/agent_utils.py
ADDED
@@ -0,0 +1,65 @@
+"""
+Utility functions for agent operations.
+"""
+
+from typing import List, TYPE_CHECKING
+
+from mcp_agent.event_progress import ProgressAction
+
+# Handle circular imports
+if TYPE_CHECKING:
+    from mcp_agent.core.proxies import BaseAgentProxy, LLMAgentProxy
+    from mcp_agent.core.types import AgentOrWorkflow, ProxyDict
+else:
+    from mcp_agent.core.proxies import BaseAgentProxy, LLMAgentProxy
+    # Define minimal types for runtime
+    AgentOrWorkflow = object  # Simple placeholder
+    ProxyDict = dict  # Simple placeholder
+
+
+def unwrap_proxy(proxy: BaseAgentProxy) -> AgentOrWorkflow:
+    """
+    Unwrap a proxy to get the underlying agent or workflow instance.
+
+    Args:
+        proxy: The proxy object to unwrap
+
+    Returns:
+        The underlying Agent or workflow instance
+    """
+    if isinstance(proxy, LLMAgentProxy):
+        return proxy._agent
+    return proxy._workflow
+
+
+def get_agent_instances(
+    agent_names: List[str], active_agents: ProxyDict
+) -> List[AgentOrWorkflow]:
+    """
+    Get list of actual agent/workflow instances from a list of names.
+
+    Args:
+        agent_names: List of agent names to look up
+        active_agents: Dictionary of active agent proxies
+
+    Returns:
+        List of unwrapped agent/workflow instances
+    """
+    return [unwrap_proxy(active_agents[name]) for name in agent_names]
+
+
+def log_agent_load(app, agent_name: str) -> None:
+    """
+    Log agent loading event to application logger.
+
+    Args:
+        app: The application instance
+        agent_name: Name of the agent being loaded
+    """
+    app._logger.info(
+        f"Loaded {agent_name}",
+        data={
+            "progress_action": ProgressAction.LOADED,
+            "agent_name": agent_name,
+        },
+    )
mcp_agent/core/error_handling.py
ADDED
@@ -0,0 +1,23 @@
+"""
+Error handling utilities for agent operations.
+"""
+
+from rich import print
+
+
+def handle_error(e: Exception, error_type: str, suggestion: str = None) -> None:
+    """
+    Handle errors with consistent formatting and messaging.
+
+    Args:
+        e: The exception that was raised
+        error_type: Type of error to display
+        suggestion: Optional suggestion message to display
+    """
+    print(f"\n[bold red]{error_type}:")
+    print(getattr(e, "message", str(e)))
+    if hasattr(e, "details") and e.details:
+        print("\nDetails:")
+        print(e.details)
+    if suggestion:
+        print(f"\n{suggestion}")
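
A hypothetical call site, using `ModelConfigError` from the exceptions module shown below; the message and suggestion strings are illustrative:

```python
from mcp_agent.core.error_handling import handle_error
from mcp_agent.core.exceptions import ModelConfigError

try:
    # Simulate a configuration problem to show the formatted output
    raise ModelConfigError("Unknown model 'gpt-99'", "The model string could not be parsed")
except ModelConfigError as e:
    handle_error(e, "Model Configuration Error", "Check the model name passed via --model or in the agent definition")
```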
mcp_agent/core/exceptions.py
CHANGED
@@ -56,6 +56,13 @@ class ModelConfigError(FastAgentError):
         super().__init__(message, details)
 
 
+class CircularDependencyError(FastAgentError):
+    """Raised when we detect a Circular Dependency in the workflow"""
+
+    def __init__(self, message: str, details: str = ""):
+        super().__init__(message, details)
+
+
 class PromptExitError(FastAgentError):
     """Raised from enhanced_prompt when the user requests hard exits"""
 