fast-agent-mcp 0.0.13__tar.gz → 0.0.14__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/PKG-INFO +191 -14
- fast_agent_mcp-0.0.14/README.md +241 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/pyproject.toml +1 -1
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/cli/commands/bootstrap.py +1 -1
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/core/exceptions.py +7 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/core/fastagent.py +219 -27
- fast_agent_mcp-0.0.14/src/mcp_agent/resources/examples/workflows/chaining.py +44 -0
- fast_agent_mcp-0.0.13/README.md +0 -64
- fast_agent_mcp-0.0.13/src/mcp_agent/resources/examples/workflows/chaining.py +0 -34
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/.gitignore +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/LICENSE +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/__init__.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/agents/__init__.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/agents/agent.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/app.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/cli/__init__.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/cli/__main__.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/cli/commands/config.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/cli/commands/setup.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/cli/main.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/cli/terminal.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/config.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/console.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/context.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/context_dependent.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/core/__init__.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/core/enhanced_prompt.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/core/server_validation.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/eval/__init__.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/event_progress.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/executor/__init__.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/executor/decorator_registry.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/executor/executor.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/executor/task_registry.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/executor/temporal.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/executor/workflow.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/executor/workflow_signal.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/human_input/__init__.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/human_input/handler.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/human_input/types.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/logging/__init__.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/logging/events.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/logging/json_serializer.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/logging/listeners.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/logging/logger.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/logging/rich_progress.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/logging/tracing.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/logging/transport.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/mcp/__init__.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/mcp/gen_client.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/mcp/mcp_activity.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/mcp/mcp_agent_client_session.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/mcp/mcp_agent_server.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/mcp/mcp_aggregator.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/mcp/mcp_connection_manager.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/mcp/stdio.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/mcp_server_registry.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/progress_display.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/resources/examples/data-analysis/analysis.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/resources/examples/internal/agent.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/resources/examples/internal/job.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/resources/examples/researcher/researcher-eval.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/resources/examples/researcher/researcher.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/resources/examples/workflows/agent_build.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/resources/examples/workflows/evaluator.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/resources/examples/workflows/human_input.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/resources/examples/workflows/orchestrator.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/resources/examples/workflows/parallel.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/resources/examples/workflows/router.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/telemetry/__init__.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/telemetry/usage_tracking.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/__init__.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/embedding/__init__.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/embedding/embedding_base.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/embedding/embedding_cohere.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/embedding/embedding_openai.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/intent_classifier/__init__.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/intent_classifier/intent_classifier_base.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/llm/__init__.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/llm/augmented_llm.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/llm/augmented_llm_anthropic.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/llm/augmented_llm_openai.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/llm/llm_selector.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/llm/model_factory.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/orchestrator/__init__.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/orchestrator/orchestrator.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/orchestrator/orchestrator_models.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/orchestrator/orchestrator_prompts.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/parallel/__init__.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/parallel/fan_in.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/parallel/fan_out.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/parallel/parallel_llm.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/router/__init__.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/router/router_base.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/router/router_embedding.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/router/router_embedding_cohere.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/router/router_embedding_openai.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/router/router_llm.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/swarm/__init__.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/swarm/swarm.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/swarm/swarm_anthropic.py +0 -0
- {fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/workflows/swarm/swarm_openai.py +0 -0
{fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fast-agent-mcp
-Version: 0.0.13
+Version: 0.0.14
 Summary: Define, Prompt and Test MCP enabled Agents and Workflows
 Author-email: Shaun Smith <fastagent@llmindset.co.uk>, Sarmad Qadri <sarmad@lastmileai.dev>
 License: Apache License
@@ -247,13 +247,19 @@ Description-Content-Type: text/markdown
 
 ## Overview
 
-**`fast-agent`** lets you
+**`fast-agent`** lets you build and interact with Agents and Workflows in minutes.
 
-The simple declarative syntax lets you concentrate on
+The simple declarative syntax lets you concentrate on composing your Prompts and MCP Servers to [build effective agents](https://www.anthropic.com/research/building-effective-agents).
 
-
+Evaluate how different models handle Agent and MCP Server calling tasks, then build multi-model workflows using the best provider for each task.
 
-###
+### Agent Application Development
+
+Prompts and configurations that define your Agent Applications are stored in simple files, with minimal boilerplate, enabling simple management and version control.
+
+Chat with individual Agents and Components before, during and after workflow execution to tune and diagnose your agent application. Simple model selection makes testing Model <-> MCP Server interaction painless.
+
+## Get started:
 
 Start by installing the [uv package manager](https://docs.astral.sh/uv/) for Python. Then:
 
@@ -265,15 +271,189 @@ uv run agent.py --model=o3-mini.low # specify a model
 fast-agent bootstrap workflow # create "building effective agents" examples
 ```
 
-Other bootstrap examples include a Researcher (with Evaluator-Optimizer workflow) and Data Analysis (similar to ChatGPT experience), demonstrating MCP Roots support.
+Other bootstrap examples include a Researcher Agent (with Evaluator-Optimizer workflow) and Data Analysis Agent (similar to the ChatGPT experience), demonstrating MCP Roots support.
 
 > Windows Users - there are a couple of configuration changes needed for the Filesystem and Docker MCP Servers - necessary changes are detailed within the configuration files.
 
-
+### Basic Agents
+
+Defining an agent is as simple as:
+
+```python
+@fast.agent(
+  instruction="Given an object, respond only with an estimate of its size."
+)
+```
+
+We can then send messages to the Agent:
+
+```python
+async with fast.run() as agent:
+  moon_size = await agent("the moon")
+  print(moon_size)
+```
+
+Or start an interactive chat with the Agent:
+```python
+async with fast.run() as agent:
+  await agent()
+```
+
+Here is the complete `sizer.py` Agent application, with boilerplate code:
+```python
+import asyncio
+from mcp_agent.core.fastagent import FastAgent
+
+# Create the application
+fast = FastAgent("Agent Example")
+
+@fast.agent(
+  instruction="Given an object, respond only with an estimate of its size."
+)
+
+async def main():
+  async with fast.run() as agent:
+    await agent()
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+The Agent can be run with `uv run sizer.py` and with a specific model using the command line option `--model gpt-4o-mini`.
+
+### Combining Agents and using MCP Servers
+
+_To generate examples use `fast-agent bootstrap workflow`. This example can be run with `uv run chaining.py`. fast-agent looks for configuration files in the current directory before checking parent directories recursively._
+
+Agents can be chained to build a workflow:
+```python
+@fast.agent(
+    "url_fetcher",
+    instruction="Given a URL, provide a complete and comprehensive summary",
+    servers=["fetch"], # Name of an MCP Server defined in fastagent.config.yaml
+)
+@fast.agent(
+    "social_media",
+    instruction="""
+    Write a 280 character social media post for any given text.
+    Respond only with the post, never use hashtags.
+    """,
+)
+
+async def main():
+    async with fast.run() as agent:
+        await agent.social_media(
+            await agent.url_fetcher("http://llmindset.co.uk/resources/mcp-hfspace/")
+        )
+```
+
+All Agents and Workflows respond to `.send("message")` to send a message and `.prompt()` to begin a chat session.
+
+## Workflows
+
+### Chain
+
+Alternatively, use the `chain` workflow type and the `prompt()` method to capture user input:
+```python
+
+@fast.chain(
+  "post_writer",
+  sequence=["url_fetcher", "social_media"]
+)
+
+# we can then prompt it directly:
+async with fast.run() as agent:
+  await agent.post_writer.prompt()
+
+```
+Chains can be incorporated in other workflows, or contain other workflow elements (including other Chains). You can set an `instruction` to precisely describe its capabilities to other workflow steps if needed.
+
+### Parallel
+
+The Parallel Workflow sends the same message to multiple Agents simultaneously (`fan-out`), then uses the `fan-in` agent to process the combined content.
+
+```python
+
+@fast.agent(
+  name="consolidator",
+  instruction="combine the lists, remove duplicates"
+)
+
+@fast.parallel(
+  name="ensemble",
+  fan_out=["agent_o3-mini", "agent_sonnet37", ...],
+  fan_in="consolidator"
+)
+
+async with fast.run() as agent:
+  result = await agent.ensemble.send("what are the 10 most important aspects of project management")
+```
+
+Look at the `parallel.py` workflow example for more details.
+
+### Evaluator-Optimizer
+
+Evaluator-Optimizers use two agents: one to generate content (the `generator`), and one to judge the content and provide actionable feedback (the `evaluator`). Messages are sent to the generator first, then the pair run in a loop until either the evaluator is satisfied with the quality, or the maximum number of refinements is reached.
+
+```python
+@fast.evaluator_optimizer(
+  name="researcher",
+  generator="web_searcher",
+  evaluator="quality_assurance",
+  min_rating="EXCELLENT",
+  max_refinements=3
+)
+
+async with fast.run() as agent:
+  await agent.researcher.send("produce a report on how to make the perfect espresso")
+```
+
+See the `evaluator.py` workflow example, or `fast-agent bootstrap researcher` for a more complete example.
 
-
+### Router
 
-
+Routers use an LLM to assess a message, and route it to the most appropriate Agent. The routing prompt is automatically generated by the router.
+
+```python
+@fast.router(
+  name="route",
+  agents=["agent1", "agent2", "agent3"]
+)
+```
+
+Look at the `router.py` workflow for an example.
+
+### Orchestrator
+
+Given a task, an Orchestrator uses an LLM to generate a plan to divide the task amongst the available agents and aggregate a result. The planning and aggregation prompts are generated by the Orchestrator, which benefits from using more capable models. Plans can either be built once at the beginning (`plantype="full"`) or iteratively (`plantype="iterative"`).
+
+```python
+@fast.orchestrator(
+  name="orchestrate",
+  agents=["task1", "task2", "task3"]
+)
+```
+
+## Agent Features
+
+```python
+@fast.agent(
+  name="agent",
+  instruction="instructions",
+  servers=["filesystem"], # list of MCP Servers for the agent, configured in fastagent.config.yaml
+  model="o3-mini.high", # specify a model for the agent
+  use_history=True, # agent can maintain chat history
+  human_input=True, # agent can request human input
+)
+```
+
+### Human Input
+
+When `human_input` is set to true for an Agent, it is presented with the option to prompt the User for input.
+
+## Project Notes
+
+`fast-agent` builds on the [`mcp-agent`](https://github.com/lastmile-ai/mcp-agent) project by Sarmad Qadri.
 
 ### llmindset.co.uk fork:
 
@@ -292,10 +472,7 @@ FastAgent lets you interact with Agents during a workflow, enabling "warm-up" an
 - Enhanced Human Input Messaging and Handling
 - Declarative workflows
 
-
-
-We recommend using [uv](https://docs.astral.sh/uv/) to manage your Python projects:
+### Features to add.
 
-
+- Chat History Clear.
 
-We welcome any and all kinds of contributions. Please see the [CONTRIBUTING guidelines](./CONTRIBUTING.md) to get started.
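The workflow example `chaining.py` grows from 34 to 44 lines in this release. Assembling the chaining decorators shown in the description above with the `FastAgent` boilerplate it also shows gives a sketch of a complete, runnable script. This is an illustrative reconstruction, not necessarily the exact file shipped in 0.0.14; the application name is made up, and the `fetch` server assumes a matching entry in `fastagent.config.yaml`.

```python
# chaining.py - illustrative sketch assembled from the snippets in the diff above;
# the example shipped in 0.0.14 may differ in detail.
import asyncio

from mcp_agent.core.fastagent import FastAgent

# Create the application
fast = FastAgent("Agent Chaining")


@fast.agent(
    "url_fetcher",
    instruction="Given a URL, provide a complete and comprehensive summary",
    servers=["fetch"],  # assumes a "fetch" MCP Server entry in fastagent.config.yaml
)
@fast.agent(
    "social_media",
    instruction="""
    Write a 280 character social media post for any given text.
    Respond only with the post, never use hashtags.
    """,
)
async def main():
    async with fast.run() as agent:
        # feed the fetcher's summary straight into the social media agent
        await agent.social_media(
            await agent.url_fetcher("http://llmindset.co.uk/resources/mcp-hfspace/")
        )


if __name__ == "__main__":
    asyncio.run(main())
```

As the description notes, such a script is run with `uv run chaining.py`, and `--model` switches the provider from the command line.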
fast_agent_mcp-0.0.14/README.md

@@ -0,0 +1,241 @@
+## FastAgent
+
+<p align="center">
+<a href="https://pypi.org/project/fast-agent-mcp/"><img src="https://img.shields.io/pypi/v/fast-agent-mcp?color=%2334D058&label=pypi" /></a>
+<a href="https://github.com/evalstate/fast-agent/issues"><img src="https://img.shields.io/github/issues-raw/evalstate/fast-agent" /></a>
+<a href="https://lmai.link/discord/mcp-agent"><img src="https://shields.io/discord/1089284610329952357" alt="discord" /></a>
+<img alt="Pepy Total Downloads" src="https://img.shields.io/pepy/dt/fast-agent-mcp?label=pypi%20%7C%20downloads"/>
+<a href="https://github.com/evalstate/fast-agent-mcp/blob/main/LICENSE"><img src="https://img.shields.io/pypi/l/fast-agent-mcp" /></a>
+</p>
+
+## Overview
+
+**`fast-agent`** lets you build and interact with Agents and Workflows in minutes.
+
+The simple declarative syntax lets you concentrate on composing your Prompts and MCP Servers to [build effective agents](https://www.anthropic.com/research/building-effective-agents).
+
+Evaluate how different models handle Agent and MCP Server calling tasks, then build multi-model workflows using the best provider for each task.
+
+### Agent Application Development
+
+Prompts and configurations that define your Agent Applications are stored in simple files, with minimal boilerplate, enabling simple management and version control.
+
+Chat with individual Agents and Components before, during and after workflow execution to tune and diagnose your agent application. Simple model selection makes testing Model <-> MCP Server interaction painless.
+
+## Get started:
+
+Start by installing the [uv package manager](https://docs.astral.sh/uv/) for Python. Then:
+
+```bash
+uv pip install fast-agent-mcp # install fast-agent
+fast-agent setup # create an example agent and config files
+uv run agent.py # run your first agent
+uv run agent.py --model=o3-mini.low # specify a model
+fast-agent bootstrap workflow # create "building effective agents" examples
+```
+
+Other bootstrap examples include a Researcher Agent (with Evaluator-Optimizer workflow) and Data Analysis Agent (similar to the ChatGPT experience), demonstrating MCP Roots support.
+
+> Windows Users - there are a couple of configuration changes needed for the Filesystem and Docker MCP Servers - necessary changes are detailed within the configuration files.
+
+### Basic Agents
+
+Defining an agent is as simple as:
+
+```python
+@fast.agent(
+  instruction="Given an object, respond only with an estimate of its size."
+)
+```
+
+We can then send messages to the Agent:
+
+```python
+async with fast.run() as agent:
+  moon_size = await agent("the moon")
+  print(moon_size)
+```
+
+Or start an interactive chat with the Agent:
+```python
+async with fast.run() as agent:
+  await agent()
+```
+
+Here is the complete `sizer.py` Agent application, with boilerplate code:
+```python
+import asyncio
+from mcp_agent.core.fastagent import FastAgent
+
+# Create the application
+fast = FastAgent("Agent Example")
+
+@fast.agent(
+  instruction="Given an object, respond only with an estimate of its size."
+)
+
+async def main():
+  async with fast.run() as agent:
+    await agent()
+
+if __name__ == "__main__":
+    asyncio.run(main())
+```
+
+The Agent can be run with `uv run sizer.py` and with a specific model using the command line option `--model gpt-4o-mini`.
+
+### Combining Agents and using MCP Servers
+
+_To generate examples use `fast-agent bootstrap workflow`. This example can be run with `uv run chaining.py`. fast-agent looks for configuration files in the current directory before checking parent directories recursively._
+
+Agents can be chained to build a workflow:
+```python
+@fast.agent(
+    "url_fetcher",
+    instruction="Given a URL, provide a complete and comprehensive summary",
+    servers=["fetch"], # Name of an MCP Server defined in fastagent.config.yaml
+)
+@fast.agent(
+    "social_media",
+    instruction="""
+    Write a 280 character social media post for any given text.
+    Respond only with the post, never use hashtags.
+    """,
+)
+
+async def main():
+    async with fast.run() as agent:
+        await agent.social_media(
+            await agent.url_fetcher("http://llmindset.co.uk/resources/mcp-hfspace/")
+        )
+```
+
+All Agents and Workflows respond to `.send("message")` to send a message and `.prompt()` to begin a chat session.
+
+## Workflows
+
+### Chain
+
+Alternatively, use the `chain` workflow type and the `prompt()` method to capture user input:
+```python
+
+@fast.chain(
+  "post_writer",
+  sequence=["url_fetcher", "social_media"]
+)
+
+# we can then prompt it directly:
+async with fast.run() as agent:
+  await agent.post_writer.prompt()
+
+```
+Chains can be incorporated in other workflows, or contain other workflow elements (including other Chains). You can set an `instruction` to precisely describe its capabilities to other workflow steps if needed.
+
+### Parallel
+
+The Parallel Workflow sends the same message to multiple Agents simultaneously (`fan-out`), then uses the `fan-in` agent to process the combined content.
+
+```python
+
+@fast.agent(
+  name="consolidator",
+  instruction="combine the lists, remove duplicates"
+)
+
+@fast.parallel(
+  name="ensemble",
+  fan_out=["agent_o3-mini", "agent_sonnet37", ...],
+  fan_in="consolidator"
+)
+
+async with fast.run() as agent:
+  result = await agent.ensemble.send("what are the 10 most important aspects of project management")
+```
+
+Look at the `parallel.py` workflow example for more details.
+
+### Evaluator-Optimizer
+
+Evaluator-Optimizers use two agents: one to generate content (the `generator`), and one to judge the content and provide actionable feedback (the `evaluator`). Messages are sent to the generator first, then the pair run in a loop until either the evaluator is satisfied with the quality, or the maximum number of refinements is reached.
+
+```python
+@fast.evaluator_optimizer(
+  name="researcher",
+  generator="web_searcher",
+  evaluator="quality_assurance",
+  min_rating="EXCELLENT",
+  max_refinements=3
+)
+
+async with fast.run() as agent:
+  await agent.researcher.send("produce a report on how to make the perfect espresso")
+```
+
+See the `evaluator.py` workflow example, or `fast-agent bootstrap researcher` for a more complete example.
+
+### Router
+
+Routers use an LLM to assess a message, and route it to the most appropriate Agent. The routing prompt is automatically generated by the router.
+
+```python
+@fast.router(
+  name="route",
+  agents=["agent1", "agent2", "agent3"]
+)
+```
+
+Look at the `router.py` workflow for an example.
+
+### Orchestrator
+
+Given a task, an Orchestrator uses an LLM to generate a plan to divide the task amongst the available agents and aggregate a result. The planning and aggregation prompts are generated by the Orchestrator, which benefits from using more capable models. Plans can either be built once at the beginning (`plantype="full"`) or iteratively (`plantype="iterative"`).
+
+```python
+@fast.orchestrator(
+  name="orchestrate",
+  agents=["task1", "task2", "task3"]
+)
+```
+
+## Agent Features
+
+```python
+@fast.agent(
+  name="agent",
+  instruction="instructions",
+  servers=["filesystem"], # list of MCP Servers for the agent, configured in fastagent.config.yaml
+  model="o3-mini.high", # specify a model for the agent
+  use_history=True, # agent can maintain chat history
+  human_input=True, # agent can request human input
+)
+```
+
+### Human Input
+
+When `human_input` is set to true for an Agent, it is presented with the option to prompt the User for input.
+
+## Project Notes
+
+`fast-agent` builds on the [`mcp-agent`](https://github.com/lastmile-ai/mcp-agent) project by Sarmad Qadri.
+
+### llmindset.co.uk fork:
+
+- "FastAgent" style prototyping, with per-agent models
+- API keys through Environment Variables
+- Warm-up / Post-Workflow Agent Interactions
+- Quick Setup
+- Interactive Prompt Mode
+- Simple Model Selection with aliases
+- User/Assistant and Tool Call message display
+- MCP Server Environment Variable support
+- MCP Roots support
+- Comprehensive Progress display
+- JSONL file logging with secret revocation
+- OpenAI o1/o3-mini support with reasoning level
+- Enhanced Human Input Messaging and Handling
+- Declarative workflows
+
+### Features to add.
+
+- Chat History Clear.
+
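The Parallel snippet in the README above elides the fan-out list (`...`) and omits the surrounding boilerplate. Below is a runnable sketch of the same multi-model idea, using only model strings that appear elsewhere in the description (`gpt-4o-mini`, `o3-mini.low`); the agent names are illustrative, and the sketch assumes the decorators stack on `main()` the same way the chaining example shows.

```python
# Parallel sketch - illustrative only; agent names and model choices are assumptions,
# the decorator pattern follows the Parallel section of the README above.
import asyncio

from mcp_agent.core.fastagent import FastAgent

fast = FastAgent("Parallel Example")


@fast.agent(
    name="ideas_gpt",
    instruction="List the 10 most important aspects of project management.",
    model="gpt-4o-mini",  # per-agent model, as in the Agent Features section
)
@fast.agent(
    name="ideas_o3",
    instruction="List the 10 most important aspects of project management.",
    model="o3-mini.low",
)
@fast.agent(
    name="consolidator",
    instruction="combine the lists, remove duplicates",
)
@fast.parallel(
    name="ensemble",
    fan_out=["ideas_gpt", "ideas_o3"],  # the same message goes to every fan-out agent
    fan_in="consolidator",              # the fan-in agent merges the combined output
)
async def main():
    async with fast.run() as agent:
        result = await agent.ensemble.send(
            "what are the 10 most important aspects of project management"
        )
        print(result)


if __name__ == "__main__":
    asyncio.run(main())
```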
{fast_agent_mcp-0.0.13 → fast_agent_mcp-0.0.14}/src/mcp_agent/core/exceptions.py

@@ -56,6 +56,13 @@ class ModelConfigError(FastAgentError):
         super().__init__(message, details)
 
 
+class CircularDependencyError(FastAgentError):
+    """Raised when we detect a Circular Dependency in the workflow"""
+
+    def __init__(self, message: str, details: str = ""):
+        super().__init__(message, details)
+
+
 class PromptExitError(FastAgentError):
     """Raised from enhanced_prompt when the user requests hard exits"""
 
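The new `CircularDependencyError`, together with the enlarged `fastagent.py` (+219 lines), suggests workflow validation that rejects self-referencing definitions. The `detect_cycle` helper below is purely hypothetical and not part of the package; only the exception class imported from `mcp_agent.core.exceptions` comes from the code added in the hunk above.

```python
# Hypothetical illustration only: detect_cycle is not part of fast-agent;
# only CircularDependencyError (added in the hunk above) is from the package.
from mcp_agent.core.exceptions import CircularDependencyError


def detect_cycle(dependencies: dict[str, list[str]]) -> None:
    """Raise CircularDependencyError if the agent/workflow graph contains a cycle."""
    visiting: set[str] = set()
    visited: set[str] = set()

    def visit(name: str, path: list[str]) -> None:
        if name in visiting:
            # report the offending path in the exception's details field
            cycle = " -> ".join(path + [name])
            raise CircularDependencyError("Circular dependency detected", details=cycle)
        if name in visited:
            return
        visiting.add(name)
        for dep in dependencies.get(name, []):
            visit(dep, path + [name])
        visiting.discard(name)
        visited.add(name)

    for node in dependencies:
        visit(node, [])


# e.g. a chain whose sequence refers back to itself is rejected:
try:
    detect_cycle({"post_writer": ["url_fetcher", "post_writer"]})
except CircularDependencyError as exc:
    print("rejected:", exc)
```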