fast-agent-mcp 0.1.11__py3-none-any.whl → 0.1.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/METADATA +1 -1
- fast_agent_mcp-0.1.13.dist-info/RECORD +164 -0
- mcp_agent/agents/agent.py +37 -102
- mcp_agent/app.py +16 -27
- mcp_agent/cli/commands/bootstrap.py +22 -52
- mcp_agent/cli/commands/config.py +4 -4
- mcp_agent/cli/commands/setup.py +11 -26
- mcp_agent/cli/main.py +6 -9
- mcp_agent/cli/terminal.py +2 -2
- mcp_agent/config.py +1 -5
- mcp_agent/context.py +13 -26
- mcp_agent/context_dependent.py +3 -7
- mcp_agent/core/agent_app.py +46 -122
- mcp_agent/core/agent_types.py +29 -2
- mcp_agent/core/agent_utils.py +3 -5
- mcp_agent/core/decorators.py +6 -14
- mcp_agent/core/enhanced_prompt.py +25 -52
- mcp_agent/core/error_handling.py +1 -1
- mcp_agent/core/exceptions.py +8 -8
- mcp_agent/core/factory.py +30 -72
- mcp_agent/core/fastagent.py +48 -88
- mcp_agent/core/mcp_content.py +10 -19
- mcp_agent/core/prompt.py +8 -15
- mcp_agent/core/proxies.py +34 -25
- mcp_agent/core/request_params.py +46 -0
- mcp_agent/core/types.py +6 -6
- mcp_agent/core/validation.py +16 -16
- mcp_agent/executor/decorator_registry.py +11 -23
- mcp_agent/executor/executor.py +8 -17
- mcp_agent/executor/task_registry.py +2 -4
- mcp_agent/executor/temporal.py +28 -74
- mcp_agent/executor/workflow.py +3 -5
- mcp_agent/executor/workflow_signal.py +17 -29
- mcp_agent/human_input/handler.py +4 -9
- mcp_agent/human_input/types.py +2 -3
- mcp_agent/logging/events.py +1 -5
- mcp_agent/logging/json_serializer.py +7 -6
- mcp_agent/logging/listeners.py +20 -23
- mcp_agent/logging/logger.py +15 -17
- mcp_agent/logging/rich_progress.py +10 -8
- mcp_agent/logging/tracing.py +4 -6
- mcp_agent/logging/transport.py +24 -24
- mcp_agent/mcp/gen_client.py +4 -12
- mcp_agent/mcp/interfaces.py +107 -88
- mcp_agent/mcp/mcp_agent_client_session.py +11 -19
- mcp_agent/mcp/mcp_agent_server.py +8 -10
- mcp_agent/mcp/mcp_aggregator.py +49 -122
- mcp_agent/mcp/mcp_connection_manager.py +16 -37
- mcp_agent/mcp/prompt_message_multipart.py +12 -18
- mcp_agent/mcp/prompt_serialization.py +13 -38
- mcp_agent/mcp/prompts/prompt_load.py +99 -0
- mcp_agent/mcp/prompts/prompt_server.py +21 -128
- mcp_agent/mcp/prompts/prompt_template.py +20 -42
- mcp_agent/mcp/resource_utils.py +8 -17
- mcp_agent/mcp/sampling.py +62 -64
- mcp_agent/mcp/stdio.py +11 -8
- mcp_agent/mcp_server/__init__.py +1 -1
- mcp_agent/mcp_server/agent_server.py +10 -17
- mcp_agent/mcp_server_registry.py +13 -35
- mcp_agent/resources/examples/data-analysis/analysis-campaign.py +1 -1
- mcp_agent/resources/examples/data-analysis/analysis.py +1 -1
- mcp_agent/resources/examples/data-analysis/slides.py +110 -0
- mcp_agent/resources/examples/internal/agent.py +2 -1
- mcp_agent/resources/examples/internal/job.py +2 -1
- mcp_agent/resources/examples/internal/prompt_category.py +1 -1
- mcp_agent/resources/examples/internal/prompt_sizing.py +3 -5
- mcp_agent/resources/examples/internal/sizer.py +2 -1
- mcp_agent/resources/examples/internal/social.py +2 -1
- mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +1 -1
- mcp_agent/resources/examples/prompting/__init__.py +1 -1
- mcp_agent/resources/examples/prompting/agent.py +2 -1
- mcp_agent/resources/examples/prompting/image_server.py +5 -11
- mcp_agent/resources/examples/researcher/researcher-eval.py +1 -1
- mcp_agent/resources/examples/researcher/researcher-imp.py +3 -4
- mcp_agent/resources/examples/researcher/researcher.py +2 -1
- mcp_agent/resources/examples/workflows/agent_build.py +2 -1
- mcp_agent/resources/examples/workflows/chaining.py +2 -1
- mcp_agent/resources/examples/workflows/evaluator.py +2 -1
- mcp_agent/resources/examples/workflows/human_input.py +2 -1
- mcp_agent/resources/examples/workflows/orchestrator.py +2 -1
- mcp_agent/resources/examples/workflows/parallel.py +2 -1
- mcp_agent/resources/examples/workflows/router.py +2 -1
- mcp_agent/resources/examples/workflows/sse.py +1 -1
- mcp_agent/telemetry/usage_tracking.py +2 -1
- mcp_agent/ui/console_display.py +17 -41
- mcp_agent/workflows/embedding/embedding_base.py +1 -4
- mcp_agent/workflows/embedding/embedding_cohere.py +2 -2
- mcp_agent/workflows/embedding/embedding_openai.py +4 -13
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +23 -57
- mcp_agent/workflows/intent_classifier/intent_classifier_base.py +5 -8
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +7 -11
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +4 -8
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +4 -8
- mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +11 -22
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +3 -3
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +4 -6
- mcp_agent/workflows/llm/anthropic_utils.py +8 -29
- mcp_agent/workflows/llm/augmented_llm.py +94 -332
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +43 -76
- mcp_agent/workflows/llm/augmented_llm_openai.py +46 -100
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +42 -20
- mcp_agent/workflows/llm/augmented_llm_playback.py +8 -6
- mcp_agent/workflows/llm/memory.py +103 -0
- mcp_agent/workflows/llm/model_factory.py +9 -21
- mcp_agent/workflows/llm/openai_utils.py +1 -1
- mcp_agent/workflows/llm/prompt_utils.py +39 -27
- mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +246 -184
- mcp_agent/workflows/llm/providers/multipart_converter_openai.py +212 -202
- mcp_agent/workflows/llm/providers/openai_multipart.py +19 -61
- mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +11 -212
- mcp_agent/workflows/llm/providers/sampling_converter_openai.py +13 -215
- mcp_agent/workflows/llm/sampling_converter.py +117 -0
- mcp_agent/workflows/llm/sampling_format_converter.py +12 -29
- mcp_agent/workflows/orchestrator/orchestrator.py +24 -67
- mcp_agent/workflows/orchestrator/orchestrator_models.py +14 -40
- mcp_agent/workflows/parallel/fan_in.py +17 -47
- mcp_agent/workflows/parallel/fan_out.py +6 -12
- mcp_agent/workflows/parallel/parallel_llm.py +9 -26
- mcp_agent/workflows/router/router_base.py +29 -59
- mcp_agent/workflows/router/router_embedding.py +11 -25
- mcp_agent/workflows/router/router_embedding_cohere.py +2 -2
- mcp_agent/workflows/router/router_embedding_openai.py +2 -2
- mcp_agent/workflows/router/router_llm.py +12 -28
- mcp_agent/workflows/swarm/swarm.py +20 -48
- mcp_agent/workflows/swarm/swarm_anthropic.py +2 -2
- mcp_agent/workflows/swarm/swarm_openai.py +2 -2
- fast_agent_mcp-0.1.11.dist-info/RECORD +0 -160
- mcp_agent/workflows/llm/llm_selector.py +0 -345
- {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/licenses/LICENSE +0 -0
mcp_agent/core/fastagent.py
CHANGED
```diff
@@ -3,61 +3,59 @@ Decorator-based interface for MCP Agent applications.
 Provides a simplified way to create and manage agents using decorators.
 """
 
+import argparse
 import asyncio
+from contextlib import asynccontextmanager
+from functools import partial
 from typing import (
-    Optional,
+    Any,
     Dict,
+    Optional,
     TypeVar,
-    Any,
 )
+
 import yaml
-import argparse
-from functools import partial
-from contextlib import asynccontextmanager
+
+# TODO -- reinstate once Windows&Python 3.13 platform issues are fixed
+# import readline  # noqa: F401
+from rich import print
 
 from mcp_agent.app import MCPApp
 from mcp_agent.config import Settings
-
 from mcp_agent.core.agent_app import AgentApp
 from mcp_agent.core.agent_types import AgentType
+from mcp_agent.core.decorators import (
+    _create_decorator,
+    agent,
+    chain,
+    evaluator_optimizer,
+    orchestrator,
+    parallel,
+    passthrough,
+    router,
+)
 from mcp_agent.core.error_handling import handle_error
-from mcp_agent.core.proxies import LLMAgentProxy
-from mcp_agent.core.types import ProxyDict
 from mcp_agent.core.exceptions import (
     AgentConfigError,
     CircularDependencyError,
     ModelConfigError,
     PromptExitError,
-    ServerConfigError,
     ProviderKeyError,
+    ServerConfigError,
     ServerInitializationError,
 )
-from mcp_agent.core.decorators import (
-    _create_decorator,
-    agent,
-    orchestrator,
-    parallel,
-    evaluator_optimizer,
-    router,
-    chain,
-    passthrough,
+from mcp_agent.core.factory import (
+    create_agents_by_type,
+    create_agents_in_dependency_order,
+    create_basic_agents,
+    get_model_factory,
 )
+from mcp_agent.core.proxies import LLMAgentProxy
+from mcp_agent.core.types import ProxyDict
 from mcp_agent.core.validation import (
     validate_server_references,
     validate_workflow_references,
 )
-from mcp_agent.core.factory import (
-    get_model_factory,
-    create_basic_agents,
-    create_agents_in_dependency_order,
-    create_agents_by_type,
-)
-
-# TODO -- reinstate once Windows&Python 3.13 platform issues are fixed
-# import readline  # noqa: F401
-
-from rich import print
-
 from mcp_agent.mcp_server import AgentMCPServer
 
 T = TypeVar("T")  # For the wrapper classes
@@ -74,7 +72,7 @@ class FastAgent:
         name: str,
         config_path: Optional[str] = None,
         ignore_unknown_args: bool = False,
-    ):
+    ) -> None:
         """
         Initialize the decorator interface.
 
@@ -218,9 +216,7 @@ class FastAgent:
         model_factory_func = partial(self._get_model_factory)
         return await create_basic_agents(agent_app, self.agents, model_factory_func)
 
-    async def _create_orchestrators(
-        self, agent_app: MCPApp, active_agents: ProxyDict
-    ) -> ProxyDict:
+    async def _create_orchestrators(self, agent_app: MCPApp, active_agents: ProxyDict) -> ProxyDict:
         """
         Create orchestrator agents.
 
@@ -231,13 +227,9 @@ class FastAgent:
         Returns:
             Dictionary of initialized orchestrator agents wrapped in appropriate proxies
         """
-        return await self._create_agents_by_type(
-            agent_app, AgentType.ORCHESTRATOR, active_agents
-        )
+        return await self._create_agents_by_type(agent_app, AgentType.ORCHESTRATOR, active_agents)
 
-    async def _create_evaluator_optimizers(
-        self, agent_app: MCPApp, active_agents: ProxyDict
-    ) -> ProxyDict:
+    async def _create_evaluator_optimizers(self, agent_app: MCPApp, active_agents: ProxyDict) -> ProxyDict:
         """
         Create evaluator-optimizer workflows.
 
@@ -248,13 +240,9 @@ class FastAgent:
         Returns:
             Dictionary of initialized evaluator-optimizer workflows
         """
-        return await self._create_agents_by_type(
-            agent_app, AgentType.EVALUATOR_OPTIMIZER, active_agents
-        )
+        return await self._create_agents_by_type(agent_app, AgentType.EVALUATOR_OPTIMIZER, active_agents)
 
-    async def _create_parallel_agents(
-        self, agent_app: MCPApp, active_agents: ProxyDict
-    ) -> ProxyDict:
+    async def _create_parallel_agents(self, agent_app: MCPApp, active_agents: ProxyDict) -> ProxyDict:
         """
         Create parallel execution agents in dependency order.
 
@@ -274,9 +262,7 @@ class FastAgent:
             model_factory_func,
         )
 
-    async def _create_agents_in_dependency_order(
-        self, agent_app: MCPApp, active_agents: ProxyDict, agent_type: AgentType
-    ) -> ProxyDict:
+    async def _create_agents_in_dependency_order(self, agent_app: MCPApp, active_agents: ProxyDict, agent_type: AgentType) -> ProxyDict:
         """
         Create agents in dependency order to avoid circular references.
         Works for both Parallel and Chain workflows.
@@ -290,13 +276,9 @@ class FastAgent:
             Dictionary of initialized agents
         """
         model_factory_func = partial(self._get_model_factory)
-        return await create_agents_in_dependency_order(
-            agent_app, self.agents, active_agents, agent_type, model_factory_func
-        )
+        return await create_agents_in_dependency_order(agent_app, self.agents, active_agents, agent_type, model_factory_func)
 
-    async def _create_routers(
-        self, agent_app: MCPApp, active_agents: ProxyDict
-    ) -> ProxyDict:
+    async def _create_routers(self, agent_app: MCPApp, active_agents: ProxyDict) -> ProxyDict:
         """
         Create router agents.
 
@@ -307,9 +289,7 @@ class FastAgent:
         Returns:
             Dictionary of initialized router agents
         """
-        return await self._create_agents_by_type(
-            agent_app, AgentType.ROUTER, active_agents
-        )
+        return await self._create_agents_by_type(agent_app, AgentType.ROUTER, active_agents)
 
     @asynccontextmanager
     async def run(self):
@@ -327,11 +307,7 @@ class FastAgent:
         try:
            async with self.app.run() as agent_app:
                 # Apply quiet mode directly to the context's config if needed
-                if (
-                    quiet_mode
-                    and hasattr(agent_app.context, "config")
-                    and hasattr(agent_app.context.config, "logger")
-                ):
+                if quiet_mode and hasattr(agent_app.context, "config") and hasattr(agent_app.context.config, "logger"):
                     # Apply after initialization but before agents are created
                     agent_app.context.config.logger.progress_display = False
                     agent_app.context.config.logger.show_chat = False
@@ -351,9 +327,7 @@ class FastAgent:
                 active_agents = await self._create_basic_agents(agent_app)
 
                 # Create parallel agents next as they might be dependencies
-                parallel_agents = await self._create_parallel_agents(
-                    agent_app, active_agents
-                )
+                parallel_agents = await self._create_parallel_agents(agent_app, active_agents)
                 active_agents.update(parallel_agents)
 
                 # Create routers next
@@ -361,21 +335,15 @@ class FastAgent:
                 active_agents.update(routers)
 
                 # Create chains next - MOVED UP because evaluator-optimizers might depend on chains
-                chains = await self._create_agents_in_dependency_order(
-                    agent_app, active_agents, AgentType.CHAIN
-                )
+                chains = await self._create_agents_in_dependency_order(agent_app, active_agents, AgentType.CHAIN)
                 active_agents.update(chains)
 
                 # Now create evaluator-optimizers AFTER chains are available
-                evaluator_optimizers = await self._create_evaluator_optimizers(
-                    agent_app, active_agents
-                )
+                evaluator_optimizers = await self._create_evaluator_optimizers(agent_app, active_agents)
                 active_agents.update(evaluator_optimizers)
 
                 # Create orchestrators last as they might depend on any other agent type
-                orchestrators = await self._create_orchestrators(
-                    agent_app, active_agents
-                )
+                orchestrators = await self._create_orchestrators(agent_app, active_agents)
 
                 # Add orchestrators to active_agents (other types were already added)
                 active_agents.update(orchestrators)
@@ -393,9 +361,7 @@ class FastAgent:
 
                 if agent_name not in active_agents:
                     available_agents = ", ".join(active_agents.keys())
-                    print(
-                        f"\n\nError: Agent '{agent_name}' not found. Available agents: {available_agents}"
-                    )
+                    print(f"\n\nError: Agent '{agent_name}' not found. Available agents: {available_agents}")
                     raise SystemExit(1)
 
                 try:
@@ -408,9 +374,7 @@ class FastAgent:
 
                     raise SystemExit(0)
                 except Exception as e:
-                    print(
-                        f"\n\nError sending message to agent '{agent_name}': {str(e)}"
-                    )
+                    print(f"\n\nError sending message to agent '{agent_name}': {str(e)}")
                     raise SystemExit(1)
 
             yield wrapper
@@ -488,9 +452,7 @@ class FastAgent:
                 print(f"DEBUG {e.message}")
                 pass  # Ignore cleanup errors
 
-    def _handle_error(
-        self, e: Exception, error_type: str, suggestion: str = None
-    ) -> None:
+    def _handle_error(self, e: Exception, error_type: str, suggestion: str = None) -> None:
        """
         Handle errors with consistent formatting and messaging.
 
@@ -535,7 +497,7 @@ class FastAgent:
         port: int = 8000,
         server_name: str = None,
         server_description: str = None,
-    ):
+    ) -> None:
         """
         Run the FastAgent application and expose agents through an MCP server.
 
@@ -555,9 +517,7 @@ class FastAgent:
         )
 
         # Run the MCP server in a separate task
-        server_task = asyncio.create_task(
-            mcp_server.run_async(transport=transport, host=host, port=port)
-        )
+        server_task = asyncio.create_task(mcp_server.run_async(transport=transport, host=host, port=port))
 
         try:
             # Wait for the server task to complete (or be cancelled)
```
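For orientation, a minimal sketch of the decorator API this module provides; the agent name, instruction, and message are illustrative placeholders, not values taken from the package:

```python
# Sketch of typical FastAgent usage, assuming a fastagent.config.yaml with a
# default model; all names here are hypothetical examples.
import asyncio

from mcp_agent.core.fastagent import FastAgent

fast = FastAgent("example-app")


@fast.agent(name="assistant", instruction="You are a helpful assistant.")
async def main() -> None:
    # fast.run() is an async context manager: it builds the declared agents
    # (basic agents, then parallel, routers, chains, evaluator-optimizers,
    # and orchestrators) and yields a wrapper for interacting with them.
    async with fast.run() as agent:
        await agent.send("Hello")


if __name__ == "__main__":
    asyncio.run(main())
```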
mcp_agent/core/mcp_content.py
CHANGED
```diff
@@ -7,14 +7,14 @@ EmbeddedResource, and other MCP content types with minimal boilerplate.
 
 import base64
 from pathlib import Path
-from typing import
+from typing import Any, List, Literal, Optional, Union
 
 from mcp.types import (
-    TextContent,
-    ImageContent,
+    BlobResourceContents,
     EmbeddedResource,
+    ImageContent,
+    TextContent,
     TextResourceContents,
-    BlobResourceContents,
 )
 
 from mcp_agent.mcp.mime_utils import (
@@ -86,9 +86,7 @@ def MCPImage(
 
     return {
         "role": role,
-        "content": ImageContent(
-            type="image", data=b64_data, mimeType=mime_type, annotations=annotations
-        ),
+        "content": ImageContent(type="image", data=b64_data, mimeType=mime_type, annotations=annotations),
     }
 
 
@@ -134,27 +132,20 @@ def MCPFile(
         # Fallback to binary if text read fails
         binary_data = path.read_bytes()
         b64_data = base64.b64encode(binary_data).decode("ascii")
-        resource = BlobResourceContents(
-            uri=uri, blob=b64_data, mimeType=mime_type or "application/octet-stream"
-        )
+        resource = BlobResourceContents(uri=uri, blob=b64_data, mimeType=mime_type or "application/octet-stream")
 
     return {
         "role": role,
-        "content": EmbeddedResource(
-            type="resource", resource=resource, annotations=annotations
-        ),
+        "content": EmbeddedResource(type="resource", resource=resource, annotations=annotations),
     }
 
 
-
-def MCPPrompt(
-    *content_items, role: Literal["user", "assistant"] = "user"
-) -> List[dict]:
+def MCPPrompt(*content_items, role: Literal["user", "assistant"] = "user") -> List[dict]:
     """
     Create one or more prompt messages with various content types.
 
     This function intelligently creates different content types:
-    - Strings become TextContent
+    - Strings become TextContent
     - File paths with image mime types become ImageContent
     - File paths with text mime types or other mime types become EmbeddedResource
     - Dicts with role and content are passed through unchanged
@@ -180,7 +171,7 @@ def MCPPrompt(
             # File path - determine the content type based on mime type
             path_str = str(item)
             mime_type = guess_mime_type(path_str)
-
+
             if is_image_mime_type(mime_type):
                 # Image files (except SVG which is handled as text)
                 result.append(MCPImage(path=item, role=role))
```
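An illustrative sketch of the content helpers refactored above; the file paths are hypothetical:

```python
# MCPPrompt routes each item by type: plain strings become TextContent,
# image paths become ImageContent, other file paths become EmbeddedResource,
# and dicts with "role"/"content" pass through unchanged.
from pathlib import Path

from mcp_agent.core.mcp_content import MCPImage, MCPPrompt

messages = MCPPrompt(
    "Please describe this chart.",        # string -> TextContent
    Path("reports/q3-revenue.png"),       # hypothetical image -> ImageContent
    role="user",
)

# A single image message dict, as produced for image paths above.
image_message = MCPImage(path=Path("reports/q3-revenue.png"), role="user")
```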
mcp_agent/core/prompt.py
CHANGED
```diff
@@ -5,10 +5,11 @@ Prompt class for easily creating and working with MCP prompt content.
 from typing import List, Literal
 
 from mcp.types import PromptMessage
+
 from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 
 # Import our content helper functions
-from .mcp_content import
+from .mcp_content import Assistant, MCPPrompt, User
 
 
 class Prompt:
@@ -39,9 +40,7 @@ class Prompt:
             A PromptMessageMultipart with user role and the specified content
         """
         messages = User(*content_items)
-        return PromptMessageMultipart(
-            role="user", content=[msg["content"] for msg in messages]
-        )
+        return PromptMessageMultipart(role="user", content=[msg["content"] for msg in messages])
 
     @classmethod
     def assistant(cls, *content_items) -> PromptMessageMultipart:
@@ -55,14 +54,10 @@ class Prompt:
             A PromptMessageMultipart with assistant role and the specified content
         """
         messages = Assistant(*content_items)
-        return PromptMessageMultipart(
-            role="assistant", content=[msg["content"] for msg in messages]
-        )
+        return PromptMessageMultipart(role="assistant", content=[msg["content"] for msg in messages])
 
     @classmethod
-    def message(
-        cls, *content_items, role: Literal["user", "assistant"] = "user"
-    ) -> PromptMessageMultipart:
+    def message(cls, *content_items, role: Literal["user", "assistant"] = "user") -> PromptMessageMultipart:
         """
         Create a PromptMessageMultipart with the specified role and content items.
 
@@ -100,7 +95,7 @@ class Prompt:
         for item in messages:
             if isinstance(item, PromptMessageMultipart):
                 # Convert PromptMessageMultipart to a list of PromptMessages
-                result.extend(item.
+                result.extend(item.from_multipart())
             elif isinstance(item, dict) and "role" in item and "content" in item:
                 # Convert a single message dict to PromptMessage
                 result.append(PromptMessage(**item))
@@ -114,9 +109,7 @@ class Prompt:
         return result
 
     @classmethod
-    def from_multipart(
-        cls, multipart: List[PromptMessageMultipart]
-    ) -> List[PromptMessage]:
+    def from_multipart(cls, multipart: List[PromptMessageMultipart]) -> List[PromptMessage]:
         """
         Convert a list of PromptMessageMultipart objects to PromptMessages.
 
@@ -128,5 +121,5 @@ class Prompt:
         """
         result = []
         for mp in multipart:
-            result.extend(mp.
+            result.extend(mp.from_multipart())
         return result
```
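An illustrative sketch of the `Prompt` helpers shown above; the strings and path are placeholders:

```python
# Build multipart messages from mixed content, then flatten them back into
# plain PromptMessage objects, as the class methods above support.
from pathlib import Path

from mcp_agent.core.prompt import Prompt

user_msg = Prompt.user("Summarise the attached notes", Path("notes/meeting.md"))
assistant_msg = Prompt.assistant("Here is a brief summary of the notes.")

# Convert multipart messages back to a flat list of PromptMessage objects.
flat_messages = Prompt.from_multipart([user_msg, assistant_msg])
```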
mcp_agent/core/proxies.py
CHANGED
```diff
@@ -6,16 +6,18 @@ FOR COMPATIBILITY WITH LEGACY MCP-AGENT CODE
 
 """
 
-from typing import
+from typing import TYPE_CHECKING, Dict, List, Optional, Union
+
+from mcp.types import EmbeddedResource
 
 from mcp_agent.agents.agent import Agent
 from mcp_agent.app import MCPApp
+from mcp_agent.core.request_params import RequestParams
 from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
-from mcp.types import EmbeddedResource
 
 # Handle circular imports
 if TYPE_CHECKING:
-    from mcp_agent.core.types import
+    from mcp_agent.core.types import ProxyDict, WorkflowType
 else:
     # Define minimal versions for runtime
     from typing import Any
@@ -28,7 +30,7 @@ else:
 class BaseAgentProxy:
     """Base class for all proxy types"""
 
-    def __init__(self, app: MCPApp, name: str):
+    def __init__(self, app: MCPApp, name: str) -> None:
         self._app = app
         self._name = name
 
@@ -39,9 +41,7 @@ class BaseAgentProxy:
             return await self.prompt()
         return await self.send(message)
 
-    async def send(
-        self, message: Optional[Union[str, PromptMessageMultipart]] = None
-    ) -> str:
+    async def send(self, message: Optional[Union[str, PromptMessageMultipart]] = None) -> str:
         """
         Allow: agent.researcher.send('message') or agent.researcher.send(Prompt.user('message'))
 
@@ -87,9 +87,7 @@ class BaseAgentProxy:
         """Send a message to the agent and return the response"""
         raise NotImplementedError("Subclasses must implement send(prompt)")
 
-    async def apply_prompt(
-        self, prompt_name: str = None, arguments: dict[str, str] = None
-    ) -> str:
+    async def apply_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
         """
         Apply a Prompt from an MCP Server - implemented by subclasses.
         This is the preferred method for applying prompts.
@@ -105,7 +103,7 @@ class BaseAgentProxy:
 class LLMAgentProxy(BaseAgentProxy):
     """Proxy for regular agents that use _llm.generate_str()"""
 
-    def __init__(self, app: MCPApp, name: str, agent: Agent):
+    def __init__(self, app: MCPApp, name: str, agent: Agent) -> None:
         super().__init__(app, name)
         self._agent = agent
 
@@ -117,9 +115,7 @@ class LLMAgentProxy(BaseAgentProxy):
         """Send a message to the agent and return the response"""
         return await self._agent._llm.generate_prompt(prompt, None)
 
-    async def apply_prompt(
-        self, prompt_name: str = None, arguments: dict[str, str] = None
-    ) -> str:
+    async def apply_prompt(self, prompt_name: str = None, arguments: dict[str, str] = None) -> str:
         """
         Apply a prompt from an MCP server.
         This is the preferred method for applying prompts.
@@ -134,9 +130,7 @@ class LLMAgentProxy(BaseAgentProxy):
         return await self._agent.apply_prompt(prompt_name, arguments)
 
     # Add the new methods
-    async def get_embedded_resources(
-        self, server_name: str, resource_name: str
-    ) -> List[EmbeddedResource]:
+    async def get_embedded_resources(self, server_name: str, resource_name: str) -> List[EmbeddedResource]:
         """
         Get a resource from an MCP server and return it as a list of embedded resources ready for use in prompts.
 
@@ -166,15 +160,32 @@ class LLMAgentProxy(BaseAgentProxy):
         Returns:
             The agent's response as a string
         """
-        return await self._agent.with_resource(
-            prompt_content, server_name, resource_name
-        )
+        return await self._agent.with_resource(prompt_content, server_name, resource_name)
+
+    async def apply_prompt_messages(
+        self,
+        multipart_messages: List["PromptMessageMultipart"],
+        request_params: RequestParams | None = None,
+    ) -> str:
+        """
+        Apply a list of PromptMessageMultipart messages directly to the LLM.
+        This is a cleaner interface to _apply_prompt_template_provider_specific.
+
+        Args:
+            multipart_messages: List of PromptMessageMultipart objects
+            request_params: Optional parameters to configure the LLM request
+
+        Returns:
+            String representation of the assistant's response
+        """
+        # Delegate to the provider-specific implementation
+        return await self._agent._llm._apply_prompt_template_provider_specific(multipart_messages, request_params)
 
 
 class WorkflowProxy(BaseAgentProxy):
     """Proxy for workflow types that implement generate_str() directly"""
 
-    def __init__(self, app: MCPApp, name: str, workflow: WorkflowType):
+    def __init__(self, app: MCPApp, name: str, workflow: WorkflowType) -> None:
         super().__init__(app, name)
         self._workflow = workflow
 
@@ -186,7 +197,7 @@ class WorkflowProxy(BaseAgentProxy):
 class RouterProxy(BaseAgentProxy):
     """Proxy for LLM Routers"""
 
-    def __init__(self, app: MCPApp, name: str, workflow: WorkflowType):
+    def __init__(self, app: MCPApp, name: str, workflow: WorkflowType) -> None:
         super().__init__(app, name)
         self._workflow = workflow
 
@@ -215,9 +226,7 @@ class RouterProxy(BaseAgentProxy):
 class ChainProxy(BaseAgentProxy):
     """Proxy for chained agent operations"""
 
-    def __init__(
-        self, app: MCPApp, name: str, sequence: List[str], agent_proxies: ProxyDict
-    ):
+    def __init__(self, app: MCPApp, name: str, sequence: List[str], agent_proxies: ProxyDict) -> None:
         super().__init__(app, name)
         self._sequence = sequence
         self._agent_proxies = agent_proxies
```
mcp_agent/core/request_params.py
ADDED
```diff
@@ -0,0 +1,46 @@
+"""
+Request parameters definitions for LLM interactions.
+"""
+
+from typing import List
+
+from mcp import SamplingMessage
+from mcp.types import CreateMessageRequestParams
+from pydantic import Field
+
+
+class RequestParams(CreateMessageRequestParams):
+    """
+    Parameters to configure the AugmentedLLM 'generate' requests.
+    """
+
+    messages: List[SamplingMessage] = Field(exclude=True, default=[])
+    """
+    Ignored. 'messages' are removed from CreateMessageRequestParams
+    to avoid confusion with the 'message' parameter on 'generate' method.
+    """
+
+    maxTokens: int = 2048
+    """The maximum number of tokens to sample, as requested by the server."""
+
+    model: str | None = None
+    """
+    The model to use for the LLM generation.
+    If specified, this overrides the 'modelPreferences' selection criteria.
+    """
+
+    use_history: bool = True
+    """
+    Include the message history in the generate request.
+    """
+
+    max_iterations: int = 10
+    """
+    The maximum number of iterations to run the LLM for.
+    """
+
+    parallel_tool_calls: bool = True
+    """
+    Whether to allow multiple tool calls per iteration.
+    Also known as multi-step tool use.
+    """
```
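A small, illustrative sketch of constructing the new `RequestParams` model; the field values (including the model string) are examples only:

```python
# Defaults per the class above: maxTokens=2048, use_history=True,
# max_iterations=10, parallel_tool_calls=True, model=None.
from mcp_agent.core.request_params import RequestParams

params = RequestParams(
    model="claude-3-5-sonnet-latest",  # placeholder; overrides modelPreferences when set
    maxTokens=4096,
    max_iterations=5,
    parallel_tool_calls=False,
)
```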
mcp_agent/core/types.py
CHANGED
```diff
@@ -2,12 +2,14 @@
 Type definitions for fast-agent core module.
 """
 
-from typing import
+from typing import TYPE_CHECKING, Dict, TypeAlias, Union
 
 from mcp_agent.agents.agent import Agent
+from mcp_agent.workflows.evaluator_optimizer.evaluator_optimizer import (
+    EvaluatorOptimizerLLM,
+)
 from mcp_agent.workflows.orchestrator.orchestrator import Orchestrator
 from mcp_agent.workflows.parallel.parallel_llm import ParallelLLM
-from mcp_agent.workflows.evaluator_optimizer.evaluator_optimizer import EvaluatorOptimizerLLM
 from mcp_agent.workflows.router.router_llm import LLMRouter
 
 # Avoid circular imports
@@ -15,8 +17,6 @@ if TYPE_CHECKING:
     from mcp_agent.core.proxies import BaseAgentProxy
 
 # Type aliases for better readability
-WorkflowType: TypeAlias = Union[
-    Orchestrator, ParallelLLM, EvaluatorOptimizerLLM, LLMRouter
-]
+WorkflowType: TypeAlias = Union[Orchestrator, ParallelLLM, EvaluatorOptimizerLLM, LLMRouter]
 AgentOrWorkflow: TypeAlias = Union[Agent, WorkflowType]
-ProxyDict: TypeAlias = Dict[str, "BaseAgentProxy"] # Forward reference as string
+ProxyDict: TypeAlias = Dict[str, "BaseAgentProxy"]  # Forward reference as string
```