fast-agent-mcp 0.1.12__py3-none-any.whl → 0.1.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.1.13.dist-info}/METADATA +1 -1
- fast_agent_mcp-0.1.13.dist-info/RECORD +164 -0
- mcp_agent/agents/agent.py +37 -79
- mcp_agent/app.py +16 -22
- mcp_agent/cli/commands/bootstrap.py +22 -52
- mcp_agent/cli/commands/config.py +4 -4
- mcp_agent/cli/commands/setup.py +11 -26
- mcp_agent/cli/main.py +6 -9
- mcp_agent/cli/terminal.py +2 -2
- mcp_agent/config.py +1 -5
- mcp_agent/context.py +13 -24
- mcp_agent/context_dependent.py +3 -7
- mcp_agent/core/agent_app.py +45 -121
- mcp_agent/core/agent_utils.py +3 -5
- mcp_agent/core/decorators.py +5 -12
- mcp_agent/core/enhanced_prompt.py +25 -52
- mcp_agent/core/exceptions.py +8 -8
- mcp_agent/core/factory.py +29 -70
- mcp_agent/core/fastagent.py +48 -88
- mcp_agent/core/mcp_content.py +8 -16
- mcp_agent/core/prompt.py +8 -15
- mcp_agent/core/proxies.py +34 -25
- mcp_agent/core/request_params.py +6 -3
- mcp_agent/core/types.py +4 -6
- mcp_agent/core/validation.py +4 -3
- mcp_agent/executor/decorator_registry.py +11 -23
- mcp_agent/executor/executor.py +8 -17
- mcp_agent/executor/task_registry.py +2 -4
- mcp_agent/executor/temporal.py +28 -74
- mcp_agent/executor/workflow.py +3 -5
- mcp_agent/executor/workflow_signal.py +17 -29
- mcp_agent/human_input/handler.py +4 -9
- mcp_agent/human_input/types.py +2 -3
- mcp_agent/logging/events.py +1 -5
- mcp_agent/logging/json_serializer.py +7 -6
- mcp_agent/logging/listeners.py +20 -23
- mcp_agent/logging/logger.py +15 -17
- mcp_agent/logging/rich_progress.py +10 -8
- mcp_agent/logging/tracing.py +4 -6
- mcp_agent/logging/transport.py +22 -22
- mcp_agent/mcp/gen_client.py +4 -12
- mcp_agent/mcp/interfaces.py +71 -86
- mcp_agent/mcp/mcp_agent_client_session.py +11 -19
- mcp_agent/mcp/mcp_agent_server.py +8 -10
- mcp_agent/mcp/mcp_aggregator.py +45 -117
- mcp_agent/mcp/mcp_connection_manager.py +16 -37
- mcp_agent/mcp/prompt_message_multipart.py +12 -18
- mcp_agent/mcp/prompt_serialization.py +13 -38
- mcp_agent/mcp/prompts/prompt_load.py +99 -0
- mcp_agent/mcp/prompts/prompt_server.py +21 -128
- mcp_agent/mcp/prompts/prompt_template.py +20 -42
- mcp_agent/mcp/resource_utils.py +8 -17
- mcp_agent/mcp/sampling.py +5 -14
- mcp_agent/mcp/stdio.py +11 -8
- mcp_agent/mcp_server/agent_server.py +10 -17
- mcp_agent/mcp_server_registry.py +13 -35
- mcp_agent/resources/examples/data-analysis/analysis-campaign.py +1 -1
- mcp_agent/resources/examples/data-analysis/analysis.py +1 -1
- mcp_agent/resources/examples/data-analysis/slides.py +110 -0
- mcp_agent/resources/examples/internal/agent.py +2 -1
- mcp_agent/resources/examples/internal/job.py +2 -1
- mcp_agent/resources/examples/internal/prompt_category.py +1 -1
- mcp_agent/resources/examples/internal/prompt_sizing.py +3 -5
- mcp_agent/resources/examples/internal/sizer.py +2 -1
- mcp_agent/resources/examples/internal/social.py +2 -1
- mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +1 -1
- mcp_agent/resources/examples/prompting/agent.py +2 -1
- mcp_agent/resources/examples/prompting/image_server.py +5 -11
- mcp_agent/resources/examples/researcher/researcher-eval.py +1 -1
- mcp_agent/resources/examples/researcher/researcher-imp.py +3 -4
- mcp_agent/resources/examples/researcher/researcher.py +2 -1
- mcp_agent/resources/examples/workflows/agent_build.py +2 -1
- mcp_agent/resources/examples/workflows/chaining.py +2 -1
- mcp_agent/resources/examples/workflows/evaluator.py +2 -1
- mcp_agent/resources/examples/workflows/human_input.py +2 -1
- mcp_agent/resources/examples/workflows/orchestrator.py +2 -1
- mcp_agent/resources/examples/workflows/parallel.py +2 -1
- mcp_agent/resources/examples/workflows/router.py +2 -1
- mcp_agent/resources/examples/workflows/sse.py +1 -1
- mcp_agent/telemetry/usage_tracking.py +2 -1
- mcp_agent/ui/console_display.py +15 -39
- mcp_agent/workflows/embedding/embedding_base.py +1 -4
- mcp_agent/workflows/embedding/embedding_cohere.py +2 -2
- mcp_agent/workflows/embedding/embedding_openai.py +4 -13
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +23 -57
- mcp_agent/workflows/intent_classifier/intent_classifier_base.py +5 -8
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +7 -11
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +4 -8
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +4 -8
- mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +11 -22
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +3 -3
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +4 -6
- mcp_agent/workflows/llm/anthropic_utils.py +8 -29
- mcp_agent/workflows/llm/augmented_llm.py +69 -247
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +39 -73
- mcp_agent/workflows/llm/augmented_llm_openai.py +42 -97
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +13 -20
- mcp_agent/workflows/llm/augmented_llm_playback.py +8 -6
- mcp_agent/workflows/llm/memory.py +103 -0
- mcp_agent/workflows/llm/model_factory.py +8 -20
- mcp_agent/workflows/llm/openai_utils.py +1 -1
- mcp_agent/workflows/llm/prompt_utils.py +1 -3
- mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +47 -89
- mcp_agent/workflows/llm/providers/multipart_converter_openai.py +20 -55
- mcp_agent/workflows/llm/providers/openai_multipart.py +19 -61
- mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +10 -12
- mcp_agent/workflows/llm/providers/sampling_converter_openai.py +7 -11
- mcp_agent/workflows/llm/sampling_converter.py +4 -11
- mcp_agent/workflows/llm/sampling_format_converter.py +12 -12
- mcp_agent/workflows/orchestrator/orchestrator.py +24 -67
- mcp_agent/workflows/orchestrator/orchestrator_models.py +14 -40
- mcp_agent/workflows/parallel/fan_in.py +17 -47
- mcp_agent/workflows/parallel/fan_out.py +6 -12
- mcp_agent/workflows/parallel/parallel_llm.py +9 -26
- mcp_agent/workflows/router/router_base.py +19 -49
- mcp_agent/workflows/router/router_embedding.py +11 -25
- mcp_agent/workflows/router/router_embedding_cohere.py +2 -2
- mcp_agent/workflows/router/router_embedding_openai.py +2 -2
- mcp_agent/workflows/router/router_llm.py +12 -28
- mcp_agent/workflows/swarm/swarm.py +20 -48
- mcp_agent/workflows/swarm/swarm_anthropic.py +2 -2
- mcp_agent/workflows/swarm/swarm_openai.py +2 -2
- fast_agent_mcp-0.1.12.dist-info/RECORD +0 -161
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.1.13.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.1.13.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.1.13.dist-info}/licenses/LICENSE +0 -0

--- a/mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py
+++ b/mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py
@@ -1,4 +1,4 @@
-from typing import List, Optional
+from typing import TYPE_CHECKING, List, Optional
 
 from mcp_agent.workflows.embedding.embedding_cohere import CohereEmbeddingModel
 from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
@@ -21,11 +21,9 @@ class CohereEmbeddingIntentClassifier(EmbeddingIntentClassifier):
         embedding_model: CohereEmbeddingModel | None = None,
         context: Optional["Context"] = None,
         **kwargs,
-    ):
+    ) -> None:
         embedding_model = embedding_model or CohereEmbeddingModel()
-        super().__init__(
-            embedding_model=embedding_model, intents=intents, context=context, **kwargs
-        )
+        super().__init__(embedding_model=embedding_model, intents=intents, context=context, **kwargs)
 
     @classmethod
     async def create(
@@ -38,8 +36,6 @@ class CohereEmbeddingIntentClassifier(EmbeddingIntentClassifier):
         Factory method to create and initialize a classifier.
         Use this instead of constructor since we need async initialization.
         """
-        instance = cls(
-            intents=intents, embedding_model=embedding_model, context=context
-        )
+        instance = cls(intents=intents, embedding_model=embedding_model, context=context)
         await instance.initialize()
         return instance
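
The recurring change in these classifier modules is pulling the `Context` import behind `typing.TYPE_CHECKING`, so the annotation stays visible to type checkers without importing `mcp_agent.context` at runtime. A minimal sketch of the pattern, with a hypothetical `ExampleClassifier` standing in for the real classes:

```python
from typing import TYPE_CHECKING, Optional

if TYPE_CHECKING:
    # Only evaluated by static type checkers, so importing this module at
    # runtime does not drag in (or circularly import) mcp_agent.context.
    from mcp_agent.context import Context


class ExampleClassifier:
    def __init__(self, context: Optional["Context"] = None) -> None:
        # The quoted annotation is a forward reference, so no runtime import is needed.
        self.context = context
```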

--- a/mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py
+++ b/mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py
@@ -1,4 +1,4 @@
-from typing import List, Optional
+from typing import TYPE_CHECKING, List, Optional
 
 from mcp_agent.workflows.embedding.embedding_openai import OpenAIEmbeddingModel
 from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
@@ -21,11 +21,9 @@ class OpenAIEmbeddingIntentClassifier(EmbeddingIntentClassifier):
         embedding_model: OpenAIEmbeddingModel | None = None,
         context: Optional["Context"] = None,
         **kwargs,
-    ):
+    ) -> None:
         embedding_model = embedding_model or OpenAIEmbeddingModel()
-        super().__init__(
-            embedding_model=embedding_model, intents=intents, context=context, **kwargs
-        )
+        super().__init__(embedding_model=embedding_model, intents=intents, context=context, **kwargs)
 
     @classmethod
     async def create(
@@ -38,8 +36,6 @@ class OpenAIEmbeddingIntentClassifier(EmbeddingIntentClassifier):
         Factory method to create and initialize a classifier.
         Use this instead of constructor since we need async initialization.
        """
-        instance = cls(
-            intents=intents, embedding_model=embedding_model, context=context
-        )
+        instance = cls(intents=intents, embedding_model=embedding_model, context=context)
         await instance.initialize()
         return instance
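
Both embedding classifiers keep the async `create()` factory as the intended entry point, since initialization must be awaited. A usage sketch under that assumption: the `Intent` fields mirror the attributes referenced later in this diff, the `classify(request, top_k)` call mirrors the signature visible in the LLM classifier below, and the event-loop wiring plus a configured OpenAI API key are assumed rather than shown in the diff:

```python
import asyncio

from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
from mcp_agent.workflows.intent_classifier.intent_classifier_embedding_openai import (
    OpenAIEmbeddingIntentClassifier,
)


async def main() -> None:
    intents = [
        Intent(name="billing", description="Questions about invoices or payments"),
        Intent(name="support", description="Requests for technical help"),
    ]
    # create() constructs the instance and awaits initialize() in one step,
    # which is why the docstring steers callers away from the constructor.
    classifier = await OpenAIEmbeddingIntentClassifier.create(intents=intents)
    results = await classifier.classify("My card was charged twice", top_k=1)
    print(results)


if __name__ == "__main__":
    asyncio.run(main())
```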

--- a/mcp_agent/workflows/intent_classifier/intent_classifier_llm.py
+++ b/mcp_agent/workflows/intent_classifier/intent_classifier_llm.py
@@ -1,12 +1,13 @@
-from typing import List, Literal, Optional
+from typing import TYPE_CHECKING, List, Literal, Optional
+
 from pydantic import BaseModel
 
-from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM
 from mcp_agent.workflows.intent_classifier.intent_classifier_base import (
     Intent,
-    IntentClassifier,
     IntentClassificationResult,
+    IntentClassifier,
 )
+from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM
 
 if TYPE_CHECKING:
     from mcp_agent.context import Context
@@ -75,7 +76,7 @@ class LLMIntentClassifier(IntentClassifier):
         classification_instruction: str | None = None,
         context: Optional["Context"] = None,
         **kwargs,
-    ):
+    ) -> None:
         super().__init__(intents=intents, context=context, **kwargs)
         self.llm = llm
         self.classification_instruction = classification_instruction
@@ -99,28 +100,20 @@ class LLMIntentClassifier(IntentClassifier):
         await instance.initialize()
         return instance
 
-    async def classify(
-        self, request: str, top_k: int = 1
-    ) -> List[LLMIntentClassificationResult]:
+    async def classify(self, request: str, top_k: int = 1) -> List[LLMIntentClassificationResult]:
         if not self.initialized:
             self.initialize()
 
-        classification_instruction = (
-            self.classification_instruction or DEFAULT_INTENT_CLASSIFICATION_INSTRUCTION
-        )
+        classification_instruction = self.classification_instruction or DEFAULT_INTENT_CLASSIFICATION_INSTRUCTION
 
         # Generate the context with intent descriptions and examples
         context = self._generate_context()
 
         # Format the prompt with all the necessary information
-        prompt = classification_instruction.format(
-            context=context, request=request, top_k=top_k
-        )
+        prompt = classification_instruction.format(context=context, request=request, top_k=top_k)
 
         # Get classification from LLM
-        response = await self.llm.generate_structured(
-            message=prompt, response_model=StructuredIntentResponse
-        )
+        response = await self.llm.generate_structured(message=prompt, response_model=StructuredIntentResponse)
 
         if not response or not response.classifications:
             return []
@@ -142,18 +135,14 @@ class LLMIntentClassifier(IntentClassifier):
         context_parts = []
 
         for idx, intent in enumerate(self.intents.values(), 1):
-            description = (
-                f"{idx}. Intent: {intent.name}\nDescription: {intent.description}"
-            )
+            description = f"{idx}. Intent: {intent.name}\nDescription: {intent.description}"
 
             if intent.examples:
                 examples = "\n".join(f"- {example}" for example in intent.examples)
                 description += f"\nExamples:\n{examples}"
 
             if intent.metadata:
-                metadata = "\n".join(
-                    f"- {key}: {value}" for key, value in intent.metadata.items()
-                )
+                metadata = "\n".join(f"- {key}: {value}" for key, value in intent.metadata.items())
                 description += f"\nAdditional Information:\n{metadata}"
 
             context_parts.append(description)
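
The collapsed `classify()` above depends on `generate_structured()` handing back a validated Pydantic `StructuredIntentResponse`. A rough sketch of that validation step; apart from `classifications` and the class names, the field names here are assumptions rather than the package's actual definitions:

```python
from typing import List, Optional

from pydantic import BaseModel


# Assumed shapes: the real LLMIntentClassificationResult / StructuredIntentResponse
# live in intent_classifier_llm.py; only "classifications" is visible in this diff.
class LLMIntentClassificationResult(BaseModel):
    intent: str
    confidence: Optional[float] = None


class StructuredIntentResponse(BaseModel):
    classifications: List[LLMIntentClassificationResult]


# generate_structured(message=prompt, response_model=StructuredIntentResponse)
# is expected to return an already-validated instance shaped like this one.
raw = {"classifications": [{"intent": "billing", "confidence": 0.92}]}
response = StructuredIntentResponse.model_validate(raw)
print(response.classifications[0].intent)  # billing
```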

--- a/mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py
+++ b/mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py
@@ -1,10 +1,10 @@
-from typing import List, Optional
+from typing import TYPE_CHECKING, List, Optional
 
-from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM
 from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
 from mcp_agent.workflows.intent_classifier.intent_classifier_llm import (
     LLMIntentClassifier,
 )
+from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM
 
 if TYPE_CHECKING:
     from mcp_agent.context import Context
@@ -27,7 +27,7 @@ class AnthropicLLMIntentClassifier(LLMIntentClassifier):
         classification_instruction: str | None = None,
         context: Optional["Context"] = None,
         **kwargs,
-    ):
+    ) -> None:
         anthropic_llm = AnthropicAugmentedLLM(
             instruction=CLASSIFIER_SYSTEM_INSTRUCTION, context=context
         )

--- a/mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py
+++ b/mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py
@@ -1,10 +1,10 @@
-from typing import List, Optional
+from typing import TYPE_CHECKING, List, Optional
 
-from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM
 from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
 from mcp_agent.workflows.intent_classifier.intent_classifier_llm import (
     LLMIntentClassifier,
 )
+from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM
 
 if TYPE_CHECKING:
     from mcp_agent.context import Context
@@ -27,10 +27,8 @@ class OpenAILLMIntentClassifier(LLMIntentClassifier):
         classification_instruction: str | None = None,
         context: Optional["Context"] = None,
         **kwargs,
-    ):
-        openai_llm = OpenAIAugmentedLLM(
-            instruction=CLASSIFIER_SYSTEM_INSTRUCTION, context=context
-        )
+    ) -> None:
+        openai_llm = OpenAIAugmentedLLM(instruction=CLASSIFIER_SYSTEM_INSTRUCTION, context=context)
 
         super().__init__(
             llm=openai_llm,

--- a/mcp_agent/workflows/llm/anthropic_utils.py
+++ b/mcp_agent/workflows/llm/anthropic_utils.py
@@ -8,11 +8,10 @@ leveraging existing code for resource handling and delimited formats.
 from anthropic.types import (
     MessageParam,
 )
-
 from mcp.types import (
-    TextContent,
-    ImageContent,
     EmbeddedResource,
+    ImageContent,
+    TextContent,
     TextResourceContents,
 )
 
@@ -37,9 +36,7 @@ def anthropic_message_param_to_prompt_message_multipart(
 
     # Handle string content (user messages can be simple strings)
     if isinstance(content, str):
-        return PromptMessageMultipart(
-            role=role, content=[TextContent(type="text", text=content)]
-        )
+        return PromptMessageMultipart(role=role, content=[TextContent(type="text", text=content)])
 
     # Convert content blocks to MCP content types
     mcp_contents = []
@@ -50,29 +47,13 @@ def anthropic_message_param_to_prompt_message_multipart(
             text = block.get("text", "")
 
             # Check if this is a resource marker
-            if (
-                text
-                and (
-                    text.startswith("[Resource:")
-                    or text.startswith("[Binary Resource:")
-                )
-                and "\n" in text
-            ):
+            if text and (text.startswith("[Resource:") or text.startswith("[Binary Resource:")) and "\n" in text:
                 header, content_text = text.split("\n", 1)
                 if "MIME:" in header:
                     mime_match = header.split("MIME:", 1)[1].split("]")[0].strip()
-                    if (
-                        mime_match != "text/plain"
-                    ):  # Only process non-plain text resources
-                        if (
-                            "Resource:" in header
-                            and "Binary Resource:" not in header
-                        ):
-                            uri = (
-                                header.split("Resource:", 1)[1]
-                                .split(",")[0]
-                                .strip()
-                            )
+                    if mime_match != "text/plain":  # Only process non-plain text resources
+                        if "Resource:" in header and "Binary Resource:" not in header:
+                            uri = header.split("Resource:", 1)[1].split(",")[0].strip()
                             mcp_contents.append(
                                 EmbeddedResource(
                                     type="resource",
@@ -94,8 +75,6 @@ def anthropic_message_param_to_prompt_message_multipart(
         if isinstance(source, dict) and source.get("type") == "base64":
             media_type = source.get("media_type", "image/png")
             data = source.get("data", "")
-            mcp_contents.append(
-                ImageContent(type="image", data=data, mimeType=media_type)
-            )
+            mcp_contents.append(ImageContent(type="image", data=data, mimeType=media_type))
 
     return PromptMessageMultipart(role=role, content=mcp_contents)
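
The flattened conditionals in the last hunk implement the converter's resource-marker check: a `[Resource: <uri>, MIME: <type>]` header line followed by the resource body. A standalone sketch of that parsing with a made-up URI; the marker format itself is inferred from the parsing code rather than stated in the diff:

```python
# Hypothetical marker in the shape the converter looks for: a header line
# "[Resource: <uri>, MIME: <type>]" followed by the resource body.
text = '[Resource: file:///data/report.json, MIME: application/json]\n{"rows": 3}'

if text and (text.startswith("[Resource:") or text.startswith("[Binary Resource:")) and "\n" in text:
    header, content_text = text.split("\n", 1)
    if "MIME:" in header:
        mime_match = header.split("MIME:", 1)[1].split("]")[0].strip()
        # Only non-plain-text, non-binary resources are re-wrapped as EmbeddedResource.
        if mime_match != "text/plain" and "Resource:" in header and "Binary Resource:" not in header:
            uri = header.split("Resource:", 1)[1].split(",")[0].strip()
            print(uri)           # file:///data/report.json
            print(mime_match)    # application/json
            print(content_text)  # {"rows": 3}
```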