fast-agent-mcp 0.1.12__py3-none-any.whl → 0.2.0__py3-none-any.whl
This diff compares the contents of two publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as published.
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/METADATA +3 -4
- fast_agent_mcp-0.2.0.dist-info/RECORD +123 -0
- mcp_agent/__init__.py +75 -0
- mcp_agent/agents/agent.py +61 -415
- mcp_agent/agents/base_agent.py +522 -0
- mcp_agent/agents/workflow/__init__.py +1 -0
- mcp_agent/agents/workflow/chain_agent.py +173 -0
- mcp_agent/agents/workflow/evaluator_optimizer.py +362 -0
- mcp_agent/agents/workflow/orchestrator_agent.py +591 -0
- mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_models.py +11 -21
- mcp_agent/agents/workflow/parallel_agent.py +182 -0
- mcp_agent/agents/workflow/router_agent.py +307 -0
- mcp_agent/app.py +15 -19
- mcp_agent/cli/commands/bootstrap.py +19 -38
- mcp_agent/cli/commands/config.py +4 -4
- mcp_agent/cli/commands/setup.py +7 -14
- mcp_agent/cli/main.py +7 -10
- mcp_agent/cli/terminal.py +3 -3
- mcp_agent/config.py +25 -40
- mcp_agent/context.py +12 -21
- mcp_agent/context_dependent.py +3 -5
- mcp_agent/core/agent_types.py +10 -7
- mcp_agent/core/direct_agent_app.py +179 -0
- mcp_agent/core/direct_decorators.py +443 -0
- mcp_agent/core/direct_factory.py +476 -0
- mcp_agent/core/enhanced_prompt.py +23 -55
- mcp_agent/core/exceptions.py +8 -8
- mcp_agent/core/fastagent.py +145 -371
- mcp_agent/core/interactive_prompt.py +424 -0
- mcp_agent/core/mcp_content.py +17 -17
- mcp_agent/core/prompt.py +6 -9
- mcp_agent/core/request_params.py +6 -3
- mcp_agent/core/validation.py +92 -18
- mcp_agent/executor/decorator_registry.py +9 -17
- mcp_agent/executor/executor.py +8 -17
- mcp_agent/executor/task_registry.py +2 -4
- mcp_agent/executor/temporal.py +19 -41
- mcp_agent/executor/workflow.py +3 -5
- mcp_agent/executor/workflow_signal.py +15 -21
- mcp_agent/human_input/handler.py +4 -7
- mcp_agent/human_input/types.py +2 -3
- mcp_agent/llm/__init__.py +2 -0
- mcp_agent/llm/augmented_llm.py +450 -0
- mcp_agent/llm/augmented_llm_passthrough.py +162 -0
- mcp_agent/llm/augmented_llm_playback.py +83 -0
- mcp_agent/llm/memory.py +103 -0
- mcp_agent/{workflows/llm → llm}/model_factory.py +22 -16
- mcp_agent/{workflows/llm → llm}/prompt_utils.py +1 -3
- mcp_agent/llm/providers/__init__.py +8 -0
- mcp_agent/{workflows/llm → llm/providers}/anthropic_utils.py +8 -25
- mcp_agent/{workflows/llm → llm/providers}/augmented_llm_anthropic.py +56 -194
- mcp_agent/llm/providers/augmented_llm_deepseek.py +53 -0
- mcp_agent/{workflows/llm → llm/providers}/augmented_llm_openai.py +99 -190
- mcp_agent/{workflows/llm → llm}/providers/multipart_converter_anthropic.py +72 -71
- mcp_agent/{workflows/llm → llm}/providers/multipart_converter_openai.py +65 -71
- mcp_agent/{workflows/llm → llm}/providers/openai_multipart.py +16 -44
- mcp_agent/{workflows/llm → llm/providers}/openai_utils.py +4 -4
- mcp_agent/{workflows/llm → llm}/providers/sampling_converter_anthropic.py +9 -11
- mcp_agent/{workflows/llm → llm}/providers/sampling_converter_openai.py +8 -12
- mcp_agent/{workflows/llm → llm}/sampling_converter.py +3 -31
- mcp_agent/llm/sampling_format_converter.py +37 -0
- mcp_agent/logging/events.py +1 -5
- mcp_agent/logging/json_serializer.py +7 -6
- mcp_agent/logging/listeners.py +20 -23
- mcp_agent/logging/logger.py +17 -19
- mcp_agent/logging/rich_progress.py +10 -8
- mcp_agent/logging/tracing.py +4 -6
- mcp_agent/logging/transport.py +22 -22
- mcp_agent/mcp/gen_client.py +1 -3
- mcp_agent/mcp/interfaces.py +117 -110
- mcp_agent/mcp/logger_textio.py +97 -0
- mcp_agent/mcp/mcp_agent_client_session.py +7 -7
- mcp_agent/mcp/mcp_agent_server.py +8 -8
- mcp_agent/mcp/mcp_aggregator.py +102 -143
- mcp_agent/mcp/mcp_connection_manager.py +20 -27
- mcp_agent/mcp/prompt_message_multipart.py +68 -16
- mcp_agent/mcp/prompt_render.py +77 -0
- mcp_agent/mcp/prompt_serialization.py +30 -48
- mcp_agent/mcp/prompts/prompt_constants.py +18 -0
- mcp_agent/mcp/prompts/prompt_helpers.py +327 -0
- mcp_agent/mcp/prompts/prompt_load.py +109 -0
- mcp_agent/mcp/prompts/prompt_server.py +155 -195
- mcp_agent/mcp/prompts/prompt_template.py +35 -66
- mcp_agent/mcp/resource_utils.py +7 -14
- mcp_agent/mcp/sampling.py +17 -17
- mcp_agent/mcp_server/agent_server.py +13 -17
- mcp_agent/mcp_server_registry.py +13 -22
- mcp_agent/resources/examples/{workflows → in_dev}/agent_build.py +3 -2
- mcp_agent/resources/examples/in_dev/slides.py +110 -0
- mcp_agent/resources/examples/internal/agent.py +6 -3
- mcp_agent/resources/examples/internal/fastagent.config.yaml +8 -2
- mcp_agent/resources/examples/internal/job.py +2 -1
- mcp_agent/resources/examples/internal/prompt_category.py +1 -1
- mcp_agent/resources/examples/internal/prompt_sizing.py +3 -5
- mcp_agent/resources/examples/internal/sizer.py +2 -1
- mcp_agent/resources/examples/internal/social.py +2 -1
- mcp_agent/resources/examples/prompting/agent.py +2 -1
- mcp_agent/resources/examples/prompting/image_server.py +4 -8
- mcp_agent/resources/examples/prompting/work_with_image.py +19 -0
- mcp_agent/ui/console_display.py +16 -20
- fast_agent_mcp-0.1.12.dist-info/RECORD +0 -161
- mcp_agent/core/agent_app.py +0 -646
- mcp_agent/core/agent_utils.py +0 -71
- mcp_agent/core/decorators.py +0 -455
- mcp_agent/core/factory.py +0 -463
- mcp_agent/core/proxies.py +0 -269
- mcp_agent/core/types.py +0 -24
- mcp_agent/eval/__init__.py +0 -0
- mcp_agent/mcp/stdio.py +0 -111
- mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -188
- mcp_agent/resources/examples/data-analysis/analysis.py +0 -65
- mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -41
- mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -1471
- mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +0 -53
- mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -66
- mcp_agent/resources/examples/researcher/researcher-eval.py +0 -53
- mcp_agent/resources/examples/researcher/researcher-imp.py +0 -190
- mcp_agent/resources/examples/researcher/researcher.py +0 -38
- mcp_agent/resources/examples/workflows/chaining.py +0 -44
- mcp_agent/resources/examples/workflows/evaluator.py +0 -78
- mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -24
- mcp_agent/resources/examples/workflows/human_input.py +0 -25
- mcp_agent/resources/examples/workflows/orchestrator.py +0 -73
- mcp_agent/resources/examples/workflows/parallel.py +0 -78
- mcp_agent/resources/examples/workflows/router.py +0 -53
- mcp_agent/resources/examples/workflows/sse.py +0 -23
- mcp_agent/telemetry/__init__.py +0 -0
- mcp_agent/telemetry/usage_tracking.py +0 -18
- mcp_agent/workflows/__init__.py +0 -0
- mcp_agent/workflows/embedding/__init__.py +0 -0
- mcp_agent/workflows/embedding/embedding_base.py +0 -61
- mcp_agent/workflows/embedding/embedding_cohere.py +0 -49
- mcp_agent/workflows/embedding/embedding_openai.py +0 -46
- mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +0 -481
- mcp_agent/workflows/intent_classifier/__init__.py +0 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_base.py +0 -120
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +0 -134
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +0 -45
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +0 -45
- mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +0 -161
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +0 -60
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +0 -60
- mcp_agent/workflows/llm/__init__.py +0 -0
- mcp_agent/workflows/llm/augmented_llm.py +0 -753
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +0 -241
- mcp_agent/workflows/llm/augmented_llm_playback.py +0 -109
- mcp_agent/workflows/llm/providers/__init__.py +0 -8
- mcp_agent/workflows/llm/sampling_format_converter.py +0 -22
- mcp_agent/workflows/orchestrator/__init__.py +0 -0
- mcp_agent/workflows/orchestrator/orchestrator.py +0 -578
- mcp_agent/workflows/parallel/__init__.py +0 -0
- mcp_agent/workflows/parallel/fan_in.py +0 -350
- mcp_agent/workflows/parallel/fan_out.py +0 -187
- mcp_agent/workflows/parallel/parallel_llm.py +0 -166
- mcp_agent/workflows/router/__init__.py +0 -0
- mcp_agent/workflows/router/router_base.py +0 -368
- mcp_agent/workflows/router/router_embedding.py +0 -240
- mcp_agent/workflows/router/router_embedding_cohere.py +0 -59
- mcp_agent/workflows/router/router_embedding_openai.py +0 -59
- mcp_agent/workflows/router/router_llm.py +0 -320
- mcp_agent/workflows/swarm/__init__.py +0 -0
- mcp_agent/workflows/swarm/swarm.py +0 -320
- mcp_agent/workflows/swarm/swarm_anthropic.py +0 -42
- mcp_agent/workflows/swarm/swarm_openai.py +0 -41
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.1.12.dist-info → fast_agent_mcp-0.2.0.dist-info}/licenses/LICENSE +0 -0
- /mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_prompts.py +0 -0
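
The renames above capture the 0.2.0 restructuring: the old mcp_agent.workflows tree is removed, LLM code moves under mcp_agent.llm (with provider-specific modules in mcp_agent.llm.providers), and workflow implementations become agents under mcp_agent.agents.workflow. Below is a minimal import-migration sketch based only on the renamed file paths in this listing; it assumes the class names carried over unchanged from 0.1.12, which the listing itself does not confirm.

# Hypothetical import changes inferred from the renamed paths above.
# Class names are assumed to be the same in 0.2.0 as in 0.1.12 - verify
# against the installed package before relying on this.

# 0.1.12 (removed module paths):
# from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM
# from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM

# 0.2.0 (new module paths, per the renames in this diff):
from mcp_agent.llm.providers.augmented_llm_anthropic import AnthropicAugmentedLLM
from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM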

mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py
@@ -1,134 +0,0 @@
-from typing import List, Optional, TYPE_CHECKING
-
-from numpy import mean
-
-from mcp_agent.workflows.embedding.embedding_base import (
-    FloatArray,
-    EmbeddingModel,
-    compute_confidence,
-    compute_similarity_scores,
-)
-from mcp_agent.workflows.intent_classifier.intent_classifier_base import (
-    Intent,
-    IntentClassifier,
-    IntentClassificationResult,
-)
-
-if TYPE_CHECKING:
-    from mcp_agent.context import Context
-
-
-class EmbeddingIntent(Intent):
-    """An intent with embedding information"""
-
-    embedding: FloatArray | None = None
-    """Pre-computed embedding for this intent"""
-
-
-class EmbeddingIntentClassifier(IntentClassifier):
-    """
-    An intent classifier that uses embedding similarity for classification.
-    Supports different embedding models through the EmbeddingModel interface.
-
-    Features:
-    - Semantic similarity based classification
-    - Support for example-based learning
-    - Flexible embedding model support
-    - Multiple similarity computation strategies
-    """
-
-    def __init__(
-        self,
-        intents: List[Intent],
-        embedding_model: EmbeddingModel,
-        context: Optional["Context"] = None,
-        **kwargs,
-    ):
-        super().__init__(intents=intents, context=context, **kwargs)
-        self.embedding_model = embedding_model
-        self.initialized = False
-
-    @classmethod
-    async def create(
-        cls,
-        intents: List[Intent],
-        embedding_model: EmbeddingModel,
-    ) -> "EmbeddingIntentClassifier":
-        """
-        Factory method to create and initialize a classifier.
-        Use this instead of constructor since we need async initialization.
-        """
-        instance = cls(
-            intents=intents,
-            embedding_model=embedding_model,
-        )
-        await instance.initialize()
-        return instance
-
-    async def initialize(self):
-        """
-        Precompute embeddings for all intents by combining their
-        descriptions and examples
-        """
-        if self.initialized:
-            return
-
-        for intent in self.intents.values():
-            # Combine all text for a rich intent representation
-            intent_texts = [intent.name, intent.description] + intent.examples
-
-            # Get embeddings for all texts
-            embeddings = await self.embedding_model.embed(intent_texts)
-
-            # Use mean pooling to combine embeddings
-            embedding = mean(embeddings, axis=0)
-
-            # Create intents with embeddings
-            self.intents[intent.name] = EmbeddingIntent(
-                **intent,
-                embedding=embedding,
-            )
-
-        self.initialized = True
-
-    async def classify(
-        self, request: str, top_k: int = 1
-    ) -> List[IntentClassificationResult]:
-        """
-        Classify the input text into one or more intents
-
-        Args:
-            text: Input text to classify
-            top_k: Maximum number of top matches to return
-
-        Returns:
-            List of classification results, ordered by confidence
-        """
-        if not self.initialized:
-            await self.initialize()
-
-        # Get embedding for input
-        embeddings = await self.embedding_model.embed([request])
-        request_embedding = embeddings[0]  # Take first since we only embedded one text
-
-        results: List[IntentClassificationResult] = []
-        for intent_name, intent in self.intents.items():
-            if intent.embedding is None:
-                continue
-
-            similarity_scores = compute_similarity_scores(
-                request_embedding, intent.embedding
-            )
-
-            # Compute overall confidence score
-            confidence = compute_confidence(similarity_scores)
-
-            results.append(
-                IntentClassificationResult(
-                    intent=intent_name,
-                    p_score=confidence,
-                )
-            )
-
-        results.sort(key=lambda x: x.p_score, reverse=True)
-        return results[:top_k]
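
The hunk above is the embedding-based intent classifier removed in 0.2.0. For reference, here is a minimal usage sketch of that removed 0.1.12 API, based only on the create() and classify() signatures shown in the deleted code; the Intent constructor fields and the zero-argument OpenAIEmbeddingModel() are assumptions drawn from the other deleted modules in this diff.

# Usage sketch of the removed 0.1.12 EmbeddingIntentClassifier (deleted in 0.2.0).
# Intent field names (name/description/examples) are inferred from initialize()
# above and may not match the actual Intent model exactly.
import asyncio

from mcp_agent.workflows.embedding.embedding_openai import OpenAIEmbeddingModel
from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
from mcp_agent.workflows.intent_classifier.intent_classifier_embedding import (
    EmbeddingIntentClassifier,
)


async def main() -> None:
    intents = [
        Intent(
            name="greeting",
            description="The user is greeting the assistant",
            examples=["hi there", "good morning"],
        ),
        Intent(
            name="farewell",
            description="The user is ending the conversation",
            examples=["bye", "see you later"],
        ),
    ]

    # create() runs the async embedding precomputation before returning the instance
    classifier = await EmbeddingIntentClassifier.create(
        intents=intents,
        embedding_model=OpenAIEmbeddingModel(),
    )

    # classify() returns results ordered by confidence (p_score), truncated to top_k
    for result in await classifier.classify("hello, how are you?", top_k=2):
        print(result.intent, result.p_score)


asyncio.run(main())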

mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py
@@ -1,45 +0,0 @@
-from typing import List, Optional, TYPE_CHECKING
-
-from mcp_agent.workflows.embedding.embedding_cohere import CohereEmbeddingModel
-from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
-from mcp_agent.workflows.intent_classifier.intent_classifier_embedding import (
-    EmbeddingIntentClassifier,
-)
-
-if TYPE_CHECKING:
-    from mcp_agent.context import Context
-
-
-class CohereEmbeddingIntentClassifier(EmbeddingIntentClassifier):
-    """
-    An intent classifier that uses Cohere's embedding models for computing semantic simiarity based classifications.
-    """
-
-    def __init__(
-        self,
-        intents: List[Intent],
-        embedding_model: CohereEmbeddingModel | None = None,
-        context: Optional["Context"] = None,
-        **kwargs,
-    ):
-        embedding_model = embedding_model or CohereEmbeddingModel()
-        super().__init__(
-            embedding_model=embedding_model, intents=intents, context=context, **kwargs
-        )
-
-    @classmethod
-    async def create(
-        cls,
-        intents: List[Intent],
-        embedding_model: CohereEmbeddingModel | None = None,
-        context: Optional["Context"] = None,
-    ) -> "CohereEmbeddingIntentClassifier":
-        """
-        Factory method to create and initialize a classifier.
-        Use this instead of constructor since we need async initialization.
-        """
-        instance = cls(
-            intents=intents, embedding_model=embedding_model, context=context
-        )
-        await instance.initialize()
-        return instance

mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py
@@ -1,45 +0,0 @@
-from typing import List, Optional, TYPE_CHECKING
-
-from mcp_agent.workflows.embedding.embedding_openai import OpenAIEmbeddingModel
-from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
-from mcp_agent.workflows.intent_classifier.intent_classifier_embedding import (
-    EmbeddingIntentClassifier,
-)
-
-if TYPE_CHECKING:
-    from mcp_agent.context import Context
-
-
-class OpenAIEmbeddingIntentClassifier(EmbeddingIntentClassifier):
-    """
-    An intent classifier that uses OpenAI's embedding models for computing semantic simiarity based classifications.
-    """
-
-    def __init__(
-        self,
-        intents: List[Intent],
-        embedding_model: OpenAIEmbeddingModel | None = None,
-        context: Optional["Context"] = None,
-        **kwargs,
-    ):
-        embedding_model = embedding_model or OpenAIEmbeddingModel()
-        super().__init__(
-            embedding_model=embedding_model, intents=intents, context=context, **kwargs
-        )
-
-    @classmethod
-    async def create(
-        cls,
-        intents: List[Intent],
-        embedding_model: OpenAIEmbeddingModel | None = None,
-        context: Optional["Context"] = None,
-    ) -> "OpenAIEmbeddingIntentClassifier":
-        """
-        Factory method to create and initialize a classifier.
-        Use this instead of constructor since we need async initialization.
-        """
-        instance = cls(
-            intents=intents, embedding_model=embedding_model, context=context
-        )
-        await instance.initialize()
-        return instance

mcp_agent/workflows/intent_classifier/intent_classifier_llm.py
@@ -1,161 +0,0 @@
-from typing import List, Literal, Optional, TYPE_CHECKING
-from pydantic import BaseModel
-
-from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM
-from mcp_agent.workflows.intent_classifier.intent_classifier_base import (
-    Intent,
-    IntentClassifier,
-    IntentClassificationResult,
-)
-
-if TYPE_CHECKING:
-    from mcp_agent.context import Context
-
-DEFAULT_INTENT_CLASSIFICATION_INSTRUCTION = """
-You are a precise intent classifier that analyzes user requests to determine their intended action or purpose.
-Below are the available intents with their descriptions and examples:
-
-{context}
-
-Your task is to analyze the following request and determine the most likely intent(s). Consider:
-- How well the request matches the intent descriptions and examples
-- Any specific entities or parameters that should be extracted
-- The confidence level in the classification
-
-Request: {request}
-
-Respond in JSON format:
-{{
-    "classifications": [
-        {{
-            "intent": <intent name>,
-            "confidence": <float between 0 and 1>,
-            "extracted_entities": {{
-                "entity_name": "entity_value"
-            }},
-            "reasoning": <brief explanation>
-        }}
-    ]
-}}
-
-Return up to {top_k} most likely intents. Only include intents with reasonable confidence (>0.5).
-If no intents match well, return an empty list.
-"""
-
-
-class LLMIntentClassificationResult(IntentClassificationResult):
-    """The result of intent classification using an LLM."""
-
-    confidence: Literal["low", "medium", "high"]
-    """Confidence level of the classification"""
-
-    reasoning: str | None = None
-    """Optional explanation of why this intent was chosen"""
-
-
-class StructuredIntentResponse(BaseModel):
-    """The complete structured response from the LLM"""
-
-    classifications: List[LLMIntentClassificationResult]
-
-
-class LLMIntentClassifier(IntentClassifier):
-    """
-    An intent classifier that uses an LLM to determine the user's intent.
-    Particularly useful when you need:
-    - Flexible understanding of natural language
-    - Detailed reasoning about classifications
-    - Entity extraction alongside classification
-    """
-
-    def __init__(
-        self,
-        llm: AugmentedLLM,
-        intents: List[Intent],
-        classification_instruction: str | None = None,
-        context: Optional["Context"] = None,
-        **kwargs,
-    ):
-        super().__init__(intents=intents, context=context, **kwargs)
-        self.llm = llm
-        self.classification_instruction = classification_instruction
-
-    @classmethod
-    async def create(
-        cls,
-        llm: AugmentedLLM,
-        intents: List[Intent],
-        classification_instruction: str | None = None,
-    ) -> "LLMIntentClassifier":
-        """
-        Factory method to create and initialize a classifier.
-        Use this instead of constructor since we need async initialization.
-        """
-        instance = cls(
-            llm=llm,
-            intents=intents,
-            classification_instruction=classification_instruction,
-        )
-        await instance.initialize()
-        return instance
-
-    async def classify(
-        self, request: str, top_k: int = 1
-    ) -> List[LLMIntentClassificationResult]:
-        if not self.initialized:
-            self.initialize()
-
-        classification_instruction = (
-            self.classification_instruction or DEFAULT_INTENT_CLASSIFICATION_INSTRUCTION
-        )
-
-        # Generate the context with intent descriptions and examples
-        context = self._generate_context()
-
-        # Format the prompt with all the necessary information
-        prompt = classification_instruction.format(
-            context=context, request=request, top_k=top_k
-        )
-
-        # Get classification from LLM
-        response = await self.llm.generate_structured(
-            message=prompt, response_model=StructuredIntentResponse
-        )
-
-        if not response or not response.classifications:
-            return []
-
-        results = []
-        for classification in response.classifications:
-            intent = self.intents.get(classification.intent)
-            if not intent:
-                # Skip invalid categories
-                # TODO: saqadri - log or raise an error
-                continue
-
-            results.append(classification)
-
-        return results[:top_k]
-
-    def _generate_context(self) -> str:
-        """Generate a formatted context string describing all intents"""
-        context_parts = []
-
-        for idx, intent in enumerate(self.intents.values(), 1):
-            description = (
-                f"{idx}. Intent: {intent.name}\nDescription: {intent.description}"
-            )
-
-            if intent.examples:
-                examples = "\n".join(f"- {example}" for example in intent.examples)
-                description += f"\nExamples:\n{examples}"
-
-            if intent.metadata:
-                metadata = "\n".join(
-                    f"- {key}: {value}" for key, value in intent.metadata.items()
-                )
-                description += f"\nAdditional Information:\n{metadata}"
-
-            context_parts.append(description)
-
-        return "\n\n".join(context_parts)

mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py
@@ -1,60 +0,0 @@
-from typing import List, Optional, TYPE_CHECKING
-
-from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM
-from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
-from mcp_agent.workflows.intent_classifier.intent_classifier_llm import (
-    LLMIntentClassifier,
-)
-
-if TYPE_CHECKING:
-    from mcp_agent.context import Context
-
-CLASSIFIER_SYSTEM_INSTRUCTION = """
-You are a precise intent classifier that analyzes input requests to determine their intended action or purpose.
-You are provided with a request and a list of intents to choose from.
-You can choose one or more intents, or choose none if no intent is appropriate.
-"""
-
-
-class AnthropicLLMIntentClassifier(LLMIntentClassifier):
-    """
-    An LLM router that uses an Anthropic model to make routing decisions.
-    """
-
-    def __init__(
-        self,
-        intents: List[Intent],
-        classification_instruction: str | None = None,
-        context: Optional["Context"] = None,
-        **kwargs,
-    ):
-        anthropic_llm = AnthropicAugmentedLLM(
-            instruction=CLASSIFIER_SYSTEM_INSTRUCTION, context=context
-        )
-
-        super().__init__(
-            llm=anthropic_llm,
-            intents=intents,
-            classification_instruction=classification_instruction,
-            context=context,
-            **kwargs,
-        )
-
-    @classmethod
-    async def create(
-        cls,
-        intents: List[Intent],
-        classification_instruction: str | None = None,
-        context: Optional["Context"] = None,
-    ) -> "AnthropicLLMIntentClassifier":
-        """
-        Factory method to create and initialize a classifier.
-        Use this instead of constructor since we need async initialization.
-        """
-        instance = cls(
-            intents=intents,
-            classification_instruction=classification_instruction,
-            context=context,
-        )
-        await instance.initialize()
-        return instance

mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py
@@ -1,60 +0,0 @@
-from typing import List, Optional, TYPE_CHECKING
-
-from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM
-from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
-from mcp_agent.workflows.intent_classifier.intent_classifier_llm import (
-    LLMIntentClassifier,
-)
-
-if TYPE_CHECKING:
-    from mcp_agent.context import Context
-
-CLASSIFIER_SYSTEM_INSTRUCTION = """
-You are a precise intent classifier that analyzes input requests to determine their intended action or purpose.
-You are provided with a request and a list of intents to choose from.
-You can choose one or more intents, or choose none if no intent is appropriate.
-"""
-
-
-class OpenAILLMIntentClassifier(LLMIntentClassifier):
-    """
-    An LLM router that uses an OpenAI model to make routing decisions.
-    """
-
-    def __init__(
-        self,
-        intents: List[Intent],
-        classification_instruction: str | None = None,
-        context: Optional["Context"] = None,
-        **kwargs,
-    ):
-        openai_llm = OpenAIAugmentedLLM(
-            instruction=CLASSIFIER_SYSTEM_INSTRUCTION, context=context
-        )
-
-        super().__init__(
-            llm=openai_llm,
-            intents=intents,
-            classification_instruction=classification_instruction,
-            context=context,
-            **kwargs,
-        )
-
-    @classmethod
-    async def create(
-        cls,
-        intents: List[Intent],
-        classification_instruction: str | None = None,
-        context: Optional["Context"] = None,
-    ) -> "OpenAILLMIntentClassifier":
-        """
-        Factory method to create and initialize a classifier.
-        Use this instead of constructor since we need async initialization.
-        """
-        instance = cls(
-            intents=intents,
-            classification_instruction=classification_instruction,
-            context=context,
-        )
-        await instance.initialize()
-        return instance
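
These last two hunks are the removed convenience subclasses that bundle an Anthropic or OpenAI AugmentedLLM into the LLM-based classifier. A hedged sketch of how they were invoked in 0.1.12 follows, based only on the create() and classify() signatures in the deleted code above; the Intent fields and the framework-side model configuration are assumptions.

# Usage sketch of the removed 0.1.12 LLM-based intent classifiers.
# Intent field names are assumed; credentials and model configuration for the
# underlying AnthropicAugmentedLLM are expected to come from the framework.
import asyncio

from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
from mcp_agent.workflows.intent_classifier.intent_classifier_llm_anthropic import (
    AnthropicLLMIntentClassifier,
)


async def main() -> None:
    intents = [
        Intent(name="search", description="The user wants to look something up"),
        Intent(name="schedule", description="The user wants to book or plan an event"),
    ]

    # classification_instruction and context are optional; when omitted, classify()
    # falls back to DEFAULT_INTENT_CLASSIFICATION_INSTRUCTION from the deleted
    # intent_classifier_llm module above.
    classifier = await AnthropicLLMIntentClassifier.create(intents=intents)

    # Each result carries a confidence level ("low"/"medium"/"high") and optional
    # reasoning, per LLMIntentClassificationResult in the deleted code above.
    for result in await classifier.classify("find me flights to Tokyo", top_k=3):
        print(result.intent, result.confidence, result.reasoning)


asyncio.run(main())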