fast_agent_mcp-0.0.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of fast-agent-mcp might be problematic.
- fast_agent_mcp-0.0.7.dist-info/METADATA +322 -0
- fast_agent_mcp-0.0.7.dist-info/RECORD +100 -0
- fast_agent_mcp-0.0.7.dist-info/WHEEL +4 -0
- fast_agent_mcp-0.0.7.dist-info/entry_points.txt +5 -0
- fast_agent_mcp-0.0.7.dist-info/licenses/LICENSE +201 -0
- mcp_agent/__init__.py +0 -0
- mcp_agent/agents/__init__.py +0 -0
- mcp_agent/agents/agent.py +277 -0
- mcp_agent/app.py +303 -0
- mcp_agent/cli/__init__.py +0 -0
- mcp_agent/cli/__main__.py +4 -0
- mcp_agent/cli/commands/bootstrap.py +221 -0
- mcp_agent/cli/commands/config.py +11 -0
- mcp_agent/cli/commands/setup.py +229 -0
- mcp_agent/cli/main.py +68 -0
- mcp_agent/cli/terminal.py +24 -0
- mcp_agent/config.py +334 -0
- mcp_agent/console.py +28 -0
- mcp_agent/context.py +251 -0
- mcp_agent/context_dependent.py +48 -0
- mcp_agent/core/fastagent.py +1013 -0
- mcp_agent/eval/__init__.py +0 -0
- mcp_agent/event_progress.py +88 -0
- mcp_agent/executor/__init__.py +0 -0
- mcp_agent/executor/decorator_registry.py +120 -0
- mcp_agent/executor/executor.py +293 -0
- mcp_agent/executor/task_registry.py +34 -0
- mcp_agent/executor/temporal.py +405 -0
- mcp_agent/executor/workflow.py +197 -0
- mcp_agent/executor/workflow_signal.py +325 -0
- mcp_agent/human_input/__init__.py +0 -0
- mcp_agent/human_input/handler.py +49 -0
- mcp_agent/human_input/types.py +58 -0
- mcp_agent/logging/__init__.py +0 -0
- mcp_agent/logging/events.py +123 -0
- mcp_agent/logging/json_serializer.py +163 -0
- mcp_agent/logging/listeners.py +216 -0
- mcp_agent/logging/logger.py +365 -0
- mcp_agent/logging/rich_progress.py +120 -0
- mcp_agent/logging/tracing.py +140 -0
- mcp_agent/logging/transport.py +461 -0
- mcp_agent/mcp/__init__.py +0 -0
- mcp_agent/mcp/gen_client.py +85 -0
- mcp_agent/mcp/mcp_activity.py +18 -0
- mcp_agent/mcp/mcp_agent_client_session.py +242 -0
- mcp_agent/mcp/mcp_agent_server.py +56 -0
- mcp_agent/mcp/mcp_aggregator.py +394 -0
- mcp_agent/mcp/mcp_connection_manager.py +330 -0
- mcp_agent/mcp/stdio.py +104 -0
- mcp_agent/mcp_server_registry.py +275 -0
- mcp_agent/progress_display.py +10 -0
- mcp_agent/resources/examples/decorator/main.py +26 -0
- mcp_agent/resources/examples/decorator/optimizer.py +78 -0
- mcp_agent/resources/examples/decorator/orchestrator.py +68 -0
- mcp_agent/resources/examples/decorator/parallel.py +81 -0
- mcp_agent/resources/examples/decorator/router.py +56 -0
- mcp_agent/resources/examples/decorator/tiny.py +22 -0
- mcp_agent/resources/examples/mcp_researcher/main-evalopt.py +53 -0
- mcp_agent/resources/examples/mcp_researcher/main.py +38 -0
- mcp_agent/telemetry/__init__.py +0 -0
- mcp_agent/telemetry/usage_tracking.py +18 -0
- mcp_agent/workflows/__init__.py +0 -0
- mcp_agent/workflows/embedding/__init__.py +0 -0
- mcp_agent/workflows/embedding/embedding_base.py +61 -0
- mcp_agent/workflows/embedding/embedding_cohere.py +49 -0
- mcp_agent/workflows/embedding/embedding_openai.py +46 -0
- mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +359 -0
- mcp_agent/workflows/intent_classifier/__init__.py +0 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_base.py +120 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +134 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +45 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +45 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +161 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +60 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +60 -0
- mcp_agent/workflows/llm/__init__.py +0 -0
- mcp_agent/workflows/llm/augmented_llm.py +645 -0
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +539 -0
- mcp_agent/workflows/llm/augmented_llm_openai.py +615 -0
- mcp_agent/workflows/llm/llm_selector.py +345 -0
- mcp_agent/workflows/llm/model_factory.py +175 -0
- mcp_agent/workflows/orchestrator/__init__.py +0 -0
- mcp_agent/workflows/orchestrator/orchestrator.py +407 -0
- mcp_agent/workflows/orchestrator/orchestrator_models.py +154 -0
- mcp_agent/workflows/orchestrator/orchestrator_prompts.py +113 -0
- mcp_agent/workflows/parallel/__init__.py +0 -0
- mcp_agent/workflows/parallel/fan_in.py +350 -0
- mcp_agent/workflows/parallel/fan_out.py +187 -0
- mcp_agent/workflows/parallel/parallel_llm.py +141 -0
- mcp_agent/workflows/router/__init__.py +0 -0
- mcp_agent/workflows/router/router_base.py +276 -0
- mcp_agent/workflows/router/router_embedding.py +240 -0
- mcp_agent/workflows/router/router_embedding_cohere.py +59 -0
- mcp_agent/workflows/router/router_embedding_openai.py +59 -0
- mcp_agent/workflows/router/router_llm.py +301 -0
- mcp_agent/workflows/swarm/__init__.py +0 -0
- mcp_agent/workflows/swarm/swarm.py +320 -0
- mcp_agent/workflows/swarm/swarm_anthropic.py +42 -0
- mcp_agent/workflows/swarm/swarm_openai.py +41 -0
mcp_agent/workflows/intent_classifier/intent_classifier_base.py
@@ -0,0 +1,120 @@
from abc import ABC, abstractmethod
from typing import Dict, List, Optional, TYPE_CHECKING
from pydantic import BaseModel, Field

if TYPE_CHECKING:
    from mcp_agent.context import Context


class Intent(BaseModel):
    """A class that represents a single intent category"""

    name: str
    """The name of the intent"""

    description: str | None = None
    """A description of what this intent represents"""

    examples: List[str] = Field(default_factory=list)
    """Example phrases or requests that match this intent"""

    metadata: Dict[str, str] = Field(default_factory=dict)
    """Additional metadata about the intent that might be useful for classification"""


class IntentClassificationResult(BaseModel):
    """A class that represents the result of intent classification"""

    intent: str
    """The classified intent name"""

    p_score: float | None = None
    """
    The probability score (i.e. 0->1) of the classification.
    This is optional and may only be provided if the classifier is probabilistic (e.g. a probabilistic binary classifier).
    """

    extracted_entities: Dict[str, str] = Field(default_factory=dict)
    """Any entities or parameters extracted from the input request that are relevant to the intent"""


class IntentClassifier(ABC):
    """
    Base class for intent classification. This can be implemented using different approaches
    like LLMs, embedding models, traditional ML classification models, or rule-based systems.

    When to use this:
    - When you need to understand the user's intention before routing or processing
    - When you want to extract structured information from natural language inputs
    - When you need to handle multiple related but distinct types of requests

    Examples:
    - Classifying customer service requests (complaint, question, feedback)
    - Understanding user commands in a chat interface
    - Determining the type of analysis requested for a dataset
    """

    def __init__(
        self, intents: List[Intent], context: Optional["Context"] = None, **kwargs
    ):
        super().__init__(context=context, **kwargs)
        self.intents = {intent.name: intent for intent in intents}
        self.initialized: bool = False

        if not self.intents:
            raise ValueError("At least one intent must be provided")

    @abstractmethod
    async def classify(
        self, request: str, top_k: int = 1
    ) -> List[IntentClassificationResult]:
        """
        Classify the input request into one or more intents.

        Args:
            request: The input text to classify
            top_k: Maximum number of top intent matches to return. May return fewer.

        Returns:
            List of classification results, ordered by confidence
        """

    async def initialize(self):
        """Initialize the classifier. Override this method if needed."""
        self.initialized = True


# Example
# Define some intents
# intents = [
#     Intent(
#         name="schedule_meeting",
#         description="Schedule or set up a meeting or appointment",
#         examples=[
#             "Can you schedule a meeting with John?",
#             "Set up a call for next week",
#             "I need to arrange a meeting"
#         ]
#     ),
#     Intent(
#         name="check_calendar",
#         description="Check calendar availability or existing appointments",
#         examples=[
#             "What meetings do I have today?",
#             "Show me my calendar",
#             "Am I free tomorrow afternoon?"
#         ]
#     )
# ]

# # Initialize with OpenAI embeddings
# classifier = OpenAIEmbeddingIntentClassifier(intents=intents, model="text-embedding-3-small")

# # Or use Cohere embeddings
# classifier = OpenAIEmbeddingIntentClassifier(intents=intents, model="embed-multilingual-v3.0")

# # Classify some text
# results = await classifier.classify(
#     request="Can you set up a meeting with Sarah for tomorrow?"
#     top_k=3
#   )
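The module above only defines the pydantic data model and the abstract classify contract; concrete classifiers follow in the files below. A minimal sketch of the data model on its own (the billing_question intent is a made-up illustration, not part of the package):

from mcp_agent.workflows.intent_classifier.intent_classifier_base import (
    Intent,
    IntentClassificationResult,
)

# Intents are plain pydantic models; description, examples and metadata are optional.
billing = Intent(
    name="billing_question",
    description="Questions about invoices, charges, or refunds",
    examples=["Why was I charged twice?", "How do I get a refund?"],
)

# Classifiers are expected to return results shaped like this, ordered by confidence.
result = IntentClassificationResult(intent=billing.name, p_score=0.87)
print(result)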
mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py
@@ -0,0 +1,134 @@
from typing import List, Optional, TYPE_CHECKING

from numpy import mean

from mcp_agent.workflows.embedding.embedding_base import (
    FloatArray,
    EmbeddingModel,
    compute_confidence,
    compute_similarity_scores,
)
from mcp_agent.workflows.intent_classifier.intent_classifier_base import (
    Intent,
    IntentClassifier,
    IntentClassificationResult,
)

if TYPE_CHECKING:
    from mcp_agent.context import Context


class EmbeddingIntent(Intent):
    """An intent with embedding information"""

    embedding: FloatArray | None = None
    """Pre-computed embedding for this intent"""


class EmbeddingIntentClassifier(IntentClassifier):
    """
    An intent classifier that uses embedding similarity for classification.
    Supports different embedding models through the EmbeddingModel interface.

    Features:
    - Semantic similarity based classification
    - Support for example-based learning
    - Flexible embedding model support
    - Multiple similarity computation strategies
    """

    def __init__(
        self,
        intents: List[Intent],
        embedding_model: EmbeddingModel,
        context: Optional["Context"] = None,
        **kwargs,
    ):
        super().__init__(intents=intents, context=context, **kwargs)
        self.embedding_model = embedding_model
        self.initialized = False

    @classmethod
    async def create(
        cls,
        intents: List[Intent],
        embedding_model: EmbeddingModel,
    ) -> "EmbeddingIntentClassifier":
        """
        Factory method to create and initialize a classifier.
        Use this instead of constructor since we need async initialization.
        """
        instance = cls(
            intents=intents,
            embedding_model=embedding_model,
        )
        await instance.initialize()
        return instance

    async def initialize(self):
        """
        Precompute embeddings for all intents by combining their
        descriptions and examples
        """
        if self.initialized:
            return

        for intent in self.intents.values():
            # Combine all text for a rich intent representation
            intent_texts = [intent.name, intent.description] + intent.examples

            # Get embeddings for all texts
            embeddings = await self.embedding_model.embed(intent_texts)

            # Use mean pooling to combine embeddings
            embedding = mean(embeddings, axis=0)

            # Create intents with embeddings
            self.intents[intent.name] = EmbeddingIntent(
                **intent,
                embedding=embedding,
            )

        self.initialized = True

    async def classify(
        self, request: str, top_k: int = 1
    ) -> List[IntentClassificationResult]:
        """
        Classify the input text into one or more intents

        Args:
            text: Input text to classify
            top_k: Maximum number of top matches to return

        Returns:
            List of classification results, ordered by confidence
        """
        if not self.initialized:
            await self.initialize()

        # Get embedding for input
        embeddings = await self.embedding_model.embed([request])
        request_embedding = embeddings[0]  # Take first since we only embedded one text

        results: List[IntentClassificationResult] = []
        for intent_name, intent in self.intents.items():
            if intent.embedding is None:
                continue

            similarity_scores = compute_similarity_scores(
                request_embedding, intent.embedding
            )

            # Compute overall confidence score
            confidence = compute_confidence(similarity_scores)

            results.append(
                IntentClassificationResult(
                    intent=intent_name,
                    p_score=confidence,
                )
            )

        results.sort(key=lambda x: x.p_score, reverse=True)
        return results[:top_k]
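A minimal usage sketch for the embedding classifier, assuming OpenAI credentials are configured for OpenAIEmbeddingModel (which the convenience subclasses below also construct with defaults); the intent names mirror the commented example in the base module:

import asyncio

from mcp_agent.workflows.embedding.embedding_openai import OpenAIEmbeddingModel
from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
from mcp_agent.workflows.intent_classifier.intent_classifier_embedding import (
    EmbeddingIntentClassifier,
)


async def main():
    intents = [
        Intent(name="schedule_meeting", description="Schedule or set up a meeting"),
        Intent(name="check_calendar", description="Check calendar availability"),
    ]
    # create() builds the classifier and precomputes one mean-pooled embedding per intent
    classifier = await EmbeddingIntentClassifier.create(
        intents=intents,
        embedding_model=OpenAIEmbeddingModel(),
    )
    results = await classifier.classify("Am I free tomorrow afternoon?", top_k=2)
    for r in results:
        print(r.intent, r.p_score)


asyncio.run(main())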
mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py
@@ -0,0 +1,45 @@
from typing import List, Optional, TYPE_CHECKING

from mcp_agent.workflows.embedding.embedding_cohere import CohereEmbeddingModel
from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
from mcp_agent.workflows.intent_classifier.intent_classifier_embedding import (
    EmbeddingIntentClassifier,
)

if TYPE_CHECKING:
    from mcp_agent.context import Context


class CohereEmbeddingIntentClassifier(EmbeddingIntentClassifier):
    """
    An intent classifier that uses Cohere's embedding models for computing semantic simiarity based classifications.
    """

    def __init__(
        self,
        intents: List[Intent],
        embedding_model: CohereEmbeddingModel | None = None,
        context: Optional["Context"] = None,
        **kwargs,
    ):
        embedding_model = embedding_model or CohereEmbeddingModel()
        super().__init__(
            embedding_model=embedding_model, intents=intents, context=context, **kwargs
        )

    @classmethod
    async def create(
        cls,
        intents: List[Intent],
        embedding_model: CohereEmbeddingModel | None = None,
        context: Optional["Context"] = None,
    ) -> "CohereEmbeddingIntentClassifier":
        """
        Factory method to create and initialize a classifier.
        Use this instead of constructor since we need async initialization.
        """
        instance = cls(
            intents=intents, embedding_model=embedding_model, context=context
        )
        await instance.initialize()
        return instance
mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py
@@ -0,0 +1,45 @@
from typing import List, Optional, TYPE_CHECKING

from mcp_agent.workflows.embedding.embedding_openai import OpenAIEmbeddingModel
from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
from mcp_agent.workflows.intent_classifier.intent_classifier_embedding import (
    EmbeddingIntentClassifier,
)

if TYPE_CHECKING:
    from mcp_agent.context import Context


class OpenAIEmbeddingIntentClassifier(EmbeddingIntentClassifier):
    """
    An intent classifier that uses OpenAI's embedding models for computing semantic simiarity based classifications.
    """

    def __init__(
        self,
        intents: List[Intent],
        embedding_model: OpenAIEmbeddingModel | None = None,
        context: Optional["Context"] = None,
        **kwargs,
    ):
        embedding_model = embedding_model or OpenAIEmbeddingModel()
        super().__init__(
            embedding_model=embedding_model, intents=intents, context=context, **kwargs
        )

    @classmethod
    async def create(
        cls,
        intents: List[Intent],
        embedding_model: OpenAIEmbeddingModel | None = None,
        context: Optional["Context"] = None,
    ) -> "OpenAIEmbeddingIntentClassifier":
        """
        Factory method to create and initialize a classifier.
        Use this instead of constructor since we need async initialization.
        """
        instance = cls(
            intents=intents, embedding_model=embedding_model, context=context
        )
        await instance.initialize()
        return instance
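The two provider-specific subclasses above only supply a default embedding model, so inside the async body of the embedding sketch earlier the call site shrinks to (same credential assumptions apply):

from mcp_agent.workflows.intent_classifier.intent_classifier_embedding_openai import (
    OpenAIEmbeddingIntentClassifier,
)

# uses a default OpenAIEmbeddingModel under the hood
classifier = await OpenAIEmbeddingIntentClassifier.create(intents=intents)
results = await classifier.classify("Show me my calendar", top_k=1)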
mcp_agent/workflows/intent_classifier/intent_classifier_llm.py
@@ -0,0 +1,161 @@
from typing import List, Literal, Optional, TYPE_CHECKING
from pydantic import BaseModel

from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM
from mcp_agent.workflows.intent_classifier.intent_classifier_base import (
    Intent,
    IntentClassifier,
    IntentClassificationResult,
)

if TYPE_CHECKING:
    from mcp_agent.context import Context

DEFAULT_INTENT_CLASSIFICATION_INSTRUCTION = """
You are a precise intent classifier that analyzes user requests to determine their intended action or purpose.
Below are the available intents with their descriptions and examples:

{context}

Your task is to analyze the following request and determine the most likely intent(s). Consider:
- How well the request matches the intent descriptions and examples
- Any specific entities or parameters that should be extracted
- The confidence level in the classification

Request: {request}

Respond in JSON format:
{{
    "classifications": [
        {{
            "intent": <intent name>,
            "confidence": <float between 0 and 1>,
            "extracted_entities": {{
                "entity_name": "entity_value"
            }},
            "reasoning": <brief explanation>
        }}
    ]
}}

Return up to {top_k} most likely intents. Only include intents with reasonable confidence (>0.5).
If no intents match well, return an empty list.
"""


class LLMIntentClassificationResult(IntentClassificationResult):
    """The result of intent classification using an LLM."""

    confidence: Literal["low", "medium", "high"]
    """Confidence level of the classification"""

    reasoning: str | None = None
    """Optional explanation of why this intent was chosen"""


class StructuredIntentResponse(BaseModel):
    """The complete structured response from the LLM"""

    classifications: List[LLMIntentClassificationResult]


class LLMIntentClassifier(IntentClassifier):
    """
    An intent classifier that uses an LLM to determine the user's intent.
    Particularly useful when you need:
    - Flexible understanding of natural language
    - Detailed reasoning about classifications
    - Entity extraction alongside classification
    """

    def __init__(
        self,
        llm: AugmentedLLM,
        intents: List[Intent],
        classification_instruction: str | None = None,
        context: Optional["Context"] = None,
        **kwargs,
    ):
        super().__init__(intents=intents, context=context, **kwargs)
        self.llm = llm
        self.classification_instruction = classification_instruction

    @classmethod
    async def create(
        cls,
        llm: AugmentedLLM,
        intents: List[Intent],
        classification_instruction: str | None = None,
    ) -> "LLMIntentClassifier":
        """
        Factory method to create and initialize a classifier.
        Use this instead of constructor since we need async initialization.
        """
        instance = cls(
            llm=llm,
            intents=intents,
            classification_instruction=classification_instruction,
        )
        await instance.initialize()
        return instance

    async def classify(
        self, request: str, top_k: int = 1
    ) -> List[LLMIntentClassificationResult]:
        if not self.initialized:
            self.initialize()

        classification_instruction = (
            self.classification_instruction or DEFAULT_INTENT_CLASSIFICATION_INSTRUCTION
        )

        # Generate the context with intent descriptions and examples
        context = self._generate_context()

        # Format the prompt with all the necessary information
        prompt = classification_instruction.format(
            context=context, request=request, top_k=top_k
        )

        # Get classification from LLM
        response = await self.llm.generate_structured(
            message=prompt, response_model=StructuredIntentResponse
        )

        if not response or not response.classifications:
            return []

        results = []
        for classification in response.classifications:
            intent = self.intents.get(classification.intent)
            if not intent:
                # Skip invalid categories
                # TODO: saqadri - log or raise an error
                continue

            results.append(classification)

        return results[:top_k]

    def _generate_context(self) -> str:
        """Generate a formatted context string describing all intents"""
        context_parts = []

        for idx, intent in enumerate(self.intents.values(), 1):
            description = (
                f"{idx}. Intent: {intent.name}\nDescription: {intent.description}"
            )

            if intent.examples:
                examples = "\n".join(f"- {example}" for example in intent.examples)
                description += f"\nExamples:\n{examples}"

            if intent.metadata:
                metadata = "\n".join(
                    f"- {key}: {value}" for key, value in intent.metadata.items()
                )
                description += f"\nAdditional Information:\n{metadata}"

            context_parts.append(description)

        return "\n\n".join(context_parts)
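The classifier depends on generate_structured returning a StructuredIntentResponse. The shape it expects can be checked offline by validating a hand-written payload (the values are invented; note that confidence here is the low/medium/high literal required by LLMIntentClassificationResult, not the float the default prompt asks for):

from mcp_agent.workflows.intent_classifier.intent_classifier_llm import (
    StructuredIntentResponse,
)

payload = {
    "classifications": [
        {
            "intent": "schedule_meeting",
            "confidence": "high",
            "extracted_entities": {"person": "Sarah"},
            "reasoning": "The request asks to set up a meeting.",
        }
    ]
}

# pydantic validates and coerces the nested dict into LLMIntentClassificationResult
response = StructuredIntentResponse(**payload)
print(response.classifications[0].intent)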
mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py
@@ -0,0 +1,60 @@
from typing import List, Optional, TYPE_CHECKING

from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM
from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
from mcp_agent.workflows.intent_classifier.intent_classifier_llm import (
    LLMIntentClassifier,
)

if TYPE_CHECKING:
    from mcp_agent.context import Context

CLASSIFIER_SYSTEM_INSTRUCTION = """
You are a precise intent classifier that analyzes input requests to determine their intended action or purpose.
You are provided with a request and a list of intents to choose from.
You can choose one or more intents, or choose none if no intent is appropriate.
"""


class AnthropicLLMIntentClassifier(LLMIntentClassifier):
    """
    An LLM router that uses an Anthropic model to make routing decisions.
    """

    def __init__(
        self,
        intents: List[Intent],
        classification_instruction: str | None = None,
        context: Optional["Context"] = None,
        **kwargs,
    ):
        anthropic_llm = AnthropicAugmentedLLM(
            instruction=CLASSIFIER_SYSTEM_INSTRUCTION, context=context
        )

        super().__init__(
            llm=anthropic_llm,
            intents=intents,
            classification_instruction=classification_instruction,
            context=context,
            **kwargs,
        )

    @classmethod
    async def create(
        cls,
        intents: List[Intent],
        classification_instruction: str | None = None,
        context: Optional["Context"] = None,
    ) -> "AnthropicLLMIntentClassifier":
        """
        Factory method to create and initialize a classifier.
        Use this instead of constructor since we need async initialization.
        """
        instance = cls(
            intents=intents,
            classification_instruction=classification_instruction,
            context=context,
        )
        await instance.initialize()
        return instance
mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py
@@ -0,0 +1,60 @@
from typing import List, Optional, TYPE_CHECKING

from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM
from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
from mcp_agent.workflows.intent_classifier.intent_classifier_llm import (
    LLMIntentClassifier,
)

if TYPE_CHECKING:
    from mcp_agent.context import Context

CLASSIFIER_SYSTEM_INSTRUCTION = """
You are a precise intent classifier that analyzes input requests to determine their intended action or purpose.
You are provided with a request and a list of intents to choose from.
You can choose one or more intents, or choose none if no intent is appropriate.
"""


class OpenAILLMIntentClassifier(LLMIntentClassifier):
    """
    An LLM router that uses an OpenAI model to make routing decisions.
    """

    def __init__(
        self,
        intents: List[Intent],
        classification_instruction: str | None = None,
        context: Optional["Context"] = None,
        **kwargs,
    ):
        openai_llm = OpenAIAugmentedLLM(
            instruction=CLASSIFIER_SYSTEM_INSTRUCTION, context=context
        )

        super().__init__(
            llm=openai_llm,
            intents=intents,
            classification_instruction=classification_instruction,
            context=context,
            **kwargs,
        )

    @classmethod
    async def create(
        cls,
        intents: List[Intent],
        classification_instruction: str | None = None,
        context: Optional["Context"] = None,
    ) -> "OpenAILLMIntentClassifier":
        """
        Factory method to create and initialize a classifier.
        Use this instead of constructor since we need async initialization.
        """
        instance = cls(
            intents=intents,
            classification_instruction=classification_instruction,
            context=context,
        )
        await instance.initialize()
        return instance
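An end-to-end sketch with the OpenAI-backed classifier, assuming an OpenAI key is available to the underlying OpenAIAugmentedLLM via the app configuration; the intents are the same illustrative ones used earlier:

import asyncio

from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
from mcp_agent.workflows.intent_classifier.intent_classifier_llm_openai import (
    OpenAILLMIntentClassifier,
)


async def main():
    # create() constructs the OpenAI-backed AugmentedLLM internally
    classifier = await OpenAILLMIntentClassifier.create(
        intents=[
            Intent(name="schedule_meeting", description="Schedule a meeting"),
            Intent(name="check_calendar", description="Check calendar availability"),
        ],
    )
    results = await classifier.classify(
        "Can you set up a meeting with Sarah for tomorrow?", top_k=2
    )
    for result in results:
        print(result.intent, result.confidence, result.extracted_entities)


asyncio.run(main())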
File without changes