fast-agent-mcp 0.1.11__py3-none-any.whl → 0.1.13__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to their public registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the registry.
Files changed (131)
  1. {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/METADATA +1 -1
  2. fast_agent_mcp-0.1.13.dist-info/RECORD +164 -0
  3. mcp_agent/agents/agent.py +37 -102
  4. mcp_agent/app.py +16 -27
  5. mcp_agent/cli/commands/bootstrap.py +22 -52
  6. mcp_agent/cli/commands/config.py +4 -4
  7. mcp_agent/cli/commands/setup.py +11 -26
  8. mcp_agent/cli/main.py +6 -9
  9. mcp_agent/cli/terminal.py +2 -2
  10. mcp_agent/config.py +1 -5
  11. mcp_agent/context.py +13 -26
  12. mcp_agent/context_dependent.py +3 -7
  13. mcp_agent/core/agent_app.py +46 -122
  14. mcp_agent/core/agent_types.py +29 -2
  15. mcp_agent/core/agent_utils.py +3 -5
  16. mcp_agent/core/decorators.py +6 -14
  17. mcp_agent/core/enhanced_prompt.py +25 -52
  18. mcp_agent/core/error_handling.py +1 -1
  19. mcp_agent/core/exceptions.py +8 -8
  20. mcp_agent/core/factory.py +30 -72
  21. mcp_agent/core/fastagent.py +48 -88
  22. mcp_agent/core/mcp_content.py +10 -19
  23. mcp_agent/core/prompt.py +8 -15
  24. mcp_agent/core/proxies.py +34 -25
  25. mcp_agent/core/request_params.py +46 -0
  26. mcp_agent/core/types.py +6 -6
  27. mcp_agent/core/validation.py +16 -16
  28. mcp_agent/executor/decorator_registry.py +11 -23
  29. mcp_agent/executor/executor.py +8 -17
  30. mcp_agent/executor/task_registry.py +2 -4
  31. mcp_agent/executor/temporal.py +28 -74
  32. mcp_agent/executor/workflow.py +3 -5
  33. mcp_agent/executor/workflow_signal.py +17 -29
  34. mcp_agent/human_input/handler.py +4 -9
  35. mcp_agent/human_input/types.py +2 -3
  36. mcp_agent/logging/events.py +1 -5
  37. mcp_agent/logging/json_serializer.py +7 -6
  38. mcp_agent/logging/listeners.py +20 -23
  39. mcp_agent/logging/logger.py +15 -17
  40. mcp_agent/logging/rich_progress.py +10 -8
  41. mcp_agent/logging/tracing.py +4 -6
  42. mcp_agent/logging/transport.py +24 -24
  43. mcp_agent/mcp/gen_client.py +4 -12
  44. mcp_agent/mcp/interfaces.py +107 -88
  45. mcp_agent/mcp/mcp_agent_client_session.py +11 -19
  46. mcp_agent/mcp/mcp_agent_server.py +8 -10
  47. mcp_agent/mcp/mcp_aggregator.py +49 -122
  48. mcp_agent/mcp/mcp_connection_manager.py +16 -37
  49. mcp_agent/mcp/prompt_message_multipart.py +12 -18
  50. mcp_agent/mcp/prompt_serialization.py +13 -38
  51. mcp_agent/mcp/prompts/prompt_load.py +99 -0
  52. mcp_agent/mcp/prompts/prompt_server.py +21 -128
  53. mcp_agent/mcp/prompts/prompt_template.py +20 -42
  54. mcp_agent/mcp/resource_utils.py +8 -17
  55. mcp_agent/mcp/sampling.py +62 -64
  56. mcp_agent/mcp/stdio.py +11 -8
  57. mcp_agent/mcp_server/__init__.py +1 -1
  58. mcp_agent/mcp_server/agent_server.py +10 -17
  59. mcp_agent/mcp_server_registry.py +13 -35
  60. mcp_agent/resources/examples/data-analysis/analysis-campaign.py +1 -1
  61. mcp_agent/resources/examples/data-analysis/analysis.py +1 -1
  62. mcp_agent/resources/examples/data-analysis/slides.py +110 -0
  63. mcp_agent/resources/examples/internal/agent.py +2 -1
  64. mcp_agent/resources/examples/internal/job.py +2 -1
  65. mcp_agent/resources/examples/internal/prompt_category.py +1 -1
  66. mcp_agent/resources/examples/internal/prompt_sizing.py +3 -5
  67. mcp_agent/resources/examples/internal/sizer.py +2 -1
  68. mcp_agent/resources/examples/internal/social.py +2 -1
  69. mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +1 -1
  70. mcp_agent/resources/examples/prompting/__init__.py +1 -1
  71. mcp_agent/resources/examples/prompting/agent.py +2 -1
  72. mcp_agent/resources/examples/prompting/image_server.py +5 -11
  73. mcp_agent/resources/examples/researcher/researcher-eval.py +1 -1
  74. mcp_agent/resources/examples/researcher/researcher-imp.py +3 -4
  75. mcp_agent/resources/examples/researcher/researcher.py +2 -1
  76. mcp_agent/resources/examples/workflows/agent_build.py +2 -1
  77. mcp_agent/resources/examples/workflows/chaining.py +2 -1
  78. mcp_agent/resources/examples/workflows/evaluator.py +2 -1
  79. mcp_agent/resources/examples/workflows/human_input.py +2 -1
  80. mcp_agent/resources/examples/workflows/orchestrator.py +2 -1
  81. mcp_agent/resources/examples/workflows/parallel.py +2 -1
  82. mcp_agent/resources/examples/workflows/router.py +2 -1
  83. mcp_agent/resources/examples/workflows/sse.py +1 -1
  84. mcp_agent/telemetry/usage_tracking.py +2 -1
  85. mcp_agent/ui/console_display.py +17 -41
  86. mcp_agent/workflows/embedding/embedding_base.py +1 -4
  87. mcp_agent/workflows/embedding/embedding_cohere.py +2 -2
  88. mcp_agent/workflows/embedding/embedding_openai.py +4 -13
  89. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +23 -57
  90. mcp_agent/workflows/intent_classifier/intent_classifier_base.py +5 -8
  91. mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +7 -11
  92. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +4 -8
  93. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +4 -8
  94. mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +11 -22
  95. mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +3 -3
  96. mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +4 -6
  97. mcp_agent/workflows/llm/anthropic_utils.py +8 -29
  98. mcp_agent/workflows/llm/augmented_llm.py +94 -332
  99. mcp_agent/workflows/llm/augmented_llm_anthropic.py +43 -76
  100. mcp_agent/workflows/llm/augmented_llm_openai.py +46 -100
  101. mcp_agent/workflows/llm/augmented_llm_passthrough.py +42 -20
  102. mcp_agent/workflows/llm/augmented_llm_playback.py +8 -6
  103. mcp_agent/workflows/llm/memory.py +103 -0
  104. mcp_agent/workflows/llm/model_factory.py +9 -21
  105. mcp_agent/workflows/llm/openai_utils.py +1 -1
  106. mcp_agent/workflows/llm/prompt_utils.py +39 -27
  107. mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +246 -184
  108. mcp_agent/workflows/llm/providers/multipart_converter_openai.py +212 -202
  109. mcp_agent/workflows/llm/providers/openai_multipart.py +19 -61
  110. mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +11 -212
  111. mcp_agent/workflows/llm/providers/sampling_converter_openai.py +13 -215
  112. mcp_agent/workflows/llm/sampling_converter.py +117 -0
  113. mcp_agent/workflows/llm/sampling_format_converter.py +12 -29
  114. mcp_agent/workflows/orchestrator/orchestrator.py +24 -67
  115. mcp_agent/workflows/orchestrator/orchestrator_models.py +14 -40
  116. mcp_agent/workflows/parallel/fan_in.py +17 -47
  117. mcp_agent/workflows/parallel/fan_out.py +6 -12
  118. mcp_agent/workflows/parallel/parallel_llm.py +9 -26
  119. mcp_agent/workflows/router/router_base.py +29 -59
  120. mcp_agent/workflows/router/router_embedding.py +11 -25
  121. mcp_agent/workflows/router/router_embedding_cohere.py +2 -2
  122. mcp_agent/workflows/router/router_embedding_openai.py +2 -2
  123. mcp_agent/workflows/router/router_llm.py +12 -28
  124. mcp_agent/workflows/swarm/swarm.py +20 -48
  125. mcp_agent/workflows/swarm/swarm_anthropic.py +2 -2
  126. mcp_agent/workflows/swarm/swarm_openai.py +2 -2
  127. fast_agent_mcp-0.1.11.dist-info/RECORD +0 -160
  128. mcp_agent/workflows/llm/llm_selector.py +0 -345
  129. {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/WHEEL +0 -0
  130. {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/entry_points.txt +0 -0
  131. {fast_agent_mcp-0.1.11.dist-info → fast_agent_mcp-0.1.13.dist-info}/licenses/LICENSE +0 -0
mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py
@@ -1,8 +1,12 @@
 import contextlib
 from enum import Enum
-from typing import Callable, List, Optional, Type, TYPE_CHECKING
+from typing import TYPE_CHECKING, Callable, List, Optional, Type
+
 from pydantic import BaseModel, Field

+from mcp_agent.agents.agent import Agent
+from mcp_agent.core.agent_types import AgentConfig
+from mcp_agent.logging.logger import get_logger
 from mcp_agent.workflows.llm.augmented_llm import (
     AugmentedLLM,
     MessageParamT,
@@ -10,8 +14,6 @@ from mcp_agent.workflows.llm.augmented_llm import (
     ModelT,
     RequestParams,
 )
-from mcp_agent.agents.agent import Agent, AgentConfig
-from mcp_agent.logging.logger import get_logger
 from mcp_agent.workflows.llm.augmented_llm_passthrough import PassthroughLLM

 if TYPE_CHECKING:
@@ -33,15 +35,9 @@ class EvaluationResult(BaseModel):
     """Model representing the evaluation result from the evaluator LLM"""

     rating: QualityRating = Field(description="Quality rating of the response")
-    feedback: str = Field(
-        description="Specific feedback and suggestions for improvement"
-    )
-    needs_improvement: bool = Field(
-        description="Whether the output needs further improvement"
-    )
-    focus_areas: List[str] = Field(
-        default_factory=list, description="Specific areas to focus on in next iteration"
-    )
+    feedback: str = Field(description="Specific feedback and suggestions for improvement")
+    needs_improvement: bool = Field(description="Whether the output needs further improvement")
+    focus_areas: List[str] = Field(default_factory=list, description="Specific areas to focus on in next iteration")


 class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
@@ -68,14 +64,13 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
         """Initialize default parameters using the workflow's settings."""
         return RequestParams(
-            modelPreferences=self.model_preferences,
             systemPrompt=self.instruction,
             parallel_tool_calls=True,
             max_iterations=10,
             use_history=self.generator_use_history,  # Use generator's history setting
         )

-    def _init_request_params(self):
+    def _init_request_params(self) -> None:
         """Initialize request parameters for both generator and evaluator components."""
         # Set up workflow's default params based on generator's history setting
         self.default_request_params = self._initialize_default_params({})
@@ -94,7 +89,7 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
         context: Optional["Context"] = None,
         name: Optional[str] = None,
         instruction: Optional[str] = None,
-    ):
+    ) -> None:
         """
         Initialize the evaluator-optimizer workflow.

@@ -120,14 +115,10 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
         if isinstance(generator, Agent):
             self.generator_use_history = generator.config.use_history
         elif isinstance(generator, AugmentedLLM):
-            if hasattr(generator, "aggregator") and isinstance(
-                generator.aggregator, Agent
-            ):
+            if hasattr(generator, "aggregator") and isinstance(generator.aggregator, Agent):
                 self.generator_use_history = generator.aggregator.config.use_history
             elif hasattr(generator, "default_request_params"):
-                self.generator_use_history = getattr(
-                    generator.default_request_params, "use_history", False
-                )
+                self.generator_use_history = getattr(generator.default_request_params, "use_history", False)
         # All other types default to False

         # Initialize parent class
@@ -140,20 +131,12 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
         # Set up the generator based on type
         if isinstance(generator, Agent):
             if not llm_factory:
-                raise ValueError(
-                    "llm_factory is required when using an Agent generator"
-                )
+                raise ValueError("llm_factory is required when using an Agent generator")

             # Use existing LLM if available, otherwise create new one
-            self.generator_llm = getattr(generator, "_llm", None) or llm_factory(
-                agent=generator
-            )
+            self.generator_llm = getattr(generator, "_llm", None) or llm_factory(agent=generator)
             self.aggregator = generator
-            self.instruction = instruction or (
-                generator.instruction
-                if isinstance(generator.instruction, str)
-                else None
-            )
+            self.instruction = instruction or (generator.instruction if isinstance(generator.instruction, str) else None)
         elif isinstance(generator, AugmentedLLM):
             self.generator_llm = generator
             self.aggregator = getattr(generator, "aggregator", None)
@@ -162,16 +145,12 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
             # ChainProxy-like object
             self.generator_llm = generator
             self.aggregator = None
-            self.instruction = (
-                instruction or f"Chain of agents: {', '.join(generator._sequence)}"
-            )
+            self.instruction = instruction or f"Chain of agents: {', '.join(generator._sequence)}"

         # Set up the evaluator - always disable history
         if isinstance(evaluator, str):
             if not llm_factory:
-                raise ValueError(
-                    "llm_factory is required when using a string evaluator"
-                )
+                raise ValueError("llm_factory is required when using a string evaluator")

             evaluator_agent = Agent(
                 name="Evaluator",
@@ -186,15 +165,11 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
             self.evaluator_llm = llm_factory(agent=evaluator_agent)
         elif isinstance(evaluator, Agent):
             if not llm_factory:
-                raise ValueError(
-                    "llm_factory is required when using an Agent evaluator"
-                )
+                raise ValueError("llm_factory is required when using an Agent evaluator")

             # Disable history and use/create LLM
             evaluator.config.use_history = False
-            self.evaluator_llm = getattr(evaluator, "_llm", None) or llm_factory(
-                agent=evaluator
-            )
+            self.evaluator_llm = getattr(evaluator, "_llm", None) or llm_factory(agent=evaluator)
         elif isinstance(evaluator, AugmentedLLM):
             self.evaluator_llm = evaluator
             # Ensure history is disabled
@@ -282,10 +257,7 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
             )

             # Check if we've reached acceptable quality
-            if (
-                evaluation_result.rating.value >= self.min_rating.value
-                or not evaluation_result.needs_improvement
-            ):
+            if evaluation_result.rating.value >= self.min_rating.value or not evaluation_result.needs_improvement:
                 logger.debug(
                     f"Acceptable quality {evaluation_result.rating.value} reached",
                     data={
@@ -338,9 +310,7 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
         request_params: RequestParams | None = None,
     ) -> ModelT:
         """Generate an optimized structured response"""
-        response_str = await self.generate_str(
-            message=message, request_params=request_params
-        )
+        response_str = await self.generate_str(message=message, request_params=request_params)

         return await self.generator.generate_structured(
             message=response_str,
@@ -348,9 +318,7 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
             request_params=request_params,
         )

-    def _build_eval_prompt(
-        self, original_request: str, current_response: str, iteration: int
-    ) -> str:
+    def _build_eval_prompt(self, original_request: str, current_response: str, iteration: int) -> str:
         """Build the evaluation prompt for the evaluator"""
         return f"""
 You are an expert evaluator for content quality. Your task is to evaluate a response against the user's original request.
@@ -409,9 +377,7 @@ Be concrete and actionable in your recommendations.
         """Build the refinement prompt for the optimizer"""
         # Get the correct history setting - use param if provided, otherwise class default
         if use_history is None:
-            use_history = (
-                self.generator_use_history
-            )  # Use generator's setting as default
+            use_history = self.generator_use_history  # Use generator's setting as default

         # Start with clear non-delimited instructions
         prompt = f"""
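Aside: the acceptance check collapsed to one line above is the heart of the evaluator-optimizer loop. A minimal, self-contained sketch of that gate follows; the QualityRating member names are assumed for illustration (the diff only shows that the enum's values are ordered and compared via .value):

from enum import Enum
from typing import List

from pydantic import BaseModel, Field


class QualityRating(Enum):
    # Member names are illustrative; the diff shows only ordered .value comparisons.
    POOR = 0
    FAIR = 1
    GOOD = 2
    EXCELLENT = 3


class EvaluationResult(BaseModel):
    rating: QualityRating = Field(description="Quality rating of the response")
    feedback: str = Field(description="Specific feedback and suggestions for improvement")
    needs_improvement: bool = Field(description="Whether the output needs further improvement")
    focus_areas: List[str] = Field(default_factory=list, description="Specific areas to focus on in next iteration")


def should_accept(result: EvaluationResult, min_rating: QualityRating) -> bool:
    # Same condition as the refactored single-line check in the diff above:
    # stop when the rating meets the floor or no further work is requested.
    return result.rating.value >= min_rating.value or not result.needs_improvement


result = EvaluationResult(rating=QualityRating.GOOD, feedback="Tighten the intro.", needs_improvement=False)
assert should_accept(result, min_rating=QualityRating.EXCELLENT)  # evaluator requested no further work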
mcp_agent/workflows/intent_classifier/intent_classifier_base.py
@@ -1,5 +1,6 @@
 from abc import ABC, abstractmethod
-from typing import Dict, List, Optional, TYPE_CHECKING
+from typing import TYPE_CHECKING, Dict, List, Optional
+
 from pydantic import BaseModel, Field

 if TYPE_CHECKING:
@@ -54,9 +55,7 @@ class IntentClassifier(ABC):
     - Determining the type of analysis requested for a dataset
     """

-    def __init__(
-        self, intents: List[Intent], context: Optional["Context"] = None, **kwargs
-    ):
+    def __init__(self, intents: List[Intent], context: Optional["Context"] = None, **kwargs) -> None:
         super().__init__(context=context, **kwargs)
         self.intents = {intent.name: intent for intent in intents}
         self.initialized: bool = False
@@ -65,9 +64,7 @@ class IntentClassifier(ABC):
             raise ValueError("At least one intent must be provided")

     @abstractmethod
-    async def classify(
-        self, request: str, top_k: int = 1
-    ) -> List[IntentClassificationResult]:
+    async def classify(self, request: str, top_k: int = 1) -> List[IntentClassificationResult]:
         """
         Classify the input request into one or more intents.

@@ -79,7 +76,7 @@ class IntentClassifier(ABC):
             List of classification results, ordered by confidence
         """

-    async def initialize(self):
+    async def initialize(self) -> None:
         """Initialize the classifier. Override this method if needed."""
         self.initialized = True

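For orientation, the contract above boils down to: construct with a non-empty list of intents, then implement async classify(request, top_k) returning results ordered by confidence. A standalone sketch of a trivial keyword-based implementation (the Result type is a hypothetical stand-in; the real IntentClassificationResult fields are not shown in this diff):

import asyncio
from dataclasses import dataclass
from typing import Dict, List


@dataclass
class Result:  # stand-in for IntentClassificationResult
    intent: str
    confidence: float


class KeywordIntentClassifier:
    def __init__(self, intents: Dict[str, List[str]]) -> None:
        # intent name -> keywords, echoing how the base class keys self.intents by name
        if not intents:
            raise ValueError("At least one intent must be provided")
        self.intents = intents

    async def classify(self, request: str, top_k: int = 1) -> List[Result]:
        words = set(request.lower().split())
        scored = [
            Result(name, len(words & set(keywords)) / max(len(keywords), 1))
            for name, keywords in self.intents.items()
        ]
        scored.sort(key=lambda r: r.confidence, reverse=True)  # ordered by confidence
        return scored[:top_k]


classifier = KeywordIntentClassifier({"greeting": ["hello", "hi"], "billing": ["invoice", "refund"]})
print(asyncio.run(classifier.classify("hello there", top_k=1)))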
mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py
@@ -1,17 +1,17 @@
-from typing import List, Optional, TYPE_CHECKING
+from typing import TYPE_CHECKING, List, Optional

 from numpy import mean

 from mcp_agent.workflows.embedding.embedding_base import (
-    FloatArray,
     EmbeddingModel,
+    FloatArray,
     compute_confidence,
     compute_similarity_scores,
 )
 from mcp_agent.workflows.intent_classifier.intent_classifier_base import (
     Intent,
-    IntentClassifier,
     IntentClassificationResult,
+    IntentClassifier,
 )

 if TYPE_CHECKING:
@@ -43,7 +43,7 @@ class EmbeddingIntentClassifier(IntentClassifier):
         embedding_model: EmbeddingModel,
         context: Optional["Context"] = None,
         **kwargs,
-    ):
+    ) -> None:
         super().__init__(intents=intents, context=context, **kwargs)
         self.embedding_model = embedding_model
         self.initialized = False
@@ -65,7 +65,7 @@ class EmbeddingIntentClassifier(IntentClassifier):
         await instance.initialize()
         return instance

-    async def initialize(self):
+    async def initialize(self) -> None:
         """
         Precompute embeddings for all intents by combining their
         descriptions and examples
@@ -91,9 +91,7 @@ class EmbeddingIntentClassifier(IntentClassifier):

         self.initialized = True

-    async def classify(
-        self, request: str, top_k: int = 1
-    ) -> List[IntentClassificationResult]:
+    async def classify(self, request: str, top_k: int = 1) -> List[IntentClassificationResult]:
         """
         Classify the input text into one or more intents

@@ -116,9 +114,7 @@ class EmbeddingIntentClassifier(IntentClassifier):
             if intent.embedding is None:
                 continue

-            similarity_scores = compute_similarity_scores(
-                request_embedding, intent.embedding
-            )
+            similarity_scores = compute_similarity_scores(request_embedding, intent.embedding)

             # Compute overall confidence score
             confidence = compute_confidence(similarity_scores)
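The classify() hunk above scores the request embedding against each intent's precomputed embeddings, then reduces those scores to one confidence. The helpers' internals are not shown in this diff, so the sketch below assumes cosine similarity with a mean reduction:

import numpy as np


def compute_similarity_scores(request_emb: np.ndarray, intent_embs: np.ndarray) -> np.ndarray:
    # Cosine similarity between the request and each row of intent_embs
    # (one embedding per intent description/example).
    a = request_emb / np.linalg.norm(request_emb)
    b = intent_embs / np.linalg.norm(intent_embs, axis=1, keepdims=True)
    return b @ a


def compute_confidence(scores: np.ndarray) -> float:
    # Collapse the per-embedding scores into a single confidence value.
    return float(np.mean(scores))


request = np.array([0.9, 0.1, 0.0])
intent = np.array([[1.0, 0.0, 0.0], [0.8, 0.2, 0.0]])
print(compute_confidence(compute_similarity_scores(request, intent)))  # ~0.99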
mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py
@@ -1,4 +1,4 @@
-from typing import List, Optional, TYPE_CHECKING
+from typing import TYPE_CHECKING, List, Optional

 from mcp_agent.workflows.embedding.embedding_cohere import CohereEmbeddingModel
 from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
@@ -21,11 +21,9 @@ class CohereEmbeddingIntentClassifier(EmbeddingIntentClassifier):
         embedding_model: CohereEmbeddingModel | None = None,
         context: Optional["Context"] = None,
         **kwargs,
-    ):
+    ) -> None:
         embedding_model = embedding_model or CohereEmbeddingModel()
-        super().__init__(
-            embedding_model=embedding_model, intents=intents, context=context, **kwargs
-        )
+        super().__init__(embedding_model=embedding_model, intents=intents, context=context, **kwargs)

     @classmethod
     async def create(
@@ -38,8 +36,6 @@ class CohereEmbeddingIntentClassifier(EmbeddingIntentClassifier):
         Factory method to create and initialize a classifier.
         Use this instead of constructor since we need async initialization.
         """
-        instance = cls(
-            intents=intents, embedding_model=embedding_model, context=context
-        )
+        instance = cls(intents=intents, embedding_model=embedding_model, context=context)
         await instance.initialize()
         return instance
mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py
@@ -1,4 +1,4 @@
-from typing import List, Optional, TYPE_CHECKING
+from typing import TYPE_CHECKING, List, Optional

 from mcp_agent.workflows.embedding.embedding_openai import OpenAIEmbeddingModel
 from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
@@ -21,11 +21,9 @@ class OpenAIEmbeddingIntentClassifier(EmbeddingIntentClassifier):
         embedding_model: OpenAIEmbeddingModel | None = None,
         context: Optional["Context"] = None,
         **kwargs,
-    ):
+    ) -> None:
         embedding_model = embedding_model or OpenAIEmbeddingModel()
-        super().__init__(
-            embedding_model=embedding_model, intents=intents, context=context, **kwargs
-        )
+        super().__init__(embedding_model=embedding_model, intents=intents, context=context, **kwargs)

     @classmethod
     async def create(
@@ -38,8 +36,6 @@ class OpenAIEmbeddingIntentClassifier(EmbeddingIntentClassifier):
         Factory method to create and initialize a classifier.
         Use this instead of constructor since we need async initialization.
         """
-        instance = cls(
-            intents=intents, embedding_model=embedding_model, context=context
-        )
+        instance = cls(intents=intents, embedding_model=embedding_model, context=context)
         await instance.initialize()
         return instance
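A hypothetical end-to-end use of the factory pattern above. The Intent field names follow the attributes referenced elsewhere in this diff (name, description), and the keyword-only create(intents=...) call with a default embedding model is an assumption; running it needs fast-agent installed plus embedding-provider credentials:

import asyncio

from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
from mcp_agent.workflows.intent_classifier.intent_classifier_embedding_openai import (
    OpenAIEmbeddingIntentClassifier,
)


async def main() -> None:
    intents = [
        Intent(name="greeting", description="The user is saying hello"),
        Intent(name="billing", description="The user asks about invoices or refunds"),
    ]
    # create() awaits initialize(), which precomputes the intent embeddings;
    # that is why the docstring says to prefer it over the constructor.
    classifier = await OpenAIEmbeddingIntentClassifier.create(intents=intents)
    results = await classifier.classify("hi, my invoice looks wrong", top_k=2)
    print(results)


asyncio.run(main())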
mcp_agent/workflows/intent_classifier/intent_classifier_llm.py
@@ -1,12 +1,13 @@
-from typing import List, Literal, Optional, TYPE_CHECKING
+from typing import TYPE_CHECKING, List, Literal, Optional
+
 from pydantic import BaseModel

-from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM
 from mcp_agent.workflows.intent_classifier.intent_classifier_base import (
     Intent,
-    IntentClassifier,
     IntentClassificationResult,
+    IntentClassifier,
 )
+from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM

 if TYPE_CHECKING:
     from mcp_agent.context import Context
@@ -75,7 +76,7 @@ class LLMIntentClassifier(IntentClassifier):
         classification_instruction: str | None = None,
         context: Optional["Context"] = None,
         **kwargs,
-    ):
+    ) -> None:
         super().__init__(intents=intents, context=context, **kwargs)
         self.llm = llm
         self.classification_instruction = classification_instruction
@@ -99,28 +100,20 @@ class LLMIntentClassifier(IntentClassifier):
         await instance.initialize()
         return instance

-    async def classify(
-        self, request: str, top_k: int = 1
-    ) -> List[LLMIntentClassificationResult]:
+    async def classify(self, request: str, top_k: int = 1) -> List[LLMIntentClassificationResult]:
         if not self.initialized:
             self.initialize()

-        classification_instruction = (
-            self.classification_instruction or DEFAULT_INTENT_CLASSIFICATION_INSTRUCTION
-        )
+        classification_instruction = self.classification_instruction or DEFAULT_INTENT_CLASSIFICATION_INSTRUCTION

         # Generate the context with intent descriptions and examples
         context = self._generate_context()

         # Format the prompt with all the necessary information
-        prompt = classification_instruction.format(
-            context=context, request=request, top_k=top_k
-        )
+        prompt = classification_instruction.format(context=context, request=request, top_k=top_k)

         # Get classification from LLM
-        response = await self.llm.generate_structured(
-            message=prompt, response_model=StructuredIntentResponse
-        )
+        response = await self.llm.generate_structured(message=prompt, response_model=StructuredIntentResponse)

         if not response or not response.classifications:
             return []
@@ -142,18 +135,14 @@ class LLMIntentClassifier(IntentClassifier):
         context_parts = []

         for idx, intent in enumerate(self.intents.values(), 1):
-            description = (
-                f"{idx}. Intent: {intent.name}\nDescription: {intent.description}"
-            )
+            description = f"{idx}. Intent: {intent.name}\nDescription: {intent.description}"

             if intent.examples:
                 examples = "\n".join(f"- {example}" for example in intent.examples)
                 description += f"\nExamples:\n{examples}"

             if intent.metadata:
-                metadata = "\n".join(
-                    f"- {key}: {value}" for key, value in intent.metadata.items()
-                )
+                metadata = "\n".join(f"- {key}: {value}" for key, value in intent.metadata.items())
                 description += f"\nAdditional Information:\n{metadata}"

             context_parts.append(description)
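Note from the classify() hunk that the instruction template is filled with exactly three placeholders, so a custom classification_instruction only has to provide {context}, {request}, and {top_k}. A sketch (the wording is illustrative):

CUSTOM_INSTRUCTION = """\
You are an intent classifier. The available intents are:

{context}

Return the {top_k} most likely intent(s) for the following request:

{request}
"""

# The same .format() call the classifier performs internally:
print(CUSTOM_INSTRUCTION.format(context="1. Intent: greeting ...", request="hello", top_k=1))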
mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py
@@ -1,10 +1,10 @@
-from typing import List, Optional, TYPE_CHECKING
+from typing import TYPE_CHECKING, List, Optional

-from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM
 from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
 from mcp_agent.workflows.intent_classifier.intent_classifier_llm import (
     LLMIntentClassifier,
 )
+from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM

 if TYPE_CHECKING:
     from mcp_agent.context import Context
@@ -27,7 +27,7 @@ class AnthropicLLMIntentClassifier(LLMIntentClassifier):
         classification_instruction: str | None = None,
         context: Optional["Context"] = None,
         **kwargs,
-    ):
+    ) -> None:
         anthropic_llm = AnthropicAugmentedLLM(
             instruction=CLASSIFIER_SYSTEM_INSTRUCTION, context=context
         )
mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py
@@ -1,10 +1,10 @@
-from typing import List, Optional, TYPE_CHECKING
+from typing import TYPE_CHECKING, List, Optional

-from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM
 from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
 from mcp_agent.workflows.intent_classifier.intent_classifier_llm import (
     LLMIntentClassifier,
 )
+from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM

 if TYPE_CHECKING:
     from mcp_agent.context import Context
@@ -27,10 +27,8 @@ class OpenAILLMIntentClassifier(LLMIntentClassifier):
         classification_instruction: str | None = None,
         context: Optional["Context"] = None,
         **kwargs,
-    ):
-        openai_llm = OpenAIAugmentedLLM(
-            instruction=CLASSIFIER_SYSTEM_INSTRUCTION, context=context
-        )
+    ) -> None:
+        openai_llm = OpenAIAugmentedLLM(instruction=CLASSIFIER_SYSTEM_INSTRUCTION, context=context)

         super().__init__(
             llm=openai_llm,
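Both provider subclasses above construct their AugmentedLLM internally, so a caller only supplies intents (and optionally a custom instruction). A hypothetical construction, assuming the Intent name/description fields and that provider credentials come from fast-agent configuration:

from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
from mcp_agent.workflows.intent_classifier.intent_classifier_llm_openai import (
    OpenAILLMIntentClassifier,
)

classifier = OpenAILLMIntentClassifier(
    intents=[Intent(name="billing", description="Questions about invoices or refunds")],
)
# classify() is async, matching the LLMIntentClassifier signature above:
#   results = await classifier.classify("I was charged twice", top_k=1)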
mcp_agent/workflows/llm/anthropic_utils.py
@@ -8,11 +8,10 @@ leveraging existing code for resource handling and delimited formats.
 from anthropic.types import (
     MessageParam,
 )
-
 from mcp.types import (
-    TextContent,
-    ImageContent,
     EmbeddedResource,
+    ImageContent,
+    TextContent,
     TextResourceContents,
 )

@@ -37,9 +36,7 @@ def anthropic_message_param_to_prompt_message_multipart(

     # Handle string content (user messages can be simple strings)
     if isinstance(content, str):
-        return PromptMessageMultipart(
-            role=role, content=[TextContent(type="text", text=content)]
-        )
+        return PromptMessageMultipart(role=role, content=[TextContent(type="text", text=content)])

     # Convert content blocks to MCP content types
     mcp_contents = []
@@ -50,29 +47,13 @@ def anthropic_message_param_to_prompt_message_multipart(
             text = block.get("text", "")

             # Check if this is a resource marker
-            if (
-                text
-                and (
-                    text.startswith("[Resource:")
-                    or text.startswith("[Binary Resource:")
-                )
-                and "\n" in text
-            ):
+            if text and (text.startswith("[Resource:") or text.startswith("[Binary Resource:")) and "\n" in text:
                 header, content_text = text.split("\n", 1)
                 if "MIME:" in header:
                     mime_match = header.split("MIME:", 1)[1].split("]")[0].strip()
-                    if (
-                        mime_match != "text/plain"
-                    ):  # Only process non-plain text resources
-                        if (
-                            "Resource:" in header
-                            and "Binary Resource:" not in header
-                        ):
-                            uri = (
-                                header.split("Resource:", 1)[1]
-                                .split(",")[0]
-                                .strip()
-                            )
+                    if mime_match != "text/plain":  # Only process non-plain text resources
+                        if "Resource:" in header and "Binary Resource:" not in header:
+                            uri = header.split("Resource:", 1)[1].split(",")[0].strip()
                             mcp_contents.append(
                                 EmbeddedResource(
                                     type="resource",
@@ -94,8 +75,6 @@ def anthropic_message_param_to_prompt_message_multipart(
         if isinstance(source, dict) and source.get("type") == "base64":
             media_type = source.get("media_type", "image/png")
             data = source.get("data", "")
-            mcp_contents.append(
-                ImageContent(type="image", data=data, mimeType=media_type)
-            )
+            mcp_contents.append(ImageContent(type="image", data=data, mimeType=media_type))

     return PromptMessageMultipart(role=role, content=mcp_contents)
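To make the branches above concrete: a plain-string Anthropic MessageParam becomes a single TextContent block, while a base64 image block becomes ImageContent. A hypothetical call, assuming the function takes the MessageParam dict directly (consistent with the role/content access in the hunks) and lives in the module named above:

from mcp_agent.workflows.llm.anthropic_utils import (
    anthropic_message_param_to_prompt_message_multipart,
)

# String content: handled by the fast path at the top of the function.
text_part = anthropic_message_param_to_prompt_message_multipart(
    {"role": "user", "content": "Describe this image."}
)

# Base64 image block: converted to ImageContent(type="image", ...).
image_part = anthropic_message_param_to_prompt_message_multipart(
    {
        "role": "user",
        "content": [
            {
                "type": "image",
                "source": {"type": "base64", "media_type": "image/png", "data": "<base64 PNG bytes>"},
            }
        ],
    }
)
print(text_part, image_part)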