fast-agent-mcp 0.1.13__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (147)
  1. {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/METADATA +3 -4
  2. fast_agent_mcp-0.2.0.dist-info/RECORD +123 -0
  3. mcp_agent/__init__.py +75 -0
  4. mcp_agent/agents/agent.py +59 -371
  5. mcp_agent/agents/base_agent.py +522 -0
  6. mcp_agent/agents/workflow/__init__.py +1 -0
  7. mcp_agent/agents/workflow/chain_agent.py +173 -0
  8. mcp_agent/agents/workflow/evaluator_optimizer.py +362 -0
  9. mcp_agent/agents/workflow/orchestrator_agent.py +591 -0
  10. mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_models.py +27 -11
  11. mcp_agent/agents/workflow/parallel_agent.py +182 -0
  12. mcp_agent/agents/workflow/router_agent.py +307 -0
  13. mcp_agent/app.py +3 -1
  14. mcp_agent/cli/commands/bootstrap.py +18 -7
  15. mcp_agent/cli/commands/setup.py +12 -4
  16. mcp_agent/cli/main.py +1 -1
  17. mcp_agent/cli/terminal.py +1 -1
  18. mcp_agent/config.py +24 -35
  19. mcp_agent/context.py +3 -1
  20. mcp_agent/context_dependent.py +3 -1
  21. mcp_agent/core/agent_types.py +10 -7
  22. mcp_agent/core/direct_agent_app.py +179 -0
  23. mcp_agent/core/direct_decorators.py +443 -0
  24. mcp_agent/core/direct_factory.py +476 -0
  25. mcp_agent/core/enhanced_prompt.py +15 -20
  26. mcp_agent/core/fastagent.py +151 -337
  27. mcp_agent/core/interactive_prompt.py +424 -0
  28. mcp_agent/core/mcp_content.py +19 -11
  29. mcp_agent/core/prompt.py +6 -2
  30. mcp_agent/core/validation.py +89 -16
  31. mcp_agent/executor/decorator_registry.py +6 -2
  32. mcp_agent/executor/temporal.py +35 -11
  33. mcp_agent/executor/workflow_signal.py +8 -2
  34. mcp_agent/human_input/handler.py +3 -1
  35. mcp_agent/llm/__init__.py +2 -0
  36. mcp_agent/{workflows/llm → llm}/augmented_llm.py +131 -256
  37. mcp_agent/{workflows/llm → llm}/augmented_llm_passthrough.py +35 -107
  38. mcp_agent/llm/augmented_llm_playback.py +83 -0
  39. mcp_agent/{workflows/llm → llm}/model_factory.py +26 -8
  40. mcp_agent/llm/providers/__init__.py +8 -0
  41. mcp_agent/{workflows/llm → llm/providers}/anthropic_utils.py +5 -1
  42. mcp_agent/{workflows/llm → llm/providers}/augmented_llm_anthropic.py +37 -141
  43. mcp_agent/llm/providers/augmented_llm_deepseek.py +53 -0
  44. mcp_agent/{workflows/llm → llm/providers}/augmented_llm_openai.py +112 -148
  45. mcp_agent/{workflows/llm → llm}/providers/multipart_converter_anthropic.py +78 -35
  46. mcp_agent/{workflows/llm → llm}/providers/multipart_converter_openai.py +73 -44
  47. mcp_agent/{workflows/llm → llm}/providers/openai_multipart.py +18 -4
  48. mcp_agent/{workflows/llm → llm/providers}/openai_utils.py +3 -3
  49. mcp_agent/{workflows/llm → llm}/providers/sampling_converter_anthropic.py +3 -3
  50. mcp_agent/{workflows/llm → llm}/providers/sampling_converter_openai.py +3 -3
  51. mcp_agent/{workflows/llm → llm}/sampling_converter.py +0 -21
  52. mcp_agent/{workflows/llm → llm}/sampling_format_converter.py +16 -1
  53. mcp_agent/logging/logger.py +2 -2
  54. mcp_agent/mcp/gen_client.py +9 -3
  55. mcp_agent/mcp/interfaces.py +67 -45
  56. mcp_agent/mcp/logger_textio.py +97 -0
  57. mcp_agent/mcp/mcp_agent_client_session.py +12 -4
  58. mcp_agent/mcp/mcp_agent_server.py +3 -1
  59. mcp_agent/mcp/mcp_aggregator.py +124 -93
  60. mcp_agent/mcp/mcp_connection_manager.py +21 -7
  61. mcp_agent/mcp/prompt_message_multipart.py +59 -1
  62. mcp_agent/mcp/prompt_render.py +77 -0
  63. mcp_agent/mcp/prompt_serialization.py +20 -13
  64. mcp_agent/mcp/prompts/prompt_constants.py +18 -0
  65. mcp_agent/mcp/prompts/prompt_helpers.py +327 -0
  66. mcp_agent/mcp/prompts/prompt_load.py +15 -5
  67. mcp_agent/mcp/prompts/prompt_server.py +154 -87
  68. mcp_agent/mcp/prompts/prompt_template.py +26 -35
  69. mcp_agent/mcp/resource_utils.py +3 -1
  70. mcp_agent/mcp/sampling.py +24 -15
  71. mcp_agent/mcp_server/agent_server.py +8 -5
  72. mcp_agent/mcp_server_registry.py +22 -9
  73. mcp_agent/resources/examples/{workflows → in_dev}/agent_build.py +1 -1
  74. mcp_agent/resources/examples/{data-analysis → in_dev}/slides.py +1 -1
  75. mcp_agent/resources/examples/internal/agent.py +4 -2
  76. mcp_agent/resources/examples/internal/fastagent.config.yaml +8 -2
  77. mcp_agent/resources/examples/prompting/image_server.py +3 -1
  78. mcp_agent/resources/examples/prompting/work_with_image.py +19 -0
  79. mcp_agent/ui/console_display.py +27 -7
  80. fast_agent_mcp-0.1.13.dist-info/RECORD +0 -164
  81. mcp_agent/core/agent_app.py +0 -570
  82. mcp_agent/core/agent_utils.py +0 -69
  83. mcp_agent/core/decorators.py +0 -448
  84. mcp_agent/core/factory.py +0 -422
  85. mcp_agent/core/proxies.py +0 -278
  86. mcp_agent/core/types.py +0 -22
  87. mcp_agent/eval/__init__.py +0 -0
  88. mcp_agent/mcp/stdio.py +0 -114
  89. mcp_agent/resources/examples/data-analysis/analysis-campaign.py +0 -188
  90. mcp_agent/resources/examples/data-analysis/analysis.py +0 -65
  91. mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +0 -41
  92. mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -1471
  93. mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +0 -53
  94. mcp_agent/resources/examples/researcher/fastagent.config.yaml +0 -66
  95. mcp_agent/resources/examples/researcher/researcher-eval.py +0 -53
  96. mcp_agent/resources/examples/researcher/researcher-imp.py +0 -189
  97. mcp_agent/resources/examples/researcher/researcher.py +0 -39
  98. mcp_agent/resources/examples/workflows/chaining.py +0 -45
  99. mcp_agent/resources/examples/workflows/evaluator.py +0 -79
  100. mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -24
  101. mcp_agent/resources/examples/workflows/human_input.py +0 -26
  102. mcp_agent/resources/examples/workflows/orchestrator.py +0 -74
  103. mcp_agent/resources/examples/workflows/parallel.py +0 -79
  104. mcp_agent/resources/examples/workflows/router.py +0 -54
  105. mcp_agent/resources/examples/workflows/sse.py +0 -23
  106. mcp_agent/telemetry/__init__.py +0 -0
  107. mcp_agent/telemetry/usage_tracking.py +0 -19
  108. mcp_agent/workflows/__init__.py +0 -0
  109. mcp_agent/workflows/embedding/__init__.py +0 -0
  110. mcp_agent/workflows/embedding/embedding_base.py +0 -58
  111. mcp_agent/workflows/embedding/embedding_cohere.py +0 -49
  112. mcp_agent/workflows/embedding/embedding_openai.py +0 -37
  113. mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
  114. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +0 -447
  115. mcp_agent/workflows/intent_classifier/__init__.py +0 -0
  116. mcp_agent/workflows/intent_classifier/intent_classifier_base.py +0 -117
  117. mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +0 -130
  118. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +0 -41
  119. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +0 -41
  120. mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +0 -150
  121. mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +0 -60
  122. mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +0 -58
  123. mcp_agent/workflows/llm/__init__.py +0 -0
  124. mcp_agent/workflows/llm/augmented_llm_playback.py +0 -111
  125. mcp_agent/workflows/llm/providers/__init__.py +0 -8
  126. mcp_agent/workflows/orchestrator/__init__.py +0 -0
  127. mcp_agent/workflows/orchestrator/orchestrator.py +0 -535
  128. mcp_agent/workflows/parallel/__init__.py +0 -0
  129. mcp_agent/workflows/parallel/fan_in.py +0 -320
  130. mcp_agent/workflows/parallel/fan_out.py +0 -181
  131. mcp_agent/workflows/parallel/parallel_llm.py +0 -149
  132. mcp_agent/workflows/router/__init__.py +0 -0
  133. mcp_agent/workflows/router/router_base.py +0 -338
  134. mcp_agent/workflows/router/router_embedding.py +0 -226
  135. mcp_agent/workflows/router/router_embedding_cohere.py +0 -59
  136. mcp_agent/workflows/router/router_embedding_openai.py +0 -59
  137. mcp_agent/workflows/router/router_llm.py +0 -304
  138. mcp_agent/workflows/swarm/__init__.py +0 -0
  139. mcp_agent/workflows/swarm/swarm.py +0 -292
  140. mcp_agent/workflows/swarm/swarm_anthropic.py +0 -42
  141. mcp_agent/workflows/swarm/swarm_openai.py +0 -41
  142. {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/WHEEL +0 -0
  143. {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/entry_points.txt +0 -0
  144. {fast_agent_mcp-0.1.13.dist-info → fast_agent_mcp-0.2.0.dist-info}/licenses/LICENSE +0 -0
  145. /mcp_agent/{workflows/orchestrator → agents/workflow}/orchestrator_prompts.py +0 -0
  146. /mcp_agent/{workflows/llm → llm}/memory.py +0 -0
  147. /mcp_agent/{workflows/llm → llm}/prompt_utils.py +0 -0
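
Most of this release is a package restructure: the mcp_agent.workflows tree is removed, the LLM plumbing moves to mcp_agent.llm (with provider code under mcp_agent.llm.providers), and the workflow patterns are rebuilt as agents under mcp_agent.agents.workflow. A sketch of how imports move, inferred only from the rename entries above; whether each symbol survives the accompanying edits is not visible in this diff:

# 0.1.13 layout (removed in this release)
from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM, RequestParams

# 0.2.0 layout, following the renames listed above
from mcp_agent.llm.augmented_llm import AugmentedLLM, RequestParams
from mcp_agent.llm.model_factory import ModelFactory  # class name assumed from the filename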
--- a/mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py
+++ /dev/null
@@ -1,447 +0,0 @@
- import contextlib
- from enum import Enum
- from typing import TYPE_CHECKING, Callable, List, Optional, Type
-
- from pydantic import BaseModel, Field
-
- from mcp_agent.agents.agent import Agent
- from mcp_agent.core.agent_types import AgentConfig
- from mcp_agent.logging.logger import get_logger
- from mcp_agent.workflows.llm.augmented_llm import (
-     AugmentedLLM,
-     MessageParamT,
-     MessageT,
-     ModelT,
-     RequestParams,
- )
- from mcp_agent.workflows.llm.augmented_llm_passthrough import PassthroughLLM
-
- if TYPE_CHECKING:
-     from mcp_agent.context import Context
-
- logger = get_logger(__name__)
-
-
- class QualityRating(str, Enum):
-     """Enum for evaluation quality ratings"""
-
-     POOR = 0  # Major improvements needed
-     FAIR = 1  # Several improvements needed
-     GOOD = 2  # Minor improvements possible
-     EXCELLENT = 3  # No improvements needed
-
-
- class EvaluationResult(BaseModel):
-     """Model representing the evaluation result from the evaluator LLM"""
-
-     rating: QualityRating = Field(description="Quality rating of the response")
-     feedback: str = Field(description="Specific feedback and suggestions for improvement")
-     needs_improvement: bool = Field(description="Whether the output needs further improvement")
-     focus_areas: List[str] = Field(default_factory=list, description="Specific areas to focus on in next iteration")
-
-
- class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
-     """
-     Implementation of the evaluator-optimizer workflow where one LLM generates responses
-     while another provides evaluation and feedback in a refinement loop.
-
-     This can be used either:
-     1. As a standalone workflow with its own optimizer agent
-     2. As a wrapper around another workflow (Orchestrator, Router, ParallelLLM) to add
-        evaluation and refinement capabilities
-
-     When to use this workflow:
-     - When you have clear evaluation criteria and iterative refinement provides value
-     - When LLM responses improve with articulated feedback
-     - When the task benefits from focused iteration on specific aspects
-
-     Examples:
-     - Literary translation with "expert" refinement
-     - Complex search tasks needing multiple rounds
-     - Document writing requiring multiple revisions
-     """
-
-     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
-         """Initialize default parameters using the workflow's settings."""
-         return RequestParams(
-             systemPrompt=self.instruction,
-             parallel_tool_calls=True,
-             max_iterations=10,
-             use_history=self.generator_use_history,  # Use generator's history setting
-         )
-
-     def _init_request_params(self) -> None:
-         """Initialize request parameters for both generator and evaluator components."""
-         # Set up workflow's default params based on generator's history setting
-         self.default_request_params = self._initialize_default_params({})
-
-         # Ensure evaluator's request params have history disabled
-         if hasattr(self.evaluator_llm, "default_request_params"):
-             self.evaluator_llm.default_request_params.use_history = False
-
-     def __init__(
-         self,
-         generator: Agent | AugmentedLLM,
-         evaluator: str | Agent | AugmentedLLM,
-         min_rating: QualityRating = QualityRating.GOOD,
-         max_refinements: int = 3,
-         llm_factory: Callable[[Agent], AugmentedLLM] | None = None,
-         context: Optional["Context"] = None,
-         name: Optional[str] = None,
-         instruction: Optional[str] = None,
-     ) -> None:
-         """
-         Initialize the evaluator-optimizer workflow.
-
-         Args:
-             generator: The agent/LLM/workflow that generates responses
-             evaluator: The evaluator (string instruction, Agent or AugmentedLLM)
-             min_rating: Minimum acceptable quality rating
-             max_refinements: Maximum refinement iterations
-             llm_factory: Factory to create LLMs from agents when needed
-             name: Optional name for the workflow (defaults to generator's name)
-             instruction: Optional instruction (defaults to generator's instruction)
-         """
-         # Set initial attributes
-         self.name = name or getattr(generator, "name", "EvaluatorOptimizer")
-         self.llm_factory = llm_factory
-         self.generator = generator
-         self.evaluator = evaluator
-         self.min_rating = min_rating
-         self.max_refinements = max_refinements
-
-         # Determine generator's history setting directly based on type
-         self.generator_use_history = False
-         if isinstance(generator, Agent):
-             self.generator_use_history = generator.config.use_history
-         elif isinstance(generator, AugmentedLLM):
-             if hasattr(generator, "aggregator") and isinstance(generator.aggregator, Agent):
-                 self.generator_use_history = generator.aggregator.config.use_history
-             elif hasattr(generator, "default_request_params"):
-                 self.generator_use_history = getattr(generator.default_request_params, "use_history", False)
-         # All other types default to False
-
-         # Initialize parent class
-         super().__init__(context=context, name=name or getattr(generator, "name", None))
-
-         # Create a PassthroughLLM as _llm property
-         # TODO -- remove this when we fix/remove the inheritance hierarchy
-         self._llm = PassthroughLLM(name=f"{self.name}_passthrough", context=context)
-
-         # Set up the generator based on type
-         if isinstance(generator, Agent):
-             if not llm_factory:
-                 raise ValueError("llm_factory is required when using an Agent generator")
-
-             # Use existing LLM if available, otherwise create new one
-             self.generator_llm = getattr(generator, "_llm", None) or llm_factory(agent=generator)
-             self.aggregator = generator
-             self.instruction = instruction or (generator.instruction if isinstance(generator.instruction, str) else None)
-         elif isinstance(generator, AugmentedLLM):
-             self.generator_llm = generator
-             self.aggregator = getattr(generator, "aggregator", None)
-             self.instruction = instruction or generator.instruction
-         else:
-             # ChainProxy-like object
-             self.generator_llm = generator
-             self.aggregator = None
-             self.instruction = instruction or f"Chain of agents: {', '.join(generator._sequence)}"
-
-         # Set up the evaluator - always disable history
-         if isinstance(evaluator, str):
-             if not llm_factory:
-                 raise ValueError("llm_factory is required when using a string evaluator")
-
-             evaluator_agent = Agent(
-                 name="Evaluator",
-                 instruction=evaluator,
-                 config=AgentConfig(
-                     name="Evaluator",
-                     instruction=evaluator,
-                     servers=[],
-                     use_history=False,
-                 ),
-             )
-             self.evaluator_llm = llm_factory(agent=evaluator_agent)
-         elif isinstance(evaluator, Agent):
-             if not llm_factory:
-                 raise ValueError("llm_factory is required when using an Agent evaluator")
-
-             # Disable history and use/create LLM
-             evaluator.config.use_history = False
-             self.evaluator_llm = getattr(evaluator, "_llm", None) or llm_factory(agent=evaluator)
-         elif isinstance(evaluator, AugmentedLLM):
-             self.evaluator_llm = evaluator
-             # Ensure history is disabled
-             if hasattr(self.evaluator_llm, "default_request_params"):
-                 self.evaluator_llm.default_request_params.use_history = False
-         else:
-             raise ValueError(f"Unsupported evaluator type: {type(evaluator)}")
-
-         # Track iteration history
-         self.refinement_history = []
-
-         # Set up workflow's default params
-         self.default_request_params = self._initialize_default_params({})
-
-         # Ensure evaluator's request params have history disabled
-         if hasattr(self.evaluator_llm, "default_request_params"):
-             self.evaluator_llm.default_request_params.use_history = False
-
-     async def generate(
-         self,
-         message: str | MessageParamT | List[MessageParamT],
-         request_params: RequestParams | None = None,
-     ) -> List[MessageT]:
-         """Generate an optimized response through evaluation-guided refinement"""
-         refinement_count = 0
-         response = None
-         best_response = None
-         best_rating = QualityRating.POOR
-         self.refinement_history = []
-
-         # Get request params with proper use_history setting
-         params = self.get_request_params(request_params)
-
-         # Use a single AsyncExitStack for the entire method to maintain connections
-         async with contextlib.AsyncExitStack() as stack:
-             # Enter all agent contexts once at the beginning
-             if isinstance(self.generator, Agent):
-                 await stack.enter_async_context(self.generator)
-             if isinstance(self.evaluator, Agent):
-                 await stack.enter_async_context(self.evaluator)
-
-             # Initial generation - pass parameters to any type of generator
-             response = await self.generator_llm.generate_str(
-                 message=message,
-                 request_params=params,  # Pass params which may override use_history
-             )
-
-             best_response = response
-
-             while refinement_count < self.max_refinements:
-                 logger.debug("Generator result:", data=response)
-
-                 # Evaluate current response
-                 eval_prompt = self._build_eval_prompt(
-                     original_request=str(message),
-                     current_response=response,  # response is already a string
-                     iteration=refinement_count,
-                 )
-
-                 # No need for nested AsyncExitStack here - using the outer one
-                 evaluation_result = await self.evaluator_llm.generate_structured(
-                     message=eval_prompt,
-                     response_model=EvaluationResult,
-                     request_params=request_params,
-                 )
-
-                 # Track iteration
-                 self.refinement_history.append(
-                     {
-                         "attempt": refinement_count + 1,
-                         "response": response,
-                         "evaluation_result": evaluation_result,
-                     }
-                 )
-
-                 logger.debug("Evaluator result:", data=evaluation_result)
-
-                 # Track best response (using enum ordering)
-                 if evaluation_result.rating.value > best_rating.value:
-                     best_rating = evaluation_result.rating
-                     best_response = response
-                     logger.debug(
-                         "New best response:",
-                         data={"rating": best_rating, "response": best_response},
-                     )
-
-                 # Check if we've reached acceptable quality
-                 if evaluation_result.rating.value >= self.min_rating.value or not evaluation_result.needs_improvement:
-                     logger.debug(
-                         f"Acceptable quality {evaluation_result.rating.value} reached",
-                         data={
-                             "rating": evaluation_result.rating.value,
-                             "needs_improvement": evaluation_result.needs_improvement,
-                             "min_rating": self.min_rating.value,
-                         },
-                     )
-                     break
-
-                 # Generate refined response
-                 refinement_prompt = self._build_refinement_prompt(
-                     original_request=str(message),
-                     current_response=response,
-                     feedback=evaluation_result,
-                     iteration=refinement_count,
-                     use_history=self.generator_use_history,  # Use the generator's history setting
-                 )
-
-                 # Pass parameters to any type of generator
-                 response = await self.generator_llm.generate_str(
-                     message=refinement_prompt,
-                     request_params=params,  # Pass params which may override use_history
-                 )
-
-                 refinement_count += 1
-
-         # Return the best response as a list with a single string element
-         # This makes it consistent with other AugmentedLLM implementations
-         # that return List[MessageT]
-         return [best_response]
-
-     async def generate_str(
-         self,
-         message: str | MessageParamT | List[MessageParamT],
-         request_params: RequestParams | None = None,
-     ) -> str:
-         """Generate an optimized response and return it as a string"""
-         response = await self.generate(
-             message=message,
-             request_params=request_params,
-         )
-         # Since generate now returns [best_response], just return the first element
-         return str(response[0])
-
-     async def generate_structured(
-         self,
-         message: str | MessageParamT | List[MessageParamT],
-         response_model: Type[ModelT],
-         request_params: RequestParams | None = None,
-     ) -> ModelT:
-         """Generate an optimized structured response"""
-         response_str = await self.generate_str(message=message, request_params=request_params)
-
-         return await self.generator.generate_structured(
-             message=response_str,
-             response_model=response_model,
-             request_params=request_params,
-         )
-
-     def _build_eval_prompt(self, original_request: str, current_response: str, iteration: int) -> str:
-         """Build the evaluation prompt for the evaluator"""
-         return f"""
- You are an expert evaluator for content quality. Your task is to evaluate a response against the user's original request.
-
- Evaluate the response for iteration {iteration + 1} and provide structured feedback on its quality and areas for improvement.
-
- <fastagent:data>
- <fastagent:request>
- {original_request}
- </fastagent:request>
-
- <fastagent:response>
- {current_response}
- </fastagent:response>
-
- <fastagent:evaluation-criteria>
- {self.evaluator.instruction}
- </fastagent:evaluation-criteria>
- </fastagent:data>
-
- <fastagent:instruction>
- Provide a structured evaluation with the following components:
-
- <rating>
- Choose one: EXCELLENT, GOOD, FAIR, or POOR
- - EXCELLENT: No improvements needed
- - GOOD: Only minor improvements possible
- - FAIR: Several improvements needed
- - POOR: Major improvements needed
- </rating>
-
- <details>
- Provide specific, actionable feedback and suggestions for improvement.
- Be precise about what works well and what could be improved.
- </details>
-
- <needs_improvement>
- Indicate true/false whether further improvement is needed.
- </needs_improvement>
-
- <focus-areas>
- List 1-3 specific areas to focus on in the next iteration.
- Be concrete and actionable in your recommendations.
- </focus-areas>
- </fastagent:instruction>
- """
-
-     def _build_refinement_prompt(
-         self,
-         original_request: str,
-         current_response: str,
-         feedback: EvaluationResult,
-         iteration: int,
-         use_history: bool = None,
-     ) -> str:
-         """Build the refinement prompt for the optimizer"""
-         # Get the correct history setting - use param if provided, otherwise class default
-         if use_history is None:
-             use_history = self.generator_use_history  # Use generator's setting as default
-
-         # Start with clear non-delimited instructions
-         prompt = f"""
- You are tasked with improving a response based on expert feedback. This is iteration {iteration + 1} of the refinement process.
-
- Your goal is to address all feedback points while maintaining accuracy and relevance to the original request.
- """
-
-         # Add data section with all relevant information
-         prompt += """
- <fastagent:data>
- """
-
-         # Add request
-         prompt += f"""
- <fastagent:request>
- {original_request}
- </fastagent:request>
- """
-
-         # Only include previous response if history is not enabled
-         if not use_history:
-             prompt += f"""
- <fastagent:previous-response>
- {current_response}
- </fastagent:previous-response>
- """
-
-         # Always include the feedback
-         prompt += f"""
- <fastagent:feedback>
- <rating>{feedback.rating}</rating>
- <details>{feedback.feedback}</details>
- <focus-areas>{", ".join(feedback.focus_areas) if feedback.focus_areas else "None specified"}</focus-areas>
- </fastagent:feedback>
- </fastagent:data>
- """
-
-         # Customize instruction based on history availability
-         if not use_history:
-             prompt += """
- <fastagent:instruction>
- Create an improved version of the response that:
- 1. Directly addresses each point in the feedback
- 2. Focuses on the specific areas mentioned for improvement
- 3. Maintains all the strengths of the original response
- 4. Remains accurate and relevant to the original request
-
- Provide your complete improved response without explanations or commentary.
- </fastagent:instruction>
- """
-         else:
-             prompt += """
- <fastagent:instruction>
- Your previous response is available in your conversation history.
-
- Create an improved version that:
- 1. Directly addresses each point in the feedback
- 2. Focuses on the specific areas mentioned for improvement
- 3. Maintains all the strengths of your original response
- 4. Remains accurate and relevant to the original request
-
- Provide your complete improved response without explanations or commentary.
- </fastagent:instruction>
- """
-
-         return prompt
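
For reference, a minimal sketch of how the removed EvaluatorOptimizerLLM was driven, built only from the constructor and generate_str signatures shown in the hunk above. The my_llm_factory callable is a hypothetical stand-in you would supply; in 0.2.0 this role passes to the new mcp_agent/agents/workflow/evaluator_optimizer.py.

import asyncio

from mcp_agent.agents.agent import Agent
from mcp_agent.core.agent_types import AgentConfig
from mcp_agent.workflows.evaluator_optimizer.evaluator_optimizer import (
    EvaluatorOptimizerLLM,
    QualityRating,
)


async def main() -> None:
    config = AgentConfig(name="writer", instruction="Write a concise, accurate answer.", servers=[], use_history=True)
    generator = Agent(name="writer", instruction=config.instruction, config=config)

    workflow = EvaluatorOptimizerLLM(
        generator=generator,
        evaluator="Rate accuracy and clarity; give concrete fixes.",  # a string evaluator is wrapped in its own Agent
        min_rating=QualityRating.GOOD,  # refinement stops at GOOD or better
        max_refinements=3,
        llm_factory=my_llm_factory,  # hypothetical; required whenever generator/evaluator is an Agent or a string
    )

    # generate() keeps the best candidate seen across refinement rounds
    print(await workflow.generate_str("Summarise the incident report"))


asyncio.run(main())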
--- a/mcp_agent/workflows/intent_classifier/intent_classifier_base.py
+++ /dev/null
@@ -1,117 +0,0 @@
- from abc import ABC, abstractmethod
- from typing import TYPE_CHECKING, Dict, List, Optional
-
- from pydantic import BaseModel, Field
-
- if TYPE_CHECKING:
-     from mcp_agent.context import Context
-
-
- class Intent(BaseModel):
-     """A class that represents a single intent category"""
-
-     name: str
-     """The name of the intent"""
-
-     description: str | None = None
-     """A description of what this intent represents"""
-
-     examples: List[str] = Field(default_factory=list)
-     """Example phrases or requests that match this intent"""
-
-     metadata: Dict[str, str] = Field(default_factory=dict)
-     """Additional metadata about the intent that might be useful for classification"""
-
-
- class IntentClassificationResult(BaseModel):
-     """A class that represents the result of intent classification"""
-
-     intent: str
-     """The classified intent name"""
-
-     p_score: float | None = None
-     """
-     The probability score (i.e. 0->1) of the classification.
-     This is optional and may only be provided if the classifier is probabilistic (e.g. a probabilistic binary classifier).
-     """
-
-     extracted_entities: Dict[str, str] = Field(default_factory=dict)
-     """Any entities or parameters extracted from the input request that are relevant to the intent"""
-
-
- class IntentClassifier(ABC):
-     """
-     Base class for intent classification. This can be implemented using different approaches
-     like LLMs, embedding models, traditional ML classification models, or rule-based systems.
-
-     When to use this:
-     - When you need to understand the user's intention before routing or processing
-     - When you want to extract structured information from natural language inputs
-     - When you need to handle multiple related but distinct types of requests
-
-     Examples:
-     - Classifying customer service requests (complaint, question, feedback)
-     - Understanding user commands in a chat interface
-     - Determining the type of analysis requested for a dataset
-     """
-
-     def __init__(self, intents: List[Intent], context: Optional["Context"] = None, **kwargs) -> None:
-         super().__init__(context=context, **kwargs)
-         self.intents = {intent.name: intent for intent in intents}
-         self.initialized: bool = False
-
-         if not self.intents:
-             raise ValueError("At least one intent must be provided")
-
-     @abstractmethod
-     async def classify(self, request: str, top_k: int = 1) -> List[IntentClassificationResult]:
-         """
-         Classify the input request into one or more intents.
-
-         Args:
-             request: The input text to classify
-             top_k: Maximum number of top intent matches to return. May return fewer.
-
-         Returns:
-             List of classification results, ordered by confidence
-         """
-
-     async def initialize(self) -> None:
-         """Initialize the classifier. Override this method if needed."""
-         self.initialized = True
-
-
- # Example
- # Define some intents
- # intents = [
- #     Intent(
- #         name="schedule_meeting",
- #         description="Schedule or set up a meeting or appointment",
- #         examples=[
- #             "Can you schedule a meeting with John?",
- #             "Set up a call for next week",
- #             "I need to arrange a meeting"
- #         ]
- #     ),
- #     Intent(
- #         name="check_calendar",
- #         description="Check calendar availability or existing appointments",
- #         examples=[
- #             "What meetings do I have today?",
- #             "Show me my calendar",
- #             "Am I free tomorrow afternoon?"
- #         ]
- #     )
- # ]
-
- # # Initialize with OpenAI embeddings
- # classifier = OpenAIEmbeddingIntentClassifier(intents=intents, model="text-embedding-3-small")
-
- # # Or use Cohere embeddings
- # classifier = OpenAIEmbeddingIntentClassifier(intents=intents, model="embed-multilingual-v3.0")
-
- # # Classify some text
- # results = await classifier.classify(
- #     request="Can you set up a meeting with Sarah for tomorrow?"
- #     top_k=3
- # )
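
classify is the only abstract method on the base class above, so a subclass just scores intents and returns ordered results. A minimal illustrative subclass; the keyword-overlap scoring is invented here for the sketch and was never shipped in the package:

from typing import List

from mcp_agent.workflows.intent_classifier.intent_classifier_base import (
    Intent,
    IntentClassificationResult,
    IntentClassifier,
)


class KeywordIntentClassifier(IntentClassifier):
    """Toy classifier: p_score is the fraction of an intent's examples sharing a word with the request."""

    async def classify(self, request: str, top_k: int = 1) -> List[IntentClassificationResult]:
        words = set(request.lower().split())
        results: List[IntentClassificationResult] = []
        for name, intent in self.intents.items():
            # Count examples that share at least one word with the request
            hits = sum(1 for example in intent.examples if words & set(example.lower().split()))
            if intent.examples and hits:
                results.append(IntentClassificationResult(intent=name, p_score=hits / len(intent.examples)))
        results.sort(key=lambda r: r.p_score, reverse=True)
        return results[:top_k]


# classifier = KeywordIntentClassifier(intents=[Intent(name="greeting", examples=["hello there"])])
# results = await classifier.classify("hello", top_k=1)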
--- a/mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py
+++ /dev/null
@@ -1,130 +0,0 @@
- from typing import TYPE_CHECKING, List, Optional
-
- from numpy import mean
-
- from mcp_agent.workflows.embedding.embedding_base import (
-     EmbeddingModel,
-     FloatArray,
-     compute_confidence,
-     compute_similarity_scores,
- )
- from mcp_agent.workflows.intent_classifier.intent_classifier_base import (
-     Intent,
-     IntentClassificationResult,
-     IntentClassifier,
- )
-
- if TYPE_CHECKING:
-     from mcp_agent.context import Context
-
-
- class EmbeddingIntent(Intent):
-     """An intent with embedding information"""
-
-     embedding: FloatArray | None = None
-     """Pre-computed embedding for this intent"""
-
-
- class EmbeddingIntentClassifier(IntentClassifier):
-     """
-     An intent classifier that uses embedding similarity for classification.
-     Supports different embedding models through the EmbeddingModel interface.
-
-     Features:
-     - Semantic similarity based classification
-     - Support for example-based learning
-     - Flexible embedding model support
-     - Multiple similarity computation strategies
-     """
-
-     def __init__(
-         self,
-         intents: List[Intent],
-         embedding_model: EmbeddingModel,
-         context: Optional["Context"] = None,
-         **kwargs,
-     ) -> None:
-         super().__init__(intents=intents, context=context, **kwargs)
-         self.embedding_model = embedding_model
-         self.initialized = False
-
-     @classmethod
-     async def create(
-         cls,
-         intents: List[Intent],
-         embedding_model: EmbeddingModel,
-     ) -> "EmbeddingIntentClassifier":
-         """
-         Factory method to create and initialize a classifier.
-         Use this instead of constructor since we need async initialization.
-         """
-         instance = cls(
-             intents=intents,
-             embedding_model=embedding_model,
-         )
-         await instance.initialize()
-         return instance
-
-     async def initialize(self) -> None:
-         """
-         Precompute embeddings for all intents by combining their
-         descriptions and examples
-         """
-         if self.initialized:
-             return
-
-         for intent in self.intents.values():
-             # Combine all text for a rich intent representation
-             intent_texts = [intent.name, intent.description] + intent.examples
-
-             # Get embeddings for all texts
-             embeddings = await self.embedding_model.embed(intent_texts)
-
-             # Use mean pooling to combine embeddings
-             embedding = mean(embeddings, axis=0)
-
-             # Create intents with embeddings
-             self.intents[intent.name] = EmbeddingIntent(
-                 **intent,
-                 embedding=embedding,
-             )
-
-         self.initialized = True
-
-     async def classify(self, request: str, top_k: int = 1) -> List[IntentClassificationResult]:
-         """
-         Classify the input text into one or more intents
-
-         Args:
-             text: Input text to classify
-             top_k: Maximum number of top matches to return
-
-         Returns:
-             List of classification results, ordered by confidence
-         """
-         if not self.initialized:
-             await self.initialize()
-
-         # Get embedding for input
-         embeddings = await self.embedding_model.embed([request])
-         request_embedding = embeddings[0]  # Take first since we only embedded one text
-
-         results: List[IntentClassificationResult] = []
-         for intent_name, intent in self.intents.items():
-             if intent.embedding is None:
-                 continue
-
-             similarity_scores = compute_similarity_scores(request_embedding, intent.embedding)
-
-             # Compute overall confidence score
-             confidence = compute_confidence(similarity_scores)
-
-             results.append(
-                 IntentClassificationResult(
-                     intent=intent_name,
-                     p_score=confidence,
-                 )
-             )
-
-         results.sort(key=lambda x: x.p_score, reverse=True)
-         return results[:top_k]
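
Finally, a hedged end-to-end sketch of the embedding classifier removed above, combining its create factory with the commented example from intent_classifier_base.py. OpenAIEmbeddingModel is an assumed class name for the also-deleted mcp_agent/workflows/embedding/embedding_openai.py; only Intent, EmbeddingIntentClassifier.create, and classify come from the hunks:

import asyncio

from mcp_agent.workflows.embedding.embedding_openai import OpenAIEmbeddingModel  # assumed name
from mcp_agent.workflows.intent_classifier.intent_classifier_base import Intent
from mcp_agent.workflows.intent_classifier.intent_classifier_embedding import EmbeddingIntentClassifier


async def main() -> None:
    intents = [
        Intent(
            name="schedule_meeting",
            description="Schedule or set up a meeting or appointment",
            examples=["Can you schedule a meeting with John?", "Set up a call for next week"],
        ),
        Intent(
            name="check_calendar",
            description="Check calendar availability or existing appointments",
            examples=["What meetings do I have today?", "Am I free tomorrow afternoon?"],
        ),
    ]

    # create() precomputes one mean-pooled embedding per intent before first use
    classifier = await EmbeddingIntentClassifier.create(
        intents=intents,
        embedding_model=OpenAIEmbeddingModel(),
    )

    results = await classifier.classify("Can you set up a meeting with Sarah?", top_k=2)
    for result in results:
        print(result.intent, result.p_score)


asyncio.run(main())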