fast-agent-mcp 0.2.14__py3-none-any.whl → 0.2.17__py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
Files changed (47)
  1. {fast_agent_mcp-0.2.14.dist-info → fast_agent_mcp-0.2.17.dist-info}/METADATA +4 -6
  2. {fast_agent_mcp-0.2.14.dist-info → fast_agent_mcp-0.2.17.dist-info}/RECORD +46 -46
  3. mcp_agent/agents/base_agent.py +50 -6
  4. mcp_agent/agents/workflow/orchestrator_agent.py +6 -7
  5. mcp_agent/agents/workflow/router_agent.py +70 -136
  6. mcp_agent/app.py +1 -124
  7. mcp_agent/cli/commands/setup.py +1 -1
  8. mcp_agent/config.py +19 -19
  9. mcp_agent/context.py +4 -22
  10. mcp_agent/core/agent_types.py +2 -2
  11. mcp_agent/core/direct_decorators.py +2 -2
  12. mcp_agent/core/direct_factory.py +2 -1
  13. mcp_agent/core/enhanced_prompt.py +6 -5
  14. mcp_agent/core/fastagent.py +1 -1
  15. mcp_agent/core/interactive_prompt.py +70 -50
  16. mcp_agent/core/request_params.py +5 -1
  17. mcp_agent/executor/workflow_signal.py +0 -2
  18. mcp_agent/llm/augmented_llm.py +183 -57
  19. mcp_agent/llm/augmented_llm_passthrough.py +1 -1
  20. mcp_agent/llm/augmented_llm_playback.py +21 -1
  21. mcp_agent/llm/memory.py +3 -3
  22. mcp_agent/llm/model_factory.py +3 -1
  23. mcp_agent/llm/provider_key_manager.py +1 -0
  24. mcp_agent/llm/provider_types.py +2 -1
  25. mcp_agent/llm/providers/augmented_llm_anthropic.py +49 -10
  26. mcp_agent/llm/providers/augmented_llm_deepseek.py +0 -2
  27. mcp_agent/llm/providers/augmented_llm_generic.py +4 -2
  28. mcp_agent/llm/providers/augmented_llm_google.py +30 -0
  29. mcp_agent/llm/providers/augmented_llm_openai.py +95 -158
  30. mcp_agent/llm/providers/multipart_converter_openai.py +10 -27
  31. mcp_agent/llm/providers/sampling_converter_openai.py +5 -6
  32. mcp_agent/mcp/interfaces.py +6 -1
  33. mcp_agent/mcp/mcp_aggregator.py +2 -8
  34. mcp_agent/mcp/prompt_message_multipart.py +25 -2
  35. mcp_agent/resources/examples/data-analysis/analysis-campaign.py +2 -2
  36. mcp_agent/resources/examples/in_dev/agent_build.py +1 -1
  37. mcp_agent/resources/examples/internal/job.py +1 -1
  38. mcp_agent/resources/examples/mcp/state-transfer/fastagent.config.yaml +1 -1
  39. mcp_agent/resources/examples/prompting/agent.py +0 -2
  40. mcp_agent/resources/examples/prompting/fastagent.config.yaml +2 -3
  41. mcp_agent/resources/examples/researcher/fastagent.config.yaml +1 -6
  42. mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -1
  43. mcp_agent/resources/examples/workflows/parallel.py +1 -1
  44. mcp_agent/executor/decorator_registry.py +0 -112
  45. {fast_agent_mcp-0.2.14.dist-info → fast_agent_mcp-0.2.17.dist-info}/WHEEL +0 -0
  46. {fast_agent_mcp-0.2.14.dist-info → fast_agent_mcp-0.2.17.dist-info}/entry_points.txt +0 -0
  47. {fast_agent_mcp-0.2.14.dist-info → fast_agent_mcp-0.2.17.dist-info}/licenses/LICENSE +0 -0
mcp_agent/llm/augmented_llm_playback.py CHANGED
@@ -1,9 +1,11 @@
-from typing import Any, List
+from typing import Any, List, Type
 
+from mcp_agent.core.exceptions import ModelConfigError
 from mcp_agent.core.prompt import Prompt
 from mcp_agent.llm.augmented_llm import RequestParams
 from mcp_agent.llm.augmented_llm_passthrough import PassthroughLLM
 from mcp_agent.llm.provider_types import Provider
+from mcp_agent.mcp.interfaces import ModelT
 from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 from mcp_agent.mcp.prompts.prompt_helpers import MessageContent
 
@@ -82,3 +84,21 @@ class PlaybackLLM(PassthroughLLM):
         )
 
         return response
+
+    async def structured(
+        self,
+        multipart_messages: List[PromptMessageMultipart],
+        model: Type[ModelT],
+        request_params: RequestParams | None = None,
+    ) -> tuple[ModelT | None, PromptMessageMultipart]:
+        """
+        Handle structured requests by returning the next assistant message.
+        """
+
+        if -1 == self._current_index:
+            raise ModelConfigError("Use generate() to load playback history")
+
+        return self._structured_from_multipart(
+            self._get_next_assistant_message(),
+            model,
+        )
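For orientation, a hedged sketch of the new `structured()` behaviour: the playback history must first be loaded via `generate()`, after which each `structured()` call parses the next recorded assistant message into the given Pydantic model. The agent wiring and the `WeatherReport` model below are hypothetical.

```python
from pydantic import BaseModel

from mcp_agent.core.prompt import Prompt


class WeatherReport(BaseModel):  # hypothetical schema
    city: str
    temperature_c: float


async def replay(playback_llm) -> None:
    # playback_llm is assumed to be a PlaybackLLM whose history was already
    # loaded via generate(); calling structured() first raises
    # ModelConfigError("Use generate() to load playback history").
    parsed, message = await playback_llm.structured(
        [Prompt.user("What's the weather in Paris?")],
        WeatherReport,
    )
    # parsed is a WeatherReport when the next recorded assistant message
    # is valid JSON for the schema, otherwise None.
```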
mcp_agent/llm/memory.py CHANGED
@@ -19,7 +19,7 @@ class Memory(Protocol, Generic[MessageParamT]):
 
     def append(self, message: MessageParamT, is_prompt: bool = False) -> None: ...
 
-    def get(self, include_history: bool = True) -> List[MessageParamT]: ...
+    def get(self, include_completion_history: bool = True) -> List[MessageParamT]: ...
 
     def clear(self, clear_prompts: bool = False) -> None: ...
 
@@ -75,7 +75,7 @@ class SimpleMemory(Memory, Generic[MessageParamT]):
         else:
             self.history.append(message)
 
-    def get(self, include_history: bool = True) -> List[MessageParamT]:
+    def get(self, include_completion_history: bool = True) -> List[MessageParamT]:
         """
         Get all messages in memory.
 
@@ -86,7 +86,7 @@ class SimpleMemory(Memory, Generic[MessageParamT]):
         Returns:
             Combined list of prompt messages and optionally history messages
         """
-        if include_history:
+        if include_completion_history:
            return self.prompt_messages + self.history
         else:
             return self.prompt_messages.copy()
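The rename from `include_history` to `include_completion_history` makes the split explicit: prompt messages are always returned, completion history only on request. A minimal sketch, assuming `SimpleMemory`'s no-argument constructor:

```python
from mcp_agent.llm.memory import SimpleMemory

memory = SimpleMemory()  # plain dicts stand in for MessageParamT here
memory.append({"role": "user", "content": "pinned prompt"}, is_prompt=True)
memory.append({"role": "assistant", "content": "completion turn"})

memory.get(include_completion_history=True)   # prompt messages + history
memory.get(include_completion_history=False)  # prompt messages only
```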
mcp_agent/llm/model_factory.py CHANGED
@@ -12,6 +12,7 @@ from mcp_agent.llm.provider_types import Provider
 from mcp_agent.llm.providers.augmented_llm_anthropic import AnthropicAugmentedLLM
 from mcp_agent.llm.providers.augmented_llm_deepseek import DeepSeekAugmentedLLM
 from mcp_agent.llm.providers.augmented_llm_generic import GenericAugmentedLLM
+from mcp_agent.llm.providers.augmented_llm_google import GoogleAugmentedLLM
 from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
 from mcp_agent.llm.providers.augmented_llm_openrouter import OpenRouterAugmentedLLM
 from mcp_agent.mcp.interfaces import AugmentedLLMProtocol
@@ -107,6 +108,7 @@ class ModelFactory:
         Provider.FAST_AGENT: PassthroughLLM,
         Provider.DEEPSEEK: DeepSeekAugmentedLLM,
         Provider.GENERIC: GenericAugmentedLLM,
+        Provider.GOOGLE: GoogleAugmentedLLM,  # type: ignore
         Provider.OPENROUTER: OpenRouterAugmentedLLM,
     }
 
@@ -161,7 +163,7 @@ class ModelFactory:
         Creates a factory function that follows the attach_llm protocol.
 
         Args:
-            model_string: The model specification string (e.g. "gpt-4o.high")
+            model_string: The model specification string (e.g. "gpt-4.1")
             request_params: Optional parameters to configure LLM behavior
 
         Returns:
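With `Provider.GOOGLE` in the class map, a model string that resolves to the Google provider now attaches `GoogleAugmentedLLM`. A hedged sketch; the provider-prefixed `google.` syntax is an assumption based on how other non-default providers are addressed, the diff itself only confirms the `"gpt-4.1"` docstring example:

```python
from mcp_agent.llm.model_factory import ModelFactory

# Hypothetical model string; assumes the "provider.model" convention.
attach_llm = ModelFactory.create_factory("google.gemini-2.0-flash")
```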
mcp_agent/llm/provider_key_manager.py CHANGED
@@ -14,6 +14,7 @@ PROVIDER_ENVIRONMENT_MAP: Dict[str, str] = {
     "anthropic": "ANTHROPIC_API_KEY",
     "openai": "OPENAI_API_KEY",
     "deepseek": "DEEPSEEK_API_KEY",
+    "google": "GOOGLE_API_KEY",
     "openrouter": "OPENROUTER_API_KEY",
     "generic": "GENERIC_API_KEY",
 }
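In practice, the new entry means the Google key is resolved from the environment like the other providers' keys when it is not present in the config file:

```python
import os

# "<your-key>" is a placeholder; the variable name comes from the
# PROVIDER_ENVIRONMENT_MAP entry added above.
os.environ.setdefault("GOOGLE_API_KEY", "<your-key>")
```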
mcp_agent/llm/provider_types.py CHANGED
@@ -11,6 +11,7 @@ class Provider(Enum):
     ANTHROPIC = "anthropic"
     OPENAI = "openai"
     FAST_AGENT = "fast-agent"
+    GOOGLE = "google"
     DEEPSEEK = "deepseek"
     GENERIC = "generic"
-    OPENROUTER = "openrouter"
+    OPENROUTER = "openrouter"
mcp_agent/llm/providers/augmented_llm_anthropic.py CHANGED
@@ -1,4 +1,4 @@
-from typing import TYPE_CHECKING, List
+from typing import TYPE_CHECKING, List, Tuple, Type
 
 from mcp.types import EmbeddedResource, ImageContent, TextContent
 
@@ -10,6 +10,7 @@ from mcp_agent.llm.providers.multipart_converter_anthropic import (
 from mcp_agent.llm.providers.sampling_converter_anthropic import (
     AnthropicSamplingConverter,
 )
+from mcp_agent.mcp.interfaces import ModelT
 from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
 
 if TYPE_CHECKING:
@@ -50,6 +51,19 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
     selecting appropriate tools, and determining what information to retain.
     """
 
+    # Anthropic-specific parameter exclusions
+    ANTHROPIC_EXCLUDE_FIELDS = {
+        AugmentedLLM.PARAM_MESSAGES,
+        AugmentedLLM.PARAM_MODEL,
+        AugmentedLLM.PARAM_SYSTEM_PROMPT,
+        AugmentedLLM.PARAM_STOP_SEQUENCES,
+        AugmentedLLM.PARAM_MAX_TOKENS,
+        AugmentedLLM.PARAM_METADATA,
+        AugmentedLLM.PARAM_USE_HISTORY,
+        AugmentedLLM.PARAM_MAX_ITERATIONS,
+        AugmentedLLM.PARAM_PARALLEL_TOOL_CALLS,
+    }
+
     def __init__(self, *args, **kwargs) -> None:
         # Initialize logger - keep it simple without name reference
         self.logger = get_logger(__name__)
@@ -73,7 +87,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         assert self.context.config
         return self.context.config.anthropic.base_url if self.context.config.anthropic else None
 
-    async def generate_internal(
+    async def _anthropic_completion(
         self,
         message_param,
         request_params: RequestParams | None = None,
@@ -100,7 +114,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
 
         # Always include prompt messages, but only include conversation history
         # if use_history is True
-        messages.extend(self.history.get(include_history=params.use_history))
+        messages.extend(self.history.get(include_completion_history=params.use_history))
 
         messages.append(message_param)
 
@@ -120,7 +134,8 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
 
         for i in range(params.max_iterations):
             self._log_chat_progress(self.chat_turn(), model=model)
-            arguments = {
+            # Create base arguments dictionary
+            base_args = {
                 "model": model,
                 "messages": messages,
                 "system": self.instruction or params.systemPrompt,
@@ -129,10 +144,12 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
             }
 
             if params.maxTokens is not None:
-                arguments["max_tokens"] = params.maxTokens
+                base_args["max_tokens"] = params.maxTokens
 
-            if params.metadata:
-                arguments = {**arguments, **params.metadata}
+            # Use the base class method to prepare all arguments with Anthropic-specific exclusions
+            arguments = self.prepare_provider_arguments(
+                base_args, params, self.ANTHROPIC_EXCLUDE_FIELDS
+            )
 
             self.logger.debug(f"{arguments}")
 
@@ -265,7 +282,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         # Keep the prompt messages separate
         if params.use_history:
             # Get current prompt messages
-            prompt_messages = self.history.get(include_history=False)
+            prompt_messages = self.history.get(include_completion_history=False)
 
             # Calculate new conversation messages (excluding prompts)
             new_messages = messages[len(prompt_messages) :]
@@ -288,7 +305,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         Override this method to use a different LLM.
 
         """
-        res = await self.generate_internal(
+        res = await self._anthropic_completion(
            message_param=message_param,
            request_params=request_params,
        )
@@ -298,6 +315,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         self,
         multipart_messages: List["PromptMessageMultipart"],
         request_params: RequestParams | None = None,
+        is_template: bool = False,
     ) -> PromptMessageMultipart:
         # Check the last message role
         last_message = multipart_messages[-1]
@@ -310,7 +328,7 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
         for msg in messages_to_add:
             converted.append(AnthropicConverter.convert_to_anthropic(msg))
 
-        self.history.extend(converted, is_prompt=True)
+        self.history.extend(converted, is_prompt=is_template)
 
         if last_message.role == "user":
             self.logger.debug("Last message in prompt is from user, generating assistant response")
@@ -321,6 +339,27 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
             self.logger.debug("Last message in prompt is from assistant, returning it directly")
             return last_message
 
+    async def _apply_prompt_provider_specific_structured(
+        self,
+        multipart_messages: List[PromptMessageMultipart],
+        model: Type[ModelT],
+        request_params: RequestParams | None = None,
+    ) -> Tuple[ModelT | None, PromptMessageMultipart]:  # noqa: F821
+        request_params = self.get_request_params(request_params)
+
+        multipart_messages[-1].add_text(
+            """YOU MUST RESPOND IN THE FOLLOWING FORMAT:
+            {schema}
+            RESPOND ONLY WITH THE JSON, NO PREAMBLE OR CODE FENCES """.format(
+                schema=model.model_json_schema()
+            )
+        )
+
+        result: PromptMessageMultipart = await self._apply_prompt_provider_specific(
+            multipart_messages, request_params
+        )
+        return self._structured_from_multipart(result, model)
+
     @classmethod
     def convert_message_to_message_param(cls, message: Message, **kwargs) -> MessageParam:
         """Convert a response object to an input parameter object to allow LLM calls to be chained."""
mcp_agent/llm/providers/augmented_llm_deepseek.py CHANGED
@@ -1,4 +1,3 @@
-
 from mcp_agent.core.request_params import RequestParams
 from mcp_agent.llm.provider_types import Provider
 from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
@@ -9,7 +8,6 @@ DEFAULT_DEEPSEEK_MODEL = "deepseekchat"  # current Deepseek only has two type mo
 
 class DeepSeekAugmentedLLM(OpenAIAugmentedLLM):
     def __init__(self, *args, **kwargs) -> None:
-        kwargs["provider_name"] = "Deepseek"  # Set provider name in kwargs
         super().__init__(
             *args, provider=Provider.DEEPSEEK, **kwargs
         )  # Properly pass args and kwargs to parent
mcp_agent/llm/providers/augmented_llm_generic.py CHANGED
@@ -10,8 +10,10 @@ DEFAULT_OLLAMA_API_KEY = "ollama"
 
 
 class GenericAugmentedLLM(OpenAIAugmentedLLM):
-    def __init__(self, *args, provider=Provider.GENERIC, **kwargs) -> None:
-        super().__init__(*args, **kwargs)  # Properly pass args and kwargs to parent
+    def __init__(self, *args, **kwargs) -> None:
+        super().__init__(
+            *args, provider=Provider.GENERIC, **kwargs
+        )  # Properly pass args and kwargs to parent
 
     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
         """Initialize Generic parameters"""
mcp_agent/llm/providers/augmented_llm_google.py ADDED
@@ -0,0 +1,30 @@
+from mcp_agent.core.request_params import RequestParams
+from mcp_agent.llm.provider_types import Provider
+from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
+
+GOOGLE_BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai"
+DEFAULT_GOOGLE_MODEL = "gemini-2.0-flash"
+
+
+class GoogleAugmentedLLM(OpenAIAugmentedLLM):
+    def __init__(self, *args, **kwargs) -> None:
+        super().__init__(*args, provider=Provider.GOOGLE, **kwargs)
+
+    def _initialize_default_params(self, kwargs: dict) -> RequestParams:
+        """Initialize Google OpenAI Compatibility default parameters"""
+        chosen_model = kwargs.get("model", DEFAULT_GOOGLE_MODEL)
+
+        return RequestParams(
+            model=chosen_model,
+            systemPrompt=self.instruction,
+            parallel_tool_calls=False,
+            max_iterations=10,
+            use_history=True,
+        )
+
+    def _base_url(self) -> str:
+        base_url = None
+        if self.context.config and self.context.config.google:
+            base_url = self.context.config.google.base_url
+
+        return base_url if base_url else GOOGLE_BASE_URL
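Taken together, the new provider rides the existing OpenAI-compatibility path: `Provider.GOOGLE` selects `GoogleAugmentedLLM`, the key comes from `GOOGLE_API_KEY`, and requests go to the public endpoint unless `google.base_url` is configured. A hedged end-to-end sketch; the decorator usage follows fast-agent's documented surface, and the model string is an assumption:

```python
import asyncio

from mcp_agent.core.fastagent import FastAgent

fast = FastAgent("gemini-example")


# "google.gemini-2.0-flash" assumes the provider-prefixed model string;
# GOOGLE_API_KEY must be set, or an api_key supplied in fastagent.config.yaml.
@fast.agent(instruction="You are a helpful assistant.", model="google.gemini-2.0-flash")
async def main() -> None:
    async with fast.run() as agent:
        await agent.send("Hello via the Gemini OpenAI-compatibility endpoint")


if __name__ == "__main__":
    asyncio.run(main())
```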