fast-agent-mcp 0.2.57__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of fast-agent-mcp has been flagged as potentially problematic.

Files changed (234)
  1. fast_agent/__init__.py +127 -0
  2. fast_agent/agents/__init__.py +36 -0
  3. {mcp_agent/core → fast_agent/agents}/agent_types.py +2 -1
  4. fast_agent/agents/llm_agent.py +217 -0
  5. fast_agent/agents/llm_decorator.py +486 -0
  6. mcp_agent/agents/base_agent.py → fast_agent/agents/mcp_agent.py +377 -385
  7. fast_agent/agents/tool_agent.py +168 -0
  8. {mcp_agent → fast_agent}/agents/workflow/chain_agent.py +43 -33
  9. {mcp_agent → fast_agent}/agents/workflow/evaluator_optimizer.py +31 -35
  10. {mcp_agent → fast_agent}/agents/workflow/iterative_planner.py +56 -47
  11. {mcp_agent → fast_agent}/agents/workflow/orchestrator_models.py +4 -4
  12. {mcp_agent → fast_agent}/agents/workflow/parallel_agent.py +34 -41
  13. {mcp_agent → fast_agent}/agents/workflow/router_agent.py +54 -39
  14. {mcp_agent → fast_agent}/cli/__main__.py +5 -3
  15. {mcp_agent → fast_agent}/cli/commands/check_config.py +95 -66
  16. {mcp_agent → fast_agent}/cli/commands/go.py +20 -11
  17. {mcp_agent → fast_agent}/cli/commands/quickstart.py +4 -4
  18. {mcp_agent → fast_agent}/cli/commands/server_helpers.py +1 -1
  19. {mcp_agent → fast_agent}/cli/commands/setup.py +64 -134
  20. {mcp_agent → fast_agent}/cli/commands/url_parser.py +9 -8
  21. {mcp_agent → fast_agent}/cli/main.py +36 -16
  22. {mcp_agent → fast_agent}/cli/terminal.py +2 -2
  23. {mcp_agent → fast_agent}/config.py +13 -2
  24. fast_agent/constants.py +8 -0
  25. {mcp_agent → fast_agent}/context.py +24 -19
  26. {mcp_agent → fast_agent}/context_dependent.py +9 -5
  27. fast_agent/core/__init__.py +17 -0
  28. {mcp_agent → fast_agent}/core/agent_app.py +39 -36
  29. fast_agent/core/core_app.py +135 -0
  30. {mcp_agent → fast_agent}/core/direct_decorators.py +12 -26
  31. {mcp_agent → fast_agent}/core/direct_factory.py +95 -73
  32. {mcp_agent → fast_agent/core}/executor/executor.py +4 -5
  33. {mcp_agent → fast_agent}/core/fastagent.py +32 -32
  34. fast_agent/core/logging/__init__.py +5 -0
  35. {mcp_agent → fast_agent/core}/logging/events.py +3 -3
  36. {mcp_agent → fast_agent/core}/logging/json_serializer.py +1 -1
  37. {mcp_agent → fast_agent/core}/logging/listeners.py +85 -7
  38. {mcp_agent → fast_agent/core}/logging/logger.py +7 -7
  39. {mcp_agent → fast_agent/core}/logging/transport.py +10 -11
  40. fast_agent/core/prompt.py +9 -0
  41. {mcp_agent → fast_agent}/core/validation.py +4 -4
  42. fast_agent/event_progress.py +61 -0
  43. fast_agent/history/history_exporter.py +44 -0
  44. {mcp_agent → fast_agent}/human_input/__init__.py +9 -12
  45. {mcp_agent → fast_agent}/human_input/elicitation_handler.py +26 -8
  46. {mcp_agent → fast_agent}/human_input/elicitation_state.py +7 -7
  47. {mcp_agent → fast_agent}/human_input/simple_form.py +6 -4
  48. {mcp_agent → fast_agent}/human_input/types.py +1 -18
  49. fast_agent/interfaces.py +228 -0
  50. fast_agent/llm/__init__.py +9 -0
  51. mcp_agent/llm/augmented_llm.py → fast_agent/llm/fastagent_llm.py +128 -218
  52. fast_agent/llm/internal/passthrough.py +137 -0
  53. mcp_agent/llm/augmented_llm_playback.py → fast_agent/llm/internal/playback.py +29 -25
  54. mcp_agent/llm/augmented_llm_silent.py → fast_agent/llm/internal/silent.py +10 -17
  55. fast_agent/llm/internal/slow.py +38 -0
  56. {mcp_agent → fast_agent}/llm/memory.py +40 -30
  57. {mcp_agent → fast_agent}/llm/model_database.py +35 -2
  58. {mcp_agent → fast_agent}/llm/model_factory.py +103 -77
  59. fast_agent/llm/model_info.py +126 -0
  60. {mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/anthropic_utils.py +7 -7
  61. fast_agent/llm/provider/anthropic/llm_anthropic.py +603 -0
  62. {mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/multipart_converter_anthropic.py +79 -86
  63. fast_agent/llm/provider/bedrock/bedrock_utils.py +218 -0
  64. fast_agent/llm/provider/bedrock/llm_bedrock.py +2192 -0
  65. {mcp_agent/llm/providers → fast_agent/llm/provider/google}/google_converter.py +66 -14
  66. fast_agent/llm/provider/google/llm_google_native.py +431 -0
  67. mcp_agent/llm/providers/augmented_llm_aliyun.py → fast_agent/llm/provider/openai/llm_aliyun.py +6 -7
  68. mcp_agent/llm/providers/augmented_llm_azure.py → fast_agent/llm/provider/openai/llm_azure.py +4 -4
  69. mcp_agent/llm/providers/augmented_llm_deepseek.py → fast_agent/llm/provider/openai/llm_deepseek.py +10 -11
  70. mcp_agent/llm/providers/augmented_llm_generic.py → fast_agent/llm/provider/openai/llm_generic.py +4 -4
  71. mcp_agent/llm/providers/augmented_llm_google_oai.py → fast_agent/llm/provider/openai/llm_google_oai.py +4 -4
  72. mcp_agent/llm/providers/augmented_llm_groq.py → fast_agent/llm/provider/openai/llm_groq.py +14 -16
  73. mcp_agent/llm/providers/augmented_llm_openai.py → fast_agent/llm/provider/openai/llm_openai.py +133 -206
  74. mcp_agent/llm/providers/augmented_llm_openrouter.py → fast_agent/llm/provider/openai/llm_openrouter.py +6 -6
  75. mcp_agent/llm/providers/augmented_llm_tensorzero_openai.py → fast_agent/llm/provider/openai/llm_tensorzero_openai.py +17 -16
  76. mcp_agent/llm/providers/augmented_llm_xai.py → fast_agent/llm/provider/openai/llm_xai.py +6 -6
  77. {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/multipart_converter_openai.py +125 -63
  78. {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/openai_multipart.py +12 -12
  79. {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/openai_utils.py +18 -16
  80. {mcp_agent → fast_agent}/llm/provider_key_manager.py +2 -2
  81. {mcp_agent → fast_agent}/llm/provider_types.py +2 -0
  82. {mcp_agent → fast_agent}/llm/sampling_converter.py +15 -12
  83. {mcp_agent → fast_agent}/llm/usage_tracking.py +23 -5
  84. fast_agent/mcp/__init__.py +43 -0
  85. {mcp_agent → fast_agent}/mcp/elicitation_factory.py +3 -3
  86. {mcp_agent → fast_agent}/mcp/elicitation_handlers.py +19 -10
  87. {mcp_agent → fast_agent}/mcp/gen_client.py +3 -3
  88. fast_agent/mcp/helpers/__init__.py +36 -0
  89. fast_agent/mcp/helpers/content_helpers.py +183 -0
  90. {mcp_agent → fast_agent}/mcp/helpers/server_config_helpers.py +8 -8
  91. {mcp_agent → fast_agent}/mcp/hf_auth.py +25 -23
  92. fast_agent/mcp/interfaces.py +93 -0
  93. {mcp_agent → fast_agent}/mcp/logger_textio.py +4 -4
  94. {mcp_agent → fast_agent}/mcp/mcp_agent_client_session.py +49 -44
  95. {mcp_agent → fast_agent}/mcp/mcp_aggregator.py +66 -115
  96. {mcp_agent → fast_agent}/mcp/mcp_connection_manager.py +16 -23
  97. {mcp_agent/core → fast_agent/mcp}/mcp_content.py +23 -15
  98. {mcp_agent → fast_agent}/mcp/mime_utils.py +39 -0
  99. fast_agent/mcp/prompt.py +159 -0
  100. mcp_agent/mcp/prompt_message_multipart.py → fast_agent/mcp/prompt_message_extended.py +27 -20
  101. {mcp_agent → fast_agent}/mcp/prompt_render.py +21 -19
  102. {mcp_agent → fast_agent}/mcp/prompt_serialization.py +46 -46
  103. fast_agent/mcp/prompts/__main__.py +7 -0
  104. {mcp_agent → fast_agent}/mcp/prompts/prompt_helpers.py +31 -30
  105. {mcp_agent → fast_agent}/mcp/prompts/prompt_load.py +8 -8
  106. {mcp_agent → fast_agent}/mcp/prompts/prompt_server.py +11 -19
  107. {mcp_agent → fast_agent}/mcp/prompts/prompt_template.py +18 -18
  108. {mcp_agent → fast_agent}/mcp/resource_utils.py +1 -1
  109. {mcp_agent → fast_agent}/mcp/sampling.py +31 -26
  110. {mcp_agent/mcp_server → fast_agent/mcp/server}/__init__.py +1 -1
  111. {mcp_agent/mcp_server → fast_agent/mcp/server}/agent_server.py +5 -6
  112. fast_agent/mcp/ui_agent.py +48 -0
  113. fast_agent/mcp/ui_mixin.py +209 -0
  114. fast_agent/mcp_server_registry.py +90 -0
  115. {mcp_agent → fast_agent}/resources/examples/data-analysis/analysis-campaign.py +5 -4
  116. {mcp_agent → fast_agent}/resources/examples/data-analysis/analysis.py +1 -1
  117. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_forms_server.py +25 -3
  118. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/forms_demo.py +3 -3
  119. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/game_character.py +2 -2
  120. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/game_character_handler.py +1 -1
  121. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/tool_call.py +1 -1
  122. {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/agent_one.py +1 -1
  123. {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/agent_two.py +1 -1
  124. {mcp_agent → fast_agent}/resources/examples/researcher/researcher-eval.py +1 -1
  125. {mcp_agent → fast_agent}/resources/examples/researcher/researcher-imp.py +1 -1
  126. {mcp_agent → fast_agent}/resources/examples/researcher/researcher.py +1 -1
  127. {mcp_agent → fast_agent}/resources/examples/tensorzero/agent.py +2 -2
  128. {mcp_agent → fast_agent}/resources/examples/tensorzero/image_demo.py +3 -3
  129. {mcp_agent → fast_agent}/resources/examples/tensorzero/simple_agent.py +1 -1
  130. {mcp_agent → fast_agent}/resources/examples/workflows/chaining.py +1 -1
  131. {mcp_agent → fast_agent}/resources/examples/workflows/evaluator.py +3 -3
  132. {mcp_agent → fast_agent}/resources/examples/workflows/human_input.py +5 -3
  133. {mcp_agent → fast_agent}/resources/examples/workflows/orchestrator.py +1 -1
  134. {mcp_agent → fast_agent}/resources/examples/workflows/parallel.py +2 -2
  135. {mcp_agent → fast_agent}/resources/examples/workflows/router.py +5 -2
  136. fast_agent/resources/setup/.gitignore +24 -0
  137. fast_agent/resources/setup/agent.py +18 -0
  138. fast_agent/resources/setup/fastagent.config.yaml +44 -0
  139. fast_agent/resources/setup/fastagent.secrets.yaml.example +38 -0
  140. fast_agent/tools/elicitation.py +369 -0
  141. fast_agent/types/__init__.py +32 -0
  142. fast_agent/types/llm_stop_reason.py +77 -0
  143. fast_agent/ui/__init__.py +38 -0
  144. fast_agent/ui/console_display.py +1005 -0
  145. {mcp_agent/human_input → fast_agent/ui}/elicitation_form.py +56 -39
  146. mcp_agent/human_input/elicitation_forms.py → fast_agent/ui/elicitation_style.py +1 -1
  147. {mcp_agent/core → fast_agent/ui}/enhanced_prompt.py +96 -25
  148. {mcp_agent/core → fast_agent/ui}/interactive_prompt.py +330 -125
  149. fast_agent/ui/mcp_ui_utils.py +224 -0
  150. {mcp_agent → fast_agent/ui}/progress_display.py +2 -2
  151. {mcp_agent/logging → fast_agent/ui}/rich_progress.py +4 -4
  152. {mcp_agent/core → fast_agent/ui}/usage_display.py +3 -8
  153. {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.3.0.dist-info}/METADATA +7 -7
  154. fast_agent_mcp-0.3.0.dist-info/RECORD +202 -0
  155. fast_agent_mcp-0.3.0.dist-info/entry_points.txt +5 -0
  156. fast_agent_mcp-0.2.57.dist-info/RECORD +0 -192
  157. fast_agent_mcp-0.2.57.dist-info/entry_points.txt +0 -6
  158. mcp_agent/__init__.py +0 -114
  159. mcp_agent/agents/agent.py +0 -92
  160. mcp_agent/agents/workflow/__init__.py +0 -1
  161. mcp_agent/agents/workflow/orchestrator_agent.py +0 -597
  162. mcp_agent/app.py +0 -175
  163. mcp_agent/core/__init__.py +0 -26
  164. mcp_agent/core/prompt.py +0 -191
  165. mcp_agent/event_progress.py +0 -134
  166. mcp_agent/human_input/handler.py +0 -81
  167. mcp_agent/llm/__init__.py +0 -2
  168. mcp_agent/llm/augmented_llm_passthrough.py +0 -232
  169. mcp_agent/llm/augmented_llm_slow.py +0 -53
  170. mcp_agent/llm/providers/__init__.py +0 -8
  171. mcp_agent/llm/providers/augmented_llm_anthropic.py +0 -717
  172. mcp_agent/llm/providers/augmented_llm_bedrock.py +0 -1788
  173. mcp_agent/llm/providers/augmented_llm_google_native.py +0 -495
  174. mcp_agent/llm/providers/sampling_converter_anthropic.py +0 -57
  175. mcp_agent/llm/providers/sampling_converter_openai.py +0 -26
  176. mcp_agent/llm/sampling_format_converter.py +0 -37
  177. mcp_agent/logging/__init__.py +0 -0
  178. mcp_agent/mcp/__init__.py +0 -50
  179. mcp_agent/mcp/helpers/__init__.py +0 -25
  180. mcp_agent/mcp/helpers/content_helpers.py +0 -187
  181. mcp_agent/mcp/interfaces.py +0 -266
  182. mcp_agent/mcp/prompts/__init__.py +0 -0
  183. mcp_agent/mcp/prompts/__main__.py +0 -10
  184. mcp_agent/mcp_server_registry.py +0 -343
  185. mcp_agent/tools/tool_definition.py +0 -14
  186. mcp_agent/ui/console_display.py +0 -790
  187. mcp_agent/ui/console_display_legacy.py +0 -401
  188. {mcp_agent → fast_agent}/agents/workflow/orchestrator_prompts.py +0 -0
  189. {mcp_agent/agents → fast_agent/cli}/__init__.py +0 -0
  190. {mcp_agent → fast_agent}/cli/constants.py +0 -0
  191. {mcp_agent → fast_agent}/core/error_handling.py +0 -0
  192. {mcp_agent → fast_agent}/core/exceptions.py +0 -0
  193. {mcp_agent/cli → fast_agent/core/executor}/__init__.py +0 -0
  194. {mcp_agent → fast_agent/core}/executor/task_registry.py +0 -0
  195. {mcp_agent → fast_agent/core}/executor/workflow_signal.py +0 -0
  196. {mcp_agent → fast_agent}/human_input/form_fields.py +0 -0
  197. {mcp_agent → fast_agent}/llm/prompt_utils.py +0 -0
  198. {mcp_agent/core → fast_agent/llm}/request_params.py +0 -0
  199. {mcp_agent → fast_agent}/mcp/common.py +0 -0
  200. {mcp_agent/executor → fast_agent/mcp/prompts}/__init__.py +0 -0
  201. {mcp_agent → fast_agent}/mcp/prompts/prompt_constants.py +0 -0
  202. {mcp_agent → fast_agent}/py.typed +0 -0
  203. {mcp_agent → fast_agent}/resources/examples/data-analysis/fastagent.config.yaml +0 -0
  204. {mcp_agent → fast_agent}/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
  205. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_account_server.py +0 -0
  206. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_game_server.py +0 -0
  207. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/fastagent.config.yaml +0 -0
  208. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +0 -0
  209. {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
  210. {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +0 -0
  211. {mcp_agent → fast_agent}/resources/examples/researcher/fastagent.config.yaml +0 -0
  212. {mcp_agent → fast_agent}/resources/examples/tensorzero/.env.sample +0 -0
  213. {mcp_agent → fast_agent}/resources/examples/tensorzero/Makefile +0 -0
  214. {mcp_agent → fast_agent}/resources/examples/tensorzero/README.md +0 -0
  215. {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/clam.jpg +0 -0
  216. {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/crab.png +0 -0
  217. {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/shrimp.png +0 -0
  218. {mcp_agent → fast_agent}/resources/examples/tensorzero/docker-compose.yml +0 -0
  219. {mcp_agent → fast_agent}/resources/examples/tensorzero/fastagent.config.yaml +0 -0
  220. {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/Dockerfile +0 -0
  221. {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/entrypoint.sh +0 -0
  222. {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/mcp_server.py +0 -0
  223. {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/pyproject.toml +0 -0
  224. {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/system_schema.json +0 -0
  225. {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/system_template.minijinja +0 -0
  226. {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/tensorzero.toml +0 -0
  227. {mcp_agent → fast_agent}/resources/examples/workflows/fastagent.config.yaml +0 -0
  228. {mcp_agent → fast_agent}/resources/examples/workflows/graded_report.md +0 -0
  229. {mcp_agent → fast_agent}/resources/examples/workflows/short_story.md +0 -0
  230. {mcp_agent → fast_agent}/resources/examples/workflows/short_story.txt +0 -0
  231. {mcp_agent → fast_agent/ui}/console.py +0 -0
  232. {mcp_agent/core → fast_agent/ui}/mermaid_utils.py +0 -0
  233. {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.3.0.dist-info}/WHEEL +0 -0
  234. {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.3.0.dist-info}/licenses/LICENSE +0 -0
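Taken together, the list above amounts to a wholesale package rename: everything under mcp_agent moves under fast_agent, provider implementations regroup from llm/providers/augmented_llm_*.py into per-vendor llm/provider/<vendor>/llm_*.py modules, and console code consolidates under fast_agent/ui. Below is a hedged sketch of the import migration this implies for downstream code; the class names are assumptions inferred from the renamed file names and are not confirmed by this diff.

```python
# 0.2.57: imports rooted at the old package name:
#   from mcp_agent.core.fastagent import FastAgent
#   from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart

# 0.3.0: the same modules under the new root. prompt_message_multipart.py was
# renamed prompt_message_extended.py, so the type presumably renames as well.
from fast_agent.core.fastagent import FastAgent  # class name assumed
from fast_agent.mcp.prompt_message_extended import PromptMessageExtended  # name assumed
```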
mcp_agent/llm/providers/augmented_llm_google_native.py DELETED
@@ -1,495 +0,0 @@
- from typing import List
-
- # Import necessary types and client from google.genai
- from google import genai
- from google.genai import (
-     errors,  # For error handling
-     types,
- )
- from mcp.types import (
-     CallToolRequest,
-     CallToolRequestParams,
-     CallToolResult,
-     ContentBlock,
-     TextContent,
- )
- from rich.text import Text
-
- from mcp_agent.core.exceptions import ProviderKeyError
- from mcp_agent.core.prompt import Prompt
- from mcp_agent.core.request_params import RequestParams
- from mcp_agent.llm.augmented_llm import AugmentedLLM
- from mcp_agent.llm.provider_types import Provider
-
- # Import the new converter class
- from mcp_agent.llm.providers.google_converter import GoogleConverter
- from mcp_agent.llm.usage_tracking import TurnUsage
- from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
-
- # Define default model and potentially other Google-specific defaults
- DEFAULT_GOOGLE_MODEL = "gemini-2.0-flash"
-
-
- class GoogleNativeAugmentedLLM(AugmentedLLM[types.Content, types.Content]):
-     """
-     Google LLM provider using the native google.genai library.
-     """
-
-     async def _apply_prompt_provider_specific_structured(
-         self,
-         multipart_messages,
-         model,
-         request_params=None,
-     ):
-         """
-         Handles structured output for Gemini models using response_schema and response_mime_type.
-         """
-         import json
-
-         # Check if the last message is from assistant
-         if multipart_messages and multipart_messages[-1].role == "assistant":
-             last_message = multipart_messages[-1]
-
-             # Extract text content from the assistant message
-             assistant_text = last_message.first_text()
-
-             if assistant_text:
-                 try:
-                     json_data = json.loads(assistant_text)
-                     validated_model = model.model_validate(json_data)
-
-                     # Update history with all messages including the assistant message
-                     self.history.extend(multipart_messages, is_prompt=False)
-
-                     # Return the validated model and the assistant message
-                     return validated_model, last_message
-
-                 except (json.JSONDecodeError, Exception) as e:
-                     self.logger.warning(
-                         f"Failed to parse assistant message as structured response: {e}"
-                     )
-                     # Return None and the assistant message on failure
-                     self.history.extend(multipart_messages, is_prompt=False)
-                     return None, last_message
-
-         # Prepare request params
-         request_params = self.get_request_params(request_params)
-         # Convert Pydantic model to schema dict for Gemini
-         schema = None
-         try:
-             schema = model.model_json_schema()
-         except Exception:
-             pass
-
-         # Set up Gemini config for structured output
-         def _get_schema_type(model):
-             # Try to get the type annotation for the model (for list[...] etc)
-             # Fallback to dict schema if not available
-             try:
-                 return model
-             except Exception:
-                 return None
-
-         # Use the schema as a dict or as a type, as Gemini supports both
-         response_schema = _get_schema_type(model)
-         if schema is not None:
-             response_schema = schema
-
-         # Set config for structured output
-         generate_content_config = self._converter.convert_request_params_to_google_config(
-             request_params
-         )
-         generate_content_config.response_mime_type = "application/json"
-         generate_content_config.response_schema = response_schema
-
-         # Convert messages to google.genai format
-         conversation_history = self._converter.convert_to_google_content(multipart_messages)
-
-         # Call Gemini API
-         try:
-             api_response = await self._google_client.aio.models.generate_content(
-                 model=request_params.model,
-                 contents=conversation_history,
-                 config=generate_content_config,
-             )
-         except Exception as e:
-             self.logger.error(f"Error during Gemini structured call: {e}")
-             # Return None and a dummy assistant message
-             return None, Prompt.assistant(f"Error: {e}")
-
-         # Parse the response as JSON and validate against the model
-         if not api_response.candidates or not api_response.candidates[0].content.parts:
-             return None, Prompt.assistant("No structured response returned.")
-
-         # Try to extract the JSON from the first part
-         text = None
-         for part in api_response.candidates[0].content.parts:
-             if part.text:
-                 text = part.text
-                 break
-         if text is None:
-             return None, Prompt.assistant("No structured text returned.")
-
-         try:
-             json_data = json.loads(text)
-             validated_model = model.model_validate(json_data)
-             # Update LLM history with user and assistant messages for correct history tracking
-             # Add user message(s)
-             for msg in multipart_messages:
-                 self.history.append(msg)
-             # Add assistant message
-             assistant_msg = Prompt.assistant(text)
-             self.history.append(assistant_msg)
-             return validated_model, assistant_msg
-         except Exception as e:
-             self.logger.warning(f"Failed to parse structured response: {e}")
-             # Still update history for consistency
-             for msg in multipart_messages:
-                 self.history.append(msg)
-             assistant_msg = Prompt.assistant(text)
-             self.history.append(assistant_msg)
-             return None, assistant_msg
-
-     # Define Google-specific parameter exclusions if necessary
-     GOOGLE_EXCLUDE_FIELDS = {
-         # Add fields that should not be passed directly from RequestParams to google.genai config
-         AugmentedLLM.PARAM_MESSAGES,  # Handled by contents
-         AugmentedLLM.PARAM_MODEL,  # Handled during client/call setup
-         AugmentedLLM.PARAM_SYSTEM_PROMPT,  # Handled by system_instruction in config
-         # AugmentedLLM.PARAM_PARALLEL_TOOL_CALLS,  # Handled by tool_config in config
-         AugmentedLLM.PARAM_USE_HISTORY,  # Handled by AugmentedLLM base / this class's logic
-         AugmentedLLM.PARAM_MAX_ITERATIONS,  # Handled by this class's loop
-         # Add any other OpenAI-specific params not applicable to google.genai
-     }.union(AugmentedLLM.BASE_EXCLUDE_FIELDS)
-
-     def __init__(self, *args, **kwargs) -> None:
-         super().__init__(*args, provider=Provider.GOOGLE, **kwargs)
-         # Initialize the google.genai client
-         self._google_client = self._initialize_google_client()
-         # Initialize the converter
-         self._converter = GoogleConverter()
-
-     def _initialize_google_client(self) -> genai.Client:
-         """
-         Initializes the google.genai client.
-
-         Reads Google API key or Vertex AI configuration from context config.
-         """
-         try:
-             # Example: Authenticate using API key from config
-             api_key = self._api_key()  # Assuming _api_key() exists in base class
-             if not api_key:
-                 # Handle case where API key is missing
-                 raise ProviderKeyError(
-                     "Google API key not found.", "Please configure your Google API key."
-                 )
-
-             # Check for Vertex AI configuration
-             if (
-                 self.context
-                 and self.context.config
-                 and hasattr(self.context.config, "google")
-                 and hasattr(self.context.config.google, "vertex_ai")
-                 and self.context.config.google.vertex_ai.enabled
-             ):
-                 vertex_config = self.context.config.google.vertex_ai
-                 return genai.Client(
-                     vertexai=True,
-                     project=vertex_config.project_id,
-                     location=vertex_config.location,
-                     # Add other Vertex AI specific options if needed
-                     # http_options=types.HttpOptions(api_version='v1') # Example for v1 API
-                 )
-             else:
-                 # Default to Gemini Developer API
-                 return genai.Client(
-                     api_key=api_key,
-                     # http_options=types.HttpOptions(api_version='v1') # Example for v1 API
-                 )
-         except Exception as e:
-             # Catch potential initialization errors and raise ProviderKeyError
-             raise ProviderKeyError("Failed to initialize Google GenAI client.", str(e)) from e
-
-     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
-         """Initialize Google-specific default parameters."""
-         chosen_model = kwargs.get("model", DEFAULT_GOOGLE_MODEL)
-
-         return RequestParams(
-             model=chosen_model,
-             systemPrompt=self.instruction,  # System instruction will be mapped in _google_completion
-             parallel_tool_calls=True,  # Assume parallel tool calls are supported by default with native API
-             max_iterations=20,
-             use_history=True,
-             maxTokens=65536,  # Default max tokens for Google models
-             # Include other relevant default parameters
-         )
-
-     async def _google_completion(
-         self,
-         request_params: RequestParams | None = None,
-     ) -> List[ContentBlock]:
-         """
-         Process a query using Google's generate_content API and available tools.
-         """
-         request_params = self.get_request_params(request_params=request_params)
-         responses: List[ContentBlock] = []
-
-         # Load full conversation history if use_history is true
-         if request_params.use_history:
-             # Get history from self.history and convert to google.genai format
-             conversation_history = self._converter.convert_to_google_content(
-                 self.history.get(include_completion_history=True)
-             )
-         else:
-             # If not using history, convert the last message to google.genai format
-             conversation_history = self._converter.convert_to_google_content(
-                 self.history.get(include_completion_history=True)[-1:]
-             )
-
-         self.logger.debug(f"Google completion requested with messages: {conversation_history}")
-         self._log_chat_progress(
-             self.chat_turn(), model=request_params.model
-         )  # Log chat progress at the start of completion
-
-         # Keep track of the number of messages in history before this turn
-         initial_history_length = len(conversation_history)
-
-         for i in range(request_params.max_iterations):
-             # 1. Get available tools
-             aggregator_response = await self.aggregator.list_tools()
-             available_tools = self._converter.convert_to_google_tools(
-                 aggregator_response.tools
-             )  # Convert fast-agent tools to google.genai tools
-
-             # 2. Prepare generate_content arguments
-             generate_content_config = self._converter.convert_request_params_to_google_config(
-                 request_params
-             )
-
-             # Add tools and tool_config to generate_content_config if tools are available
-             if available_tools:
-                 generate_content_config.tools = available_tools
-                 # Set tool_config mode to AUTO to allow the model to decide when to call tools
-                 generate_content_config.tool_config = types.ToolConfig(
-                     function_calling_config=types.FunctionCallingConfig(mode="AUTO")
-                 )
-
-             # 3. Call the google.genai API
-             try:
-                 # Use the async client
-                 api_response = await self._google_client.aio.models.generate_content(
-                     model=request_params.model,
-                     contents=conversation_history,  # Pass the current turn's conversation history
-                     config=generate_content_config,
-                 )
-                 self.logger.debug("Google generate_content response:", data=api_response)
-
-                 # Track usage if response is valid and has usage data
-                 if (
-                     hasattr(api_response, "usage_metadata")
-                     and api_response.usage_metadata
-                     and not isinstance(api_response, BaseException)
-                 ):
-                     try:
-                         turn_usage = TurnUsage.from_google(
-                             api_response.usage_metadata, request_params.model
-                         )
-                         self._finalize_turn_usage(turn_usage)
-
-                     except Exception as e:
-                         self.logger.warning(f"Failed to track usage: {e}")
-
-             except errors.APIError as e:
-                 # Handle specific Google API errors
-                 self.logger.error(f"Google API Error: {e.code} - {e.message}")
-                 raise ProviderKeyError(f"Google API Error: {e.code}", e.message or "") from e
-             except Exception as e:
-                 self.logger.error(f"Error during Google generate_content call: {e}")
-                 # Decide how to handle other exceptions - potentially re-raise or return an error message
-                 raise e
-
-             # 4. Process the API response
-             if not api_response.candidates:
-                 # No response from the model, we're done
-                 self.logger.debug(f"Iteration {i}: No candidates returned.")
-                 break
-
-             candidate = api_response.candidates[0]  # Process the first candidate
-
-             # Convert the model's response content to fast-agent types
-             model_response_content_parts = self._converter.convert_from_google_content(
-                 candidate.content
-             )
-
-             # Add model's response to conversation history for potential next turn
-             # This is for the *internal* conversation history of this completion call
-             # to handle multi-turn tool use within one _google_completion call.
-             conversation_history.append(candidate.content)
-
-             # Extract and process text content and tool calls
-             assistant_message_parts = []
-             tool_calls_to_execute = []
-
-             for part in model_response_content_parts:
-                 if isinstance(part, TextContent):
-                     responses.append(part)  # Add text content to the final responses to be returned
-                     assistant_message_parts.append(
-                         part
-                     )  # Collect text for potential assistant message display
-                 elif isinstance(part, CallToolRequestParams):
-                     # This is a function call requested by the model
-                     tool_calls_to_execute.append(part)  # Collect tool calls to execute
-
-             # Display assistant message if there is text content
-             if assistant_message_parts:
-                 # Combine text parts for display
-                 assistant_text = "".join(
-                     [p.text for p in assistant_message_parts if isinstance(p, TextContent)]
-                 )
-                 # Display the assistant message. If there are tool calls, indicate that.
-                 if tool_calls_to_execute:
-                     tool_names = ", ".join([tc.name for tc in tool_calls_to_execute])
-                     display_text = Text(
-                         f"{assistant_text}\nAssistant requested tool calls: {tool_names}",
-                         style="dim green italic",
-                     )
-                     await self.show_assistant_message(display_text, tool_names)
-                 else:
-                     await self.show_assistant_message(Text(assistant_text))
-
-             # 5. Handle tool calls if any
-             if tool_calls_to_execute:
-                 tool_results = []
-                 for tool_call_params in tool_calls_to_execute:
-                     # Convert to CallToolRequest and execute
-                     tool_call_request = CallToolRequest(
-                         method="tools/call", params=tool_call_params
-                     )
-                     self.show_tool_call(
-                         aggregator_response.tools,  # Pass fast-agent tool definitions for display
-                         tool_call_request.params.name,
-                         str(
-                             tool_call_request.params.arguments
-                         ),  # Convert dict to string for display
-                     )
-
-                     # Execute the tool call. google.genai does not provide a tool_call_id, pass None.
-                     result = await self.call_tool(tool_call_request, None)
-                     self.show_tool_result(result)
-
-                     tool_results.append((tool_call_params.name, result))  # Store name and result
-
-                     # Add tool result content to the overall responses to be returned
-                     responses.extend(result.content)
-
-                 # Convert tool results back to google.genai format and add to conversation_history for the next turn
-                 tool_response_google_contents = self._converter.convert_function_results_to_google(
-                     tool_results
-                 )
-                 conversation_history.extend(tool_response_google_contents)
-
-                 self.logger.debug(f"Iteration {i}: Tool call results processed.")
-             else:
-                 # If no tool calls, check finish reason to stop or continue
-                 # google.genai finish reasons: STOP, MAX_TOKENS, SAFETY, RECITATION, OTHER
-                 if candidate.finish_reason in ["STOP", "MAX_TOKENS", "SAFETY"]:
-                     self.logger.debug(
-                         f"Iteration {i}: Stopping because finish_reason is '{candidate.finish_reason}'"
-                     )
-                     # Display message if stopping due to max tokens
-                     if (
-                         candidate.finish_reason == "MAX_TOKENS"
-                         and request_params
-                         and request_params.maxTokens is not None
-                     ):
-                         message_text = Text(
-                             f"the assistant has reached the maximum token limit ({request_params.maxTokens})",
-                             style="dim green italic",
-                         )
-                         await self.show_assistant_message(message_text)
-                     break  # Exit the loop if a stopping condition is met
-                 # If no tool calls and no explicit stopping reason, the model might be done.
-                 # Break to avoid infinite loops if the model doesn't explicitly stop or call tools.
-                 self.logger.debug(
-                     f"Iteration {i}: No tool calls and no explicit stop reason, breaking."
-                 )
-                 break
-
-         # 6. Update history after all iterations are done (or max_iterations reached)
-         # Only add the new messages generated during this completion turn to history
-         if request_params.use_history:
-             new_google_messages = conversation_history[initial_history_length:]
-             new_multipart_messages = self._converter.convert_from_google_content_list(
-                 new_google_messages
-             )
-             self.history.extend(new_multipart_messages)
-
-         self._log_chat_finished(model=request_params.model)  # Use model from request_params
-         return responses  # Return the accumulated responses (fast-agent content types)
-
-     async def _apply_prompt_provider_specific(
-         self,
-         multipart_messages: List[PromptMessageMultipart],
-         request_params: RequestParams | None = None,
-         is_template: bool = False,
-     ) -> PromptMessageMultipart:
-         """
-         Applies the prompt messages and potentially calls the LLM for completion.
-         """
-         # Reset tool call counter for new turn
-         self._reset_turn_tool_calls()
-
-         request_params = self.get_request_params(
-             request_params=request_params
-         )  # Get request params
-
-         # Add incoming messages to history before calling completion
-         # This ensures the current user message is part of the history for the API call
-         self.history.extend(multipart_messages, is_prompt=is_template)
-
-         last_message_role = multipart_messages[-1].role if multipart_messages else None
-
-         if last_message_role == "user":
-             # If the last message is from the user, call the LLM for a response
-             # _google_completion will now load history internally
-             responses = await self._google_completion(request_params=request_params)
-
-             # History update is now handled within _google_completion
-             pass
-
-             return Prompt.assistant(*responses)  # Return combined responses as an assistant message
-         else:
-             # If the last message is not from the user (e.g., assistant), no completion is needed for this step
-             # The messages have already been added to history by the calling code/framework
-             return multipart_messages[-1]  # Return the last message as is
-
-     async def pre_tool_call(self, tool_call_id: str | None, request: CallToolRequest):
-         """
-         Hook called before a tool call.
-
-         Args:
-             tool_call_id: The ID of the tool call.
-             request: The CallToolRequest object.
-
-         Returns:
-             The modified CallToolRequest object.
-         """
-         # Currently a pass-through, can add Google-specific logic if needed
-         return request
-
-     async def post_tool_call(
-         self, tool_call_id: str | None, request: CallToolRequest, result: CallToolResult
-     ):
-         """
-         Hook called after a tool call.
-
-         Args:
-             tool_call_id: The ID of the tool call.
-             request: The original CallToolRequest object.
-             result: The CallToolResult object.
-
-         Returns:
-             The modified CallToolResult object.
-         """
-         # Currently a pass-through, can add Google-specific logic if needed
-         return result
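The structured-output path in the deleted provider reduces to two fields on the generation config: response_mime_type="application/json" and a response_schema. A minimal standalone sketch of that pattern against the google-genai SDK the file imports; the schema and prompt are placeholders, and a GEMINI_API_KEY is assumed in the environment:

```python
from google import genai
from google.genai import types
from pydantic import BaseModel


class WeatherReport(BaseModel):  # placeholder schema for illustration
    city: str
    temperature_c: float


client = genai.Client()  # picks up GEMINI_API_KEY from the environment
response = client.models.generate_content(
    model="gemini-2.0-flash",
    contents="Report the weather in Paris as JSON.",
    config=types.GenerateContentConfig(
        response_mime_type="application/json",
        response_schema=WeatherReport,  # SDK accepts a Pydantic model or a dict schema
    ),
)
report = WeatherReport.model_validate_json(response.text)
```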
mcp_agent/llm/providers/sampling_converter_anthropic.py DELETED
@@ -1,57 +0,0 @@
- from anthropic.types import (
-     Message,
-     MessageParam,
- )
- from mcp import StopReason
- from mcp.types import (
-     PromptMessage,
- )
-
- from mcp_agent.llm.providers.multipart_converter_anthropic import (
-     AnthropicConverter,
- )
- from mcp_agent.llm.sampling_format_converter import ProviderFormatConverter
- from mcp_agent.logging.logger import get_logger
-
- _logger = get_logger(__name__)
-
-
- class AnthropicSamplingConverter(ProviderFormatConverter[MessageParam, Message]):
-     """
-     Convert between Anthropic and MCP types.
-     """
-
-     @classmethod
-     def from_prompt_message(cls, message: PromptMessage) -> MessageParam:
-         """Convert an MCP PromptMessage to an Anthropic MessageParam."""
-         return AnthropicConverter.convert_prompt_message_to_anthropic(message)
-
-
- def mcp_stop_reason_to_anthropic_stop_reason(stop_reason: StopReason):
-     if not stop_reason:
-         return None
-     elif stop_reason == "endTurn":
-         return "end_turn"
-     elif stop_reason == "maxTokens":
-         return "max_tokens"
-     elif stop_reason == "stopSequence":
-         return "stop_sequence"
-     elif stop_reason == "toolUse":
-         return "tool_use"
-     else:
-         return stop_reason
-
-
- def anthropic_stop_reason_to_mcp_stop_reason(stop_reason: str) -> StopReason:
-     if not stop_reason:
-         return "end_turn"
-     elif stop_reason == "end_turn":
-         return "endTurn"
-     elif stop_reason == "max_tokens":
-         return "maxTokens"
-     elif stop_reason == "stop_sequence":
-         return "stopSequence"
-     elif stop_reason == "tool_use":
-         return "toolUse"
-     else:
-         return stop_reason
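The two stop-reason helpers above are plain string translations between MCP's camelCase stop reasons and Anthropic's snake_case ones; per the file list, 0.3.0 appears to move this concern into the new fast_agent/types/llm_stop_reason.py. A self-contained reimplementation of the same round trip as dict lookups, given as an illustration rather than the package's API:

```python
# Standalone reimplementation of the deleted stop-reason mapping.
_MCP_TO_ANTHROPIC = {
    "endTurn": "end_turn",
    "maxTokens": "max_tokens",
    "stopSequence": "stop_sequence",
    "toolUse": "tool_use",
}
# Invert the table for the reverse direction.
_ANTHROPIC_TO_MCP = {v: k for k, v in _MCP_TO_ANTHROPIC.items()}


def mcp_to_anthropic(stop_reason: str | None) -> str | None:
    # Unknown values pass through unchanged, matching the deleted code.
    if not stop_reason:
        return None
    return _MCP_TO_ANTHROPIC.get(stop_reason, stop_reason)


def anthropic_to_mcp(stop_reason: str | None) -> str:
    # The deleted code defaulted a missing reason to "end_turn".
    if not stop_reason:
        return "end_turn"
    return _ANTHROPIC_TO_MCP.get(stop_reason, stop_reason)


assert anthropic_to_mcp(mcp_to_anthropic("toolUse")) == "toolUse"
```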
mcp_agent/llm/providers/sampling_converter_openai.py DELETED
@@ -1,26 +0,0 @@
-
- from mcp.types import (
-     PromptMessage,
- )
- from openai.types.chat import ChatCompletionMessage, ChatCompletionMessageParam
-
- from mcp_agent.llm.sampling_format_converter import (
-     ProviderFormatConverter,
- )
- from mcp_agent.logging.logger import get_logger
-
- _logger = get_logger(__name__)
-
-
- class OpenAISamplingConverter(
-     ProviderFormatConverter[ChatCompletionMessageParam, ChatCompletionMessage]
- ):
-     @classmethod
-     def from_prompt_message(cls, message: PromptMessage) -> ChatCompletionMessageParam:
-         """Convert an MCP PromptMessage to an OpenAI message dict."""
-         from mcp_agent.llm.providers.multipart_converter_openai import (
-             OpenAIConverter,
-         )
-
-         # Use the full-featured OpenAI converter for consistent handling
-         return OpenAIConverter.convert_prompt_message_to_openai(message)
mcp_agent/llm/sampling_format_converter.py DELETED
@@ -1,37 +0,0 @@
- from typing import Generic, List, Protocol, TypeVar
-
- from mcp.types import PromptMessage
-
- from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
-
- # Define covariant type variables
- MessageParamT_co = TypeVar("MessageParamT_co", covariant=True)
- MessageT_co = TypeVar("MessageT_co", covariant=True)
-
-
- class ProviderFormatConverter(Protocol, Generic[MessageParamT_co, MessageT_co]):
-     """Conversions between LLM provider and MCP types"""
-
-     @classmethod
-     def from_prompt_message(cls, message: PromptMessage) -> MessageParamT_co:
-         """Convert an MCP PromptMessage to a provider-specific message parameter."""
-         ...
-
-     @classmethod
-     def from_mutlipart_prompts(
-         cls, messages: List[PromptMessageMultipart]
-     ) -> List[MessageParamT_co]:
-         """Convert a list of PromptMessageMultiparts to a list of provider-specific implementations"""
-         ...
-
-
- class BasicFormatConverter(ProviderFormatConverter[PromptMessage, PromptMessage]):
-     @classmethod
-     def from_prompt_message(cls, message: PromptMessage) -> PromptMessage:
-         return message
-
-     @classmethod
-     def from_multipart_prompts(
-         cls, messages: List[PromptMessageMultipart]
-     ) -> List[PromptMessageMultipart]:
-         return messages
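ProviderFormatConverter above is a Protocol, so converters satisfy it structurally rather than by inheritance (note the from_mutlipart_prompts misspelling, which shipped as-is in 0.2.57). A minimal self-contained sketch of a structurally conforming converter, using stand-in types rather than the package's:

```python
from typing import Protocol, TypeVar

MessageParamT_co = TypeVar("MessageParamT_co", covariant=True)


class PromptMessage:  # stand-in for mcp.types.PromptMessage
    def __init__(self, text: str) -> None:
        self.text = text


class FormatConverter(Protocol[MessageParamT_co]):
    @classmethod
    def from_prompt_message(cls, message: PromptMessage) -> MessageParamT_co: ...


class DictConverter:
    """Satisfies FormatConverter[dict] structurally; no base class needed."""

    @classmethod
    def from_prompt_message(cls, message: PromptMessage) -> dict:
        return {"role": "user", "content": message.text}


converter: FormatConverter[dict] = DictConverter()
print(converter.from_prompt_message(PromptMessage("hello")))
```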
mcp_agent/mcp/__init__.py DELETED
@@ -1,50 +0,0 @@
- """
- MCP (Model Context Protocol) integration components.
- """
-
- from mcp.types import PromptMessage
-
- from .helpers import (
-     get_image_data,
-     get_resource_text,
-     get_resource_uri,
-     get_text,
-     is_image_content,
-     is_resource_content,
-     is_resource_link,
-     is_text_content,
- )
- from .interfaces import (
-     AgentProtocol,
-     AugmentedLLMProtocol,
-     MCPConnectionManagerProtocol,
-     ModelFactoryClassProtocol,
-     ModelT,
-     ServerConnection,
-     ServerRegistryProtocol,
- )
- from .prompt_message_multipart import PromptMessageMultipart
-
- __all__ = [
-     # Types from mcp.types
-     "PromptMessage",
-     # Multipart message handling
-     "PromptMessageMultipart",
-     # Protocol interfaces
-     "AugmentedLLMProtocol",
-     "AgentProtocol",
-     "MCPConnectionManagerProtocol",
-     "ServerRegistryProtocol",
-     "ServerConnection",
-     "ModelFactoryClassProtocol",
-     "ModelT",
-     # Helper functions
-     "get_text",
-     "get_image_data",
-     "get_resource_uri",
-     "is_text_content",
-     "is_image_content",
-     "is_resource_content",
-     "is_resource_link",
-     "get_resource_text",
- ]
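For downstream code that imported these re-exports, the file list suggests the same surface now lives under fast_agent.mcp (note the added fast_agent/mcp/__init__.py, fast_agent/mcp/helpers/content_helpers.py, and fast_agent/mcp/interfaces.py). A hedged before/after sketch, assuming the helper names carried over unchanged; whether fast_agent.mcp itself re-exports them is not confirmed by this diff:

```python
# 0.2.57: helpers re-exported from the deleted mcp_agent.mcp package:
#   from mcp_agent.mcp import PromptMessageMultipart, get_text, is_text_content

# 0.3.0: assumed new homes, per the renamed files above:
from fast_agent.mcp.helpers.content_helpers import get_text, is_text_content
from fast_agent.mcp.prompt_message_extended import PromptMessageExtended  # name assumed
```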