fast-agent-mcp 0.2.58__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of fast-agent-mcp might be problematic; consult the registry's security advisory for more details.

Files changed (233) hide show
  1. fast_agent/__init__.py +127 -0
  2. fast_agent/agents/__init__.py +36 -0
  3. {mcp_agent/core → fast_agent/agents}/agent_types.py +2 -1
  4. fast_agent/agents/llm_agent.py +217 -0
  5. fast_agent/agents/llm_decorator.py +486 -0
  6. mcp_agent/agents/base_agent.py → fast_agent/agents/mcp_agent.py +377 -385
  7. fast_agent/agents/tool_agent.py +168 -0
  8. {mcp_agent → fast_agent}/agents/workflow/chain_agent.py +43 -33
  9. {mcp_agent → fast_agent}/agents/workflow/evaluator_optimizer.py +31 -35
  10. {mcp_agent → fast_agent}/agents/workflow/iterative_planner.py +56 -47
  11. {mcp_agent → fast_agent}/agents/workflow/orchestrator_models.py +4 -4
  12. {mcp_agent → fast_agent}/agents/workflow/parallel_agent.py +34 -41
  13. {mcp_agent → fast_agent}/agents/workflow/router_agent.py +54 -39
  14. {mcp_agent → fast_agent}/cli/__main__.py +5 -3
  15. {mcp_agent → fast_agent}/cli/commands/check_config.py +95 -66
  16. {mcp_agent → fast_agent}/cli/commands/go.py +20 -11
  17. {mcp_agent → fast_agent}/cli/commands/quickstart.py +4 -4
  18. {mcp_agent → fast_agent}/cli/commands/server_helpers.py +1 -1
  19. {mcp_agent → fast_agent}/cli/commands/setup.py +64 -134
  20. {mcp_agent → fast_agent}/cli/commands/url_parser.py +9 -8
  21. {mcp_agent → fast_agent}/cli/main.py +36 -16
  22. {mcp_agent → fast_agent}/cli/terminal.py +2 -2
  23. {mcp_agent → fast_agent}/config.py +10 -2
  24. fast_agent/constants.py +8 -0
  25. {mcp_agent → fast_agent}/context.py +24 -19
  26. {mcp_agent → fast_agent}/context_dependent.py +9 -5
  27. fast_agent/core/__init__.py +17 -0
  28. {mcp_agent → fast_agent}/core/agent_app.py +39 -36
  29. fast_agent/core/core_app.py +135 -0
  30. {mcp_agent → fast_agent}/core/direct_decorators.py +12 -26
  31. {mcp_agent → fast_agent}/core/direct_factory.py +95 -73
  32. {mcp_agent → fast_agent/core}/executor/executor.py +4 -5
  33. {mcp_agent → fast_agent}/core/fastagent.py +32 -32
  34. fast_agent/core/logging/__init__.py +5 -0
  35. {mcp_agent → fast_agent/core}/logging/events.py +3 -3
  36. {mcp_agent → fast_agent/core}/logging/json_serializer.py +1 -1
  37. {mcp_agent → fast_agent/core}/logging/listeners.py +85 -7
  38. {mcp_agent → fast_agent/core}/logging/logger.py +7 -7
  39. {mcp_agent → fast_agent/core}/logging/transport.py +10 -11
  40. fast_agent/core/prompt.py +9 -0
  41. {mcp_agent → fast_agent}/core/validation.py +4 -4
  42. fast_agent/event_progress.py +61 -0
  43. fast_agent/history/history_exporter.py +44 -0
  44. {mcp_agent → fast_agent}/human_input/__init__.py +9 -12
  45. {mcp_agent → fast_agent}/human_input/elicitation_handler.py +26 -8
  46. {mcp_agent → fast_agent}/human_input/elicitation_state.py +7 -7
  47. {mcp_agent → fast_agent}/human_input/simple_form.py +6 -4
  48. {mcp_agent → fast_agent}/human_input/types.py +1 -18
  49. fast_agent/interfaces.py +228 -0
  50. fast_agent/llm/__init__.py +9 -0
  51. mcp_agent/llm/augmented_llm.py → fast_agent/llm/fastagent_llm.py +127 -218
  52. fast_agent/llm/internal/passthrough.py +137 -0
  53. mcp_agent/llm/augmented_llm_playback.py → fast_agent/llm/internal/playback.py +29 -25
  54. mcp_agent/llm/augmented_llm_silent.py → fast_agent/llm/internal/silent.py +10 -17
  55. fast_agent/llm/internal/slow.py +38 -0
  56. {mcp_agent → fast_agent}/llm/memory.py +40 -30
  57. {mcp_agent → fast_agent}/llm/model_database.py +35 -2
  58. {mcp_agent → fast_agent}/llm/model_factory.py +103 -77
  59. fast_agent/llm/model_info.py +126 -0
  60. {mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/anthropic_utils.py +7 -7
  61. fast_agent/llm/provider/anthropic/llm_anthropic.py +603 -0
  62. {mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/multipart_converter_anthropic.py +79 -86
  63. {mcp_agent/llm/providers → fast_agent/llm/provider/bedrock}/bedrock_utils.py +3 -1
  64. mcp_agent/llm/providers/augmented_llm_bedrock.py → fast_agent/llm/provider/bedrock/llm_bedrock.py +833 -717
  65. {mcp_agent/llm/providers → fast_agent/llm/provider/google}/google_converter.py +66 -14
  66. fast_agent/llm/provider/google/llm_google_native.py +431 -0
  67. mcp_agent/llm/providers/augmented_llm_aliyun.py → fast_agent/llm/provider/openai/llm_aliyun.py +6 -7
  68. mcp_agent/llm/providers/augmented_llm_azure.py → fast_agent/llm/provider/openai/llm_azure.py +4 -4
  69. mcp_agent/llm/providers/augmented_llm_deepseek.py → fast_agent/llm/provider/openai/llm_deepseek.py +10 -11
  70. mcp_agent/llm/providers/augmented_llm_generic.py → fast_agent/llm/provider/openai/llm_generic.py +4 -4
  71. mcp_agent/llm/providers/augmented_llm_google_oai.py → fast_agent/llm/provider/openai/llm_google_oai.py +4 -4
  72. mcp_agent/llm/providers/augmented_llm_groq.py → fast_agent/llm/provider/openai/llm_groq.py +14 -16
  73. mcp_agent/llm/providers/augmented_llm_openai.py → fast_agent/llm/provider/openai/llm_openai.py +133 -207
  74. mcp_agent/llm/providers/augmented_llm_openrouter.py → fast_agent/llm/provider/openai/llm_openrouter.py +6 -6
  75. mcp_agent/llm/providers/augmented_llm_tensorzero_openai.py → fast_agent/llm/provider/openai/llm_tensorzero_openai.py +17 -16
  76. mcp_agent/llm/providers/augmented_llm_xai.py → fast_agent/llm/provider/openai/llm_xai.py +6 -6
  77. {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/multipart_converter_openai.py +125 -63
  78. {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/openai_multipart.py +12 -12
  79. {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/openai_utils.py +18 -16
  80. {mcp_agent → fast_agent}/llm/provider_key_manager.py +2 -2
  81. {mcp_agent → fast_agent}/llm/provider_types.py +2 -0
  82. {mcp_agent → fast_agent}/llm/sampling_converter.py +15 -12
  83. {mcp_agent → fast_agent}/llm/usage_tracking.py +23 -5
  84. fast_agent/mcp/__init__.py +43 -0
  85. {mcp_agent → fast_agent}/mcp/elicitation_factory.py +3 -3
  86. {mcp_agent → fast_agent}/mcp/elicitation_handlers.py +19 -10
  87. {mcp_agent → fast_agent}/mcp/gen_client.py +3 -3
  88. fast_agent/mcp/helpers/__init__.py +36 -0
  89. fast_agent/mcp/helpers/content_helpers.py +183 -0
  90. {mcp_agent → fast_agent}/mcp/helpers/server_config_helpers.py +8 -8
  91. {mcp_agent → fast_agent}/mcp/hf_auth.py +25 -23
  92. fast_agent/mcp/interfaces.py +93 -0
  93. {mcp_agent → fast_agent}/mcp/logger_textio.py +4 -4
  94. {mcp_agent → fast_agent}/mcp/mcp_agent_client_session.py +49 -44
  95. {mcp_agent → fast_agent}/mcp/mcp_aggregator.py +66 -115
  96. {mcp_agent → fast_agent}/mcp/mcp_connection_manager.py +16 -23
  97. {mcp_agent/core → fast_agent/mcp}/mcp_content.py +23 -15
  98. {mcp_agent → fast_agent}/mcp/mime_utils.py +39 -0
  99. fast_agent/mcp/prompt.py +159 -0
  100. mcp_agent/mcp/prompt_message_multipart.py → fast_agent/mcp/prompt_message_extended.py +27 -20
  101. {mcp_agent → fast_agent}/mcp/prompt_render.py +21 -19
  102. {mcp_agent → fast_agent}/mcp/prompt_serialization.py +46 -46
  103. fast_agent/mcp/prompts/__main__.py +7 -0
  104. {mcp_agent → fast_agent}/mcp/prompts/prompt_helpers.py +31 -30
  105. {mcp_agent → fast_agent}/mcp/prompts/prompt_load.py +8 -8
  106. {mcp_agent → fast_agent}/mcp/prompts/prompt_server.py +11 -19
  107. {mcp_agent → fast_agent}/mcp/prompts/prompt_template.py +18 -18
  108. {mcp_agent → fast_agent}/mcp/resource_utils.py +1 -1
  109. {mcp_agent → fast_agent}/mcp/sampling.py +31 -26
  110. {mcp_agent/mcp_server → fast_agent/mcp/server}/__init__.py +1 -1
  111. {mcp_agent/mcp_server → fast_agent/mcp/server}/agent_server.py +5 -6
  112. fast_agent/mcp/ui_agent.py +48 -0
  113. fast_agent/mcp/ui_mixin.py +209 -0
  114. fast_agent/mcp_server_registry.py +90 -0
  115. {mcp_agent → fast_agent}/resources/examples/data-analysis/analysis-campaign.py +5 -4
  116. {mcp_agent → fast_agent}/resources/examples/data-analysis/analysis.py +1 -1
  117. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/forms_demo.py +3 -3
  118. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/game_character.py +2 -2
  119. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/game_character_handler.py +1 -1
  120. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/tool_call.py +1 -1
  121. {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/agent_one.py +1 -1
  122. {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/agent_two.py +1 -1
  123. {mcp_agent → fast_agent}/resources/examples/researcher/researcher-eval.py +1 -1
  124. {mcp_agent → fast_agent}/resources/examples/researcher/researcher-imp.py +1 -1
  125. {mcp_agent → fast_agent}/resources/examples/researcher/researcher.py +1 -1
  126. {mcp_agent → fast_agent}/resources/examples/tensorzero/agent.py +2 -2
  127. {mcp_agent → fast_agent}/resources/examples/tensorzero/image_demo.py +3 -3
  128. {mcp_agent → fast_agent}/resources/examples/tensorzero/simple_agent.py +1 -1
  129. {mcp_agent → fast_agent}/resources/examples/workflows/chaining.py +1 -1
  130. {mcp_agent → fast_agent}/resources/examples/workflows/evaluator.py +3 -3
  131. {mcp_agent → fast_agent}/resources/examples/workflows/human_input.py +5 -3
  132. {mcp_agent → fast_agent}/resources/examples/workflows/orchestrator.py +1 -1
  133. {mcp_agent → fast_agent}/resources/examples/workflows/parallel.py +2 -2
  134. {mcp_agent → fast_agent}/resources/examples/workflows/router.py +5 -2
  135. fast_agent/resources/setup/.gitignore +24 -0
  136. fast_agent/resources/setup/agent.py +18 -0
  137. fast_agent/resources/setup/fastagent.config.yaml +44 -0
  138. fast_agent/resources/setup/fastagent.secrets.yaml.example +38 -0
  139. fast_agent/tools/elicitation.py +369 -0
  140. fast_agent/types/__init__.py +32 -0
  141. fast_agent/types/llm_stop_reason.py +77 -0
  142. fast_agent/ui/__init__.py +38 -0
  143. fast_agent/ui/console_display.py +1005 -0
  144. {mcp_agent/human_input → fast_agent/ui}/elicitation_form.py +17 -12
  145. mcp_agent/human_input/elicitation_forms.py → fast_agent/ui/elicitation_style.py +1 -1
  146. {mcp_agent/core → fast_agent/ui}/enhanced_prompt.py +96 -25
  147. {mcp_agent/core → fast_agent/ui}/interactive_prompt.py +330 -125
  148. fast_agent/ui/mcp_ui_utils.py +224 -0
  149. {mcp_agent → fast_agent/ui}/progress_display.py +2 -2
  150. {mcp_agent/logging → fast_agent/ui}/rich_progress.py +4 -4
  151. {mcp_agent/core → fast_agent/ui}/usage_display.py +3 -8
  152. {fast_agent_mcp-0.2.58.dist-info → fast_agent_mcp-0.3.0.dist-info}/METADATA +7 -7
  153. fast_agent_mcp-0.3.0.dist-info/RECORD +202 -0
  154. fast_agent_mcp-0.3.0.dist-info/entry_points.txt +5 -0
  155. fast_agent_mcp-0.2.58.dist-info/RECORD +0 -193
  156. fast_agent_mcp-0.2.58.dist-info/entry_points.txt +0 -6
  157. mcp_agent/__init__.py +0 -114
  158. mcp_agent/agents/agent.py +0 -92
  159. mcp_agent/agents/workflow/__init__.py +0 -1
  160. mcp_agent/agents/workflow/orchestrator_agent.py +0 -597
  161. mcp_agent/app.py +0 -175
  162. mcp_agent/core/__init__.py +0 -26
  163. mcp_agent/core/prompt.py +0 -191
  164. mcp_agent/event_progress.py +0 -134
  165. mcp_agent/human_input/handler.py +0 -81
  166. mcp_agent/llm/__init__.py +0 -2
  167. mcp_agent/llm/augmented_llm_passthrough.py +0 -232
  168. mcp_agent/llm/augmented_llm_slow.py +0 -53
  169. mcp_agent/llm/providers/__init__.py +0 -8
  170. mcp_agent/llm/providers/augmented_llm_anthropic.py +0 -718
  171. mcp_agent/llm/providers/augmented_llm_google_native.py +0 -496
  172. mcp_agent/llm/providers/sampling_converter_anthropic.py +0 -57
  173. mcp_agent/llm/providers/sampling_converter_openai.py +0 -26
  174. mcp_agent/llm/sampling_format_converter.py +0 -37
  175. mcp_agent/logging/__init__.py +0 -0
  176. mcp_agent/mcp/__init__.py +0 -50
  177. mcp_agent/mcp/helpers/__init__.py +0 -25
  178. mcp_agent/mcp/helpers/content_helpers.py +0 -187
  179. mcp_agent/mcp/interfaces.py +0 -266
  180. mcp_agent/mcp/prompts/__init__.py +0 -0
  181. mcp_agent/mcp/prompts/__main__.py +0 -10
  182. mcp_agent/mcp_server_registry.py +0 -343
  183. mcp_agent/tools/tool_definition.py +0 -14
  184. mcp_agent/ui/console_display.py +0 -790
  185. mcp_agent/ui/console_display_legacy.py +0 -401
  186. {mcp_agent → fast_agent}/agents/workflow/orchestrator_prompts.py +0 -0
  187. {mcp_agent/agents → fast_agent/cli}/__init__.py +0 -0
  188. {mcp_agent → fast_agent}/cli/constants.py +0 -0
  189. {mcp_agent → fast_agent}/core/error_handling.py +0 -0
  190. {mcp_agent → fast_agent}/core/exceptions.py +0 -0
  191. {mcp_agent/cli → fast_agent/core/executor}/__init__.py +0 -0
  192. {mcp_agent → fast_agent/core}/executor/task_registry.py +0 -0
  193. {mcp_agent → fast_agent/core}/executor/workflow_signal.py +0 -0
  194. {mcp_agent → fast_agent}/human_input/form_fields.py +0 -0
  195. {mcp_agent → fast_agent}/llm/prompt_utils.py +0 -0
  196. {mcp_agent/core → fast_agent/llm}/request_params.py +0 -0
  197. {mcp_agent → fast_agent}/mcp/common.py +0 -0
  198. {mcp_agent/executor → fast_agent/mcp/prompts}/__init__.py +0 -0
  199. {mcp_agent → fast_agent}/mcp/prompts/prompt_constants.py +0 -0
  200. {mcp_agent → fast_agent}/py.typed +0 -0
  201. {mcp_agent → fast_agent}/resources/examples/data-analysis/fastagent.config.yaml +0 -0
  202. {mcp_agent → fast_agent}/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
  203. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_account_server.py +0 -0
  204. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_forms_server.py +0 -0
  205. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_game_server.py +0 -0
  206. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/fastagent.config.yaml +0 -0
  207. {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +0 -0
  208. {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
  209. {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +0 -0
  210. {mcp_agent → fast_agent}/resources/examples/researcher/fastagent.config.yaml +0 -0
  211. {mcp_agent → fast_agent}/resources/examples/tensorzero/.env.sample +0 -0
  212. {mcp_agent → fast_agent}/resources/examples/tensorzero/Makefile +0 -0
  213. {mcp_agent → fast_agent}/resources/examples/tensorzero/README.md +0 -0
  214. {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/clam.jpg +0 -0
  215. {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/crab.png +0 -0
  216. {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/shrimp.png +0 -0
  217. {mcp_agent → fast_agent}/resources/examples/tensorzero/docker-compose.yml +0 -0
  218. {mcp_agent → fast_agent}/resources/examples/tensorzero/fastagent.config.yaml +0 -0
  219. {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/Dockerfile +0 -0
  220. {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/entrypoint.sh +0 -0
  221. {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/mcp_server.py +0 -0
  222. {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/pyproject.toml +0 -0
  223. {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/system_schema.json +0 -0
  224. {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/system_template.minijinja +0 -0
  225. {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/tensorzero.toml +0 -0
  226. {mcp_agent → fast_agent}/resources/examples/workflows/fastagent.config.yaml +0 -0
  227. {mcp_agent → fast_agent}/resources/examples/workflows/graded_report.md +0 -0
  228. {mcp_agent → fast_agent}/resources/examples/workflows/short_story.md +0 -0
  229. {mcp_agent → fast_agent}/resources/examples/workflows/short_story.txt +0 -0
  230. {mcp_agent → fast_agent/ui}/console.py +0 -0
  231. {mcp_agent/core → fast_agent/ui}/mermaid_utils.py +0 -0
  232. {fast_agent_mcp-0.2.58.dist-info → fast_agent_mcp-0.3.0.dist-info}/WHEEL +0 -0
  233. {fast_agent_mcp-0.2.58.dist-info → fast_agent_mcp-0.3.0.dist-info}/licenses/LICENSE +0 -0
@@ -3,6 +3,7 @@ from typing import Any, Dict, List, Tuple
3
3
 
4
4
  # Import necessary types from google.genai
5
5
  from google.genai import types
6
+ from mcp import Tool
6
7
  from mcp.types import (
7
8
  BlobResourceContents,
8
9
  CallToolRequest,
@@ -14,16 +15,14 @@ from mcp.types import (
14
15
  TextContent,
15
16
  )
16
17
 
17
- from mcp_agent.core.request_params import RequestParams
18
- from mcp_agent.mcp.helpers.content_helpers import (
18
+ from fast_agent.mcp.helpers.content_helpers import (
19
19
  get_image_data,
20
20
  get_text,
21
21
  is_image_content,
22
22
  is_resource_content,
23
23
  is_text_content,
24
24
  )
25
- from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart
26
- from mcp_agent.tools.tool_definition import ToolDefinition
25
+ from fast_agent.types import PromptMessageExtended, RequestParams
27
26
 
28
27
 
29
28
  class GoogleConverter:
@@ -35,13 +34,18 @@ class GoogleConverter:
35
34
  """
36
35
  Recursively removes unsupported JSON schema keywords for google.genai.types.Schema.
37
36
  Specifically removes 'additionalProperties', '$schema', 'exclusiveMaximum', and 'exclusiveMinimum'.
37
+ Also resolves $ref references and inlines $defs.
38
38
  """
39
+ # First, resolve any $ref references in the schema
40
+ schema = self._resolve_refs(schema, schema)
41
+
39
42
  cleaned_schema = {}
40
43
  unsupported_keys = {
41
44
  "additionalProperties",
42
45
  "$schema",
43
46
  "exclusiveMaximum",
44
47
  "exclusiveMinimum",
48
+ "$defs", # Remove $defs after resolving references
45
49
  }
46
50
  supported_string_formats = {"enum", "date-time"}
47
51
 
@@ -67,11 +71,59 @@ class GoogleConverter:
67
71
  cleaned_schema[key] = value
68
72
  return cleaned_schema
69
73
 
74
+ def _resolve_refs(self, schema: Dict[str, Any], root_schema: Dict[str, Any]) -> Dict[str, Any]:
75
+ """
76
+ Resolve $ref references in a JSON schema by inlining the referenced definitions.
77
+
78
+ Args:
79
+ schema: The current schema fragment being processed
80
+ root_schema: The root schema containing $defs
81
+
82
+ Returns:
83
+ Schema with $ref references resolved
84
+ """
85
+ if not isinstance(schema, dict):
86
+ return schema
87
+
88
+ # If this is a $ref, resolve it
89
+ if "$ref" in schema:
90
+ ref_path = schema["$ref"]
91
+ if ref_path.startswith("#/"):
92
+ # Parse the reference path (e.g., "#/$defs/HumanInputRequest")
93
+ path_parts = ref_path[2:].split("/") # Remove "#/" and split
94
+
95
+ # Navigate to the referenced definition
96
+ ref_target = root_schema
97
+ for part in path_parts:
98
+ if part in ref_target:
99
+ ref_target = ref_target[part]
100
+ else:
101
+ # If reference not found, return the original schema
102
+ return schema
103
+
104
+ # Return the resolved definition (recursively resolve any nested refs)
105
+ return self._resolve_refs(ref_target, root_schema)
106
+
107
+ # Otherwise, recursively process all values in the schema
108
+ resolved = {}
109
+ for key, value in schema.items():
110
+ if isinstance(value, dict):
111
+ resolved[key] = self._resolve_refs(value, root_schema)
112
+ elif isinstance(value, list):
113
+ resolved[key] = [
114
+ self._resolve_refs(item, root_schema) if isinstance(item, dict) else item
115
+ for item in value
116
+ ]
117
+ else:
118
+ resolved[key] = value
119
+
120
+ return resolved
121
+
70
122
  def convert_to_google_content(
71
- self, messages: List[PromptMessageMultipart]
123
+ self, messages: List[PromptMessageExtended]
72
124
  ) -> List[types.Content]:
73
125
  """
74
- Converts a list of fast-agent PromptMessageMultipart to google.genai types.Content.
126
+ Converts a list of fast-agent PromptMessageExtended to google.genai types.Content.
75
127
  Handles different roles and content types (text, images, etc.).
76
128
  """
77
129
  google_contents: List[types.Content] = []
@@ -142,7 +194,7 @@ class GoogleConverter:
142
194
  google_contents.append(types.Content(role=google_role, parts=parts))
143
195
  return google_contents
144
196
 
145
- def convert_to_google_tools(self, tools: List[ToolDefinition]) -> List[types.Tool]:
197
+ def convert_to_google_tools(self, tools: List[Tool]) -> List[types.Tool]:
146
198
  """
147
199
  Converts a list of fast-agent ToolDefinition to google.genai types.Tool.
148
200
  """
@@ -326,22 +378,22 @@ class GoogleConverter:
326
378
 
327
379
def convert_from_google_content_list(
    self, contents: List[types.Content]
) -> List[PromptMessageExtended]:
    """
    Convert each google.genai ``types.Content`` in *contents* into a
    fast-agent ``PromptMessageExtended``, preserving input order.
    """
    converted: List[PromptMessageExtended] = []
    for content in contents:
        converted.append(self._convert_from_google_content(content))
    return converted
334
386
 
335
- def _convert_from_google_content(self, content: types.Content) -> PromptMessageMultipart:
387
+ def _convert_from_google_content(self, content: types.Content) -> PromptMessageExtended:
336
388
  """
337
- Converts a single google.genai types.Content to a fast-agent PromptMessageMultipart.
389
+ Converts a single google.genai types.Content to a fast-agent PromptMessageExtended.
338
390
  """
339
391
  # Official fix for GitHub issue #207: Handle None content or content.parts
340
392
  if content is None or not hasattr(content, "parts") or content.parts is None:
341
- return PromptMessageMultipart(role="assistant", content=[])
393
+ return PromptMessageExtended(role="assistant", content=[])
342
394
 
343
395
  if content.role == "model" and any(part.function_call for part in content.parts):
344
- return PromptMessageMultipart(role="assistant", content=[])
396
+ return PromptMessageExtended(role="assistant", content=[])
345
397
 
346
398
  fast_agent_parts: List[ContentBlock | CallToolRequestParams] = []
347
399
  for part in content.parts:
@@ -363,4 +415,4 @@ class GoogleConverter:
363
415
  )
364
416
 
365
417
  fast_agent_role = "user" if content.role == "user" else "assistant"
366
- return PromptMessageMultipart(role=fast_agent_role, content=fast_agent_parts)
418
+ return PromptMessageExtended(role=fast_agent_role, content=fast_agent_parts)
@@ -0,0 +1,431 @@
1
+ import secrets
2
+ from typing import Dict, List
3
+
4
+ # Import necessary types and client from google.genai
5
+ from google import genai
6
+ from google.genai import (
7
+ errors, # For error handling
8
+ types,
9
+ )
10
+ from mcp import Tool as McpTool
11
+ from mcp.types import (
12
+ CallToolRequest,
13
+ CallToolRequestParams,
14
+ ContentBlock,
15
+ TextContent,
16
+ )
17
+
18
+ from fast_agent.core.exceptions import ProviderKeyError
19
+ from fast_agent.core.prompt import Prompt
20
+ from fast_agent.llm.fastagent_llm import FastAgentLLM
21
+
22
+ # Import the new converter class
23
+ from fast_agent.llm.provider.google.google_converter import GoogleConverter
24
+ from fast_agent.llm.provider_types import Provider
25
+ from fast_agent.llm.usage_tracking import TurnUsage
26
+ from fast_agent.types import PromptMessageExtended, RequestParams
27
+ from fast_agent.types.llm_stop_reason import LlmStopReason
28
+
29
# Default model used when the caller does not specify one.
DEFAULT_GOOGLE_MODEL = "gemini25"


# RequestParams fields that must NOT be forwarded verbatim into the
# google.genai request config — each one is handled by other machinery.
GOOGLE_EXCLUDE_FIELDS = {
    FastAgentLLM.PARAM_MESSAGES,  # Handled by contents
    FastAgentLLM.PARAM_MODEL,  # Handled during client/call setup
    FastAgentLLM.PARAM_SYSTEM_PROMPT,  # Handled by system_instruction in config
    FastAgentLLM.PARAM_USE_HISTORY,  # Handled by the LLM base / this class's logic
    FastAgentLLM.PARAM_MAX_ITERATIONS,  # Handled by this class's loop
    FastAgentLLM.PARAM_MCP_METADATA,
}.union(FastAgentLLM.BASE_EXCLUDE_FIELDS)
45
+
46
+
47
+ class GoogleNativeLLM(FastAgentLLM[types.Content, types.Content]):
48
+ """
49
+ Google LLM provider using the native google.genai library.
50
+ """
51
+
52
def __init__(self, *args, **kwargs) -> None:
    """Create a Google-native LLM: wires up the genai client and the converter."""
    super().__init__(*args, provider=Provider.GOOGLE, **kwargs)
    # Client for the google.genai API (Developer API or Vertex AI per config).
    self._google_client = self._initialize_google_client()
    # Translates between fast-agent and google.genai message/tool types.
    self._converter = GoogleConverter()
58
+
59
def _initialize_google_client(self) -> genai.Client:
    """
    Initializes the google.genai client.

    Uses Vertex AI when enabled in the context config; otherwise falls back
    to the Gemini Developer API, which requires a configured API key.

    Raises:
        ProviderKeyError: If no API key is configured for the Developer API,
            or if client construction fails for any other reason.
    """
    # Detect Vertex AI configuration first. Vertex authenticates via
    # project/location (application credentials), so it should not be
    # gated behind the Developer-API key check — TODO confirm no key is
    # required for the Vertex path in this deployment.
    vertex_config = None
    if (
        self.context
        and self.context.config
        and hasattr(self.context.config, "google")
        and hasattr(self.context.config.google, "vertex_ai")
        # getattr with default: tolerate vertex_ai being None instead of
        # raising AttributeError on `.enabled`.
        and getattr(self.context.config.google.vertex_ai, "enabled", False)
    ):
        vertex_config = self.context.config.google.vertex_ai

    try:
        if vertex_config is not None:
            return genai.Client(
                vertexai=True,
                project=vertex_config.project_id,
                location=vertex_config.location,
                # Add other Vertex AI specific options if needed
                # http_options=types.HttpOptions(api_version='v1')  # Example for v1 API
            )

        # Default to the Gemini Developer API, which needs an API key.
        api_key = self._api_key()
        if not api_key:
            raise ProviderKeyError(
                "Google API key not found.", "Please configure your Google API key."
            )
        return genai.Client(
            api_key=api_key,
            # http_options=types.HttpOptions(api_version='v1')  # Example for v1 API
        )
    except ProviderKeyError:
        # Preserve the specific missing-key error; previously the broad
        # handler below re-wrapped it with a generic message.
        raise
    except Exception as e:
        # Catch any other initialization error and surface it uniformly.
        raise ProviderKeyError("Failed to initialize Google GenAI client.", str(e)) from e
99
+
100
def _initialize_default_params(self, kwargs: dict) -> RequestParams:
    """Build the default RequestParams for the Google native provider."""
    model_name = kwargs.get("model", DEFAULT_GOOGLE_MODEL)
    return RequestParams(
        model=model_name,
        # System instruction is mapped into the genai config at request time.
        systemPrompt=self.instruction,
        # The native API is assumed to support parallel tool calls.
        parallel_tool_calls=True,
        max_iterations=20,
        use_history=True,
        maxTokens=65536,  # Default max tokens for Google models
    )
113
+
114
async def _google_completion(
    self,
    message: List[types.Content] | None,
    request_params: RequestParams | None = None,
    tools: List[McpTool] | None = None,
    *,
    response_mime_type: str | None = None,
    response_schema: object | None = None,
) -> PromptMessageExtended:
    """
    Process a query using Google's generate_content API and available tools.

    Args:
        message: Provider-native content for this turn, appended to history.
        request_params: Optional overrides merged with the defaults.
        tools: MCP tools to expose to the model (ignored in structured mode).
        response_mime_type: Structured-output MIME type; mutually exclusive
            with tool calling.
        response_schema: Structured-output schema; mutually exclusive with
            tool calling.

    Returns:
        An assistant PromptMessageExtended with the response content, stop
        reason, and any tool calls the model requested.

    Raises:
        ProviderKeyError: On Google API errors.
    """
    request_params = self.get_request_params(request_params=request_params)
    responses: List[ContentBlock] = []

    # Build the provider-native conversation: stored history (prompts plus,
    # optionally, accumulated conversation messages) followed by this
    # turn's message(s). No implicit conversion happens here.
    base_history: List[types.Content] = self.history.get(
        include_completion_history=request_params.use_history
    )
    conversation_history: List[types.Content] = list(base_history)
    if message:
        conversation_history.extend(message)

    self.logger.debug(f"Google completion requested with messages: {conversation_history}")
    self._log_chat_progress(self.chat_turn(), model=request_params.model)

    available_tools: List[types.Tool] = (
        self._converter.convert_to_google_tools(tools or []) if tools else []
    )

    # Prepare generate_content arguments.
    generate_content_config = self._converter.convert_request_params_to_google_config(
        request_params
    )

    # Apply structured output config OR tool calling (mutually exclusive).
    if response_schema or response_mime_type:
        # Structured output mode: disable tool use
        if response_mime_type:
            generate_content_config.response_mime_type = response_mime_type
        if response_schema is not None:
            generate_content_config.response_schema = response_schema
    elif available_tools:
        # Tool calling enabled only when not doing structured output
        generate_content_config.tools = available_tools
        generate_content_config.tool_config = types.ToolConfig(
            function_calling_config=types.FunctionCallingConfig(mode="AUTO")
        )

    # Call the google.genai API via the async client.
    try:
        api_response = await self._google_client.aio.models.generate_content(
            model=request_params.model,
            contents=conversation_history,  # Full conversational context for this turn
            config=generate_content_config,
        )
        self.logger.debug("Google generate_content response:", data=api_response)

        # Track usage if response is valid and has usage data
        if (
            hasattr(api_response, "usage_metadata")
            and api_response.usage_metadata
            and not isinstance(api_response, BaseException)
        ):
            try:
                turn_usage = TurnUsage.from_google(
                    api_response.usage_metadata, request_params.model
                )
                self._finalize_turn_usage(turn_usage)
            except Exception as e:
                # Usage tracking is best-effort; never fail the turn on it.
                self.logger.warning(f"Failed to track usage: {e}")

    except errors.APIError as e:
        # Handle specific Google API errors
        self.logger.error(f"Google API Error: {e.code} - {e.message}")
        raise ProviderKeyError(f"Google API Error: {e.code}", e.message or "") from e
    except Exception as e:
        self.logger.error(f"Error during Google generate_content call: {e}")
        raise e

    # Process the API response.
    if not api_response.candidates:
        # Bug fix: previously this branch only logged and then fell through
        # to candidates[0], raising IndexError. Return an empty assistant
        # message instead when the model produced no candidates.
        self.logger.debug("No candidates returned.")
        self._log_chat_finished(model=request_params.model)
        return Prompt.assistant(stop_reason=LlmStopReason.END_TURN, tool_calls=None)

    candidate = api_response.candidates[0]  # Process the first candidate

    # Convert the model's response content to fast-agent types
    model_response_content_parts = self._converter.convert_from_google_content(
        candidate.content
    )
    stop_reason = LlmStopReason.END_TURN
    tool_calls: Dict[str, CallToolRequest] | None = None
    # Add model's response to the working conversation history for this turn
    conversation_history.append(candidate.content)

    # Extract text content and collect any tool calls the model requested.
    tool_calls_to_execute = []
    for part in model_response_content_parts:
        if isinstance(part, TextContent):
            responses.append(part)  # Text content goes into the returned message
        elif isinstance(part, CallToolRequestParams):
            # Function call requested by the model. In structured-output
            # mode tool calls are ignored per the either-or rule above.
            if response_schema or response_mime_type:
                continue
            tool_calls_to_execute.append(part)

    if tool_calls_to_execute:
        stop_reason = LlmStopReason.TOOL_USE
        tool_calls = {}
        for tool_call_params in tool_calls_to_execute:
            tool_call_request = CallToolRequest(method="tools/call", params=tool_call_params)
            # Short random hex id correlates the tool call with its result.
            hex_string = secrets.token_hex(3)[:5]
            tool_calls[hex_string] = tool_call_request
        self.logger.debug("Tool call results processed.")
    else:
        stop_reason = self._map_finish_reason(getattr(candidate, "finish_reason", None))

    # Persist conversation state to provider-native history (exclude prompt messages)
    if request_params.use_history:
        # History store separates prompt vs conversation messages; keep prompts as-is
        prompt_messages = self.history.get(include_completion_history=False)
        # messages after prompts are the true conversation history
        new_messages = conversation_history[len(prompt_messages) :]
        self.history.set(new_messages, is_prompt=False)

    self._log_chat_finished(model=request_params.model)  # Use model from request_params
    return Prompt.assistant(*responses, stop_reason=stop_reason, tool_calls=tool_calls)
259
+
260
+ async def _apply_prompt_provider_specific(
261
+ self,
262
+ multipart_messages: List[PromptMessageExtended],
263
+ request_params: RequestParams | None = None,
264
+ tools: List[McpTool] | None = None,
265
+ is_template: bool = False,
266
+ ) -> PromptMessageExtended:
267
+ """
268
+ Applies the prompt messages and potentially calls the LLM for completion.
269
+ """
270
+
271
+ request_params = self.get_request_params(request_params=request_params)
272
+
273
+ # Determine the last message
274
+ last_message = multipart_messages[-1]
275
+
276
+ # Add previous messages (excluding the last user message) to provider-native history
277
+ # If last is assistant, we add all messages and return it directly (no inference).
278
+ messages_to_add = (
279
+ multipart_messages[:-1] if last_message.role == "user" else multipart_messages
280
+ )
281
+
282
+ if messages_to_add:
283
+ # Convert prior messages to google.genai Content and store in provider history
284
+ converted_prior = self._converter.convert_to_google_content(messages_to_add)
285
+ self.history.extend(converted_prior, is_prompt=is_template)
286
+
287
+ if last_message.role == "assistant":
288
+ # No generation required; the provided assistant message is the output
289
+ return last_message
290
+
291
+ # Build the provider-native message list for this turn from the last user message
292
+ # This must handle tool results as function responses before any additional user content.
293
+ turn_messages: List[types.Content] = []
294
+
295
+ # 1) Convert tool results (if any) to google function responses
296
+ if last_message.tool_results:
297
+ # Map correlation IDs back to tool names using the last assistant tool_calls
298
+ # found in our high-level message history
299
+ id_to_name: Dict[str, str] = {}
300
+ for prev in reversed(self._message_history):
301
+ if prev.role == "assistant" and prev.tool_calls:
302
+ for call_id, call in prev.tool_calls.items():
303
+ try:
304
+ id_to_name[call_id] = call.params.name
305
+ except Exception:
306
+ pass
307
+ break
308
+
309
+ tool_results_pairs = []
310
+ for call_id, result in last_message.tool_results.items():
311
+ tool_name = id_to_name.get(call_id, "tool")
312
+ tool_results_pairs.append((tool_name, result))
313
+
314
+ if tool_results_pairs:
315
+ turn_messages.extend(
316
+ self._converter.convert_function_results_to_google(tool_results_pairs)
317
+ )
318
+
319
+ # 2) Convert any direct user content in the last message
320
+ if last_message.content:
321
+ user_contents = self._converter.convert_to_google_content([last_message])
322
+ # convert_to_google_content returns a list; preserve order after tool responses
323
+ turn_messages.extend(user_contents)
324
+
325
+ # If we somehow have no provider-native parts, ensure we send an empty user content
326
+ if not turn_messages:
327
+ turn_messages.append(types.Content(role="user", parts=[types.Part.from_text("")]))
328
+
329
+ # Delegate to the native completion with explicit turn messages
330
+ return await self._google_completion(
331
+ turn_messages, request_params=request_params, tools=tools
332
+ )
333
+
334
+ def _map_finish_reason(self, finish_reason: object) -> LlmStopReason:
335
+ """Map Google finish reasons to LlmStopReason robustly."""
336
+ # Normalize to string if it's an enum-like object
337
+ reason = None
338
+ try:
339
+ reason = str(finish_reason) if finish_reason is not None else None
340
+ except Exception:
341
+ reason = None
342
+
343
+ if not reason:
344
+ return LlmStopReason.END_TURN
345
+
346
+ # Extract last token after any dots or enum prefixes
347
+ key = reason.split(".")[-1].upper()
348
+
349
+ if key in {"STOP"}:
350
+ return LlmStopReason.END_TURN
351
+ if key in {"MAX_TOKENS", "LENGTH"}:
352
+ return LlmStopReason.MAX_TOKENS
353
+ if key in {
354
+ "PROHIBITED_CONTENT",
355
+ "SAFETY",
356
+ "RECITATION",
357
+ "BLOCKLIST",
358
+ "SPII",
359
+ "IMAGE_SAFETY",
360
+ }:
361
+ return LlmStopReason.SAFETY
362
+ if key in {"MALFORMED_FUNCTION_CALL", "UNEXPECTED_TOOL_CALL", "TOO_MANY_TOOL_CALLS"}:
363
+ return LlmStopReason.ERROR
364
+ # Some SDKs include OTHER, LANGUAGE, GROUNDING, UNSPECIFIED, etc.
365
+ return LlmStopReason.ERROR
366
+
367
+ async def _apply_prompt_provider_specific_structured(
368
+ self,
369
+ multipart_messages,
370
+ model,
371
+ request_params=None,
372
+ ):
373
+ """
374
+ Handles structured output for Gemini models using response_schema and response_mime_type,
375
+ keeping provider-native (google.genai) history consistent with non-structured calls.
376
+ """
377
+ import json
378
+
379
+ # Determine the last message and add prior messages to provider-native history
380
+ last_message = multipart_messages[-1] if multipart_messages else None
381
+ messages_to_add = (
382
+ multipart_messages
383
+ if last_message and last_message.role == "assistant"
384
+ else multipart_messages[:-1]
385
+ )
386
+ if messages_to_add:
387
+ converted_prior = self._converter.convert_to_google_content(messages_to_add)
388
+ self.history.extend(converted_prior, is_prompt=False)
389
+
390
+ # If the last message is an assistant message, attempt to parse its JSON and return
391
+ if last_message and last_message.role == "assistant":
392
+ assistant_text = last_message.last_text()
393
+ if assistant_text:
394
+ try:
395
+ json_data = json.loads(assistant_text)
396
+ validated_model = model.model_validate(json_data)
397
+ return validated_model, last_message
398
+ except (json.JSONDecodeError, Exception) as e:
399
+ self.logger.warning(
400
+ f"Failed to parse assistant message as structured response: {e}"
401
+ )
402
+ return None, last_message
403
+
404
+ # Prepare request params
405
+ request_params = self.get_request_params(request_params)
406
+
407
+ # Build schema for structured output
408
+ schema = None
409
+ try:
410
+ schema = model.model_json_schema()
411
+ except Exception:
412
+ pass
413
+ response_schema = model if schema is None else schema
414
+
415
+ # Convert the last user message to provider-native content for the current turn
416
+ turn_messages: List[types.Content] = []
417
+ if last_message:
418
+ turn_messages = self._converter.convert_to_google_content([last_message])
419
+
420
+ # Delegate to unified completion with structured options enabled (no tools)
421
+ assistant_msg = await self._google_completion(
422
+ turn_messages,
423
+ request_params=request_params,
424
+ tools=None,
425
+ response_mime_type="application/json",
426
+ response_schema=response_schema,
427
+ )
428
+
429
+ # Parse using shared helper for consistency
430
+ parsed, _ = self._structured_from_multipart(assistant_msg, model)
431
+ return parsed, assistant_msg
@@ -1,15 +1,15 @@
1
- from mcp_agent.core.request_params import RequestParams
2
- from mcp_agent.llm.provider_types import Provider
3
- from mcp_agent.llm.providers.augmented_llm_groq import GroqAugmentedLLM
4
- from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
1
+ from fast_agent.llm.provider.openai.llm_groq import GroqLLM
2
+ from fast_agent.llm.provider.openai.llm_openai import OpenAILLM
3
+ from fast_agent.llm.provider_types import Provider
4
+ from fast_agent.types import RequestParams
5
5
 
6
6
  ALIYUN_BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"
7
7
  DEFAULT_QWEN_MODEL = "qwen-turbo"
8
8
 
9
9
 
10
- class AliyunAugmentedLLM(GroqAugmentedLLM):
10
+ class AliyunLLM(GroqLLM):
11
11
  def __init__(self, *args, **kwargs) -> None:
12
- OpenAIAugmentedLLM.__init__(self, *args, provider=Provider.ALIYUN, **kwargs)
12
+ OpenAILLM.__init__(self, *args, provider=Provider.ALIYUN, **kwargs)
13
13
 
14
14
  def _initialize_default_params(self, kwargs: dict) -> RequestParams:
15
15
  """Initialize Aliyun-specific default parameters"""
@@ -29,4 +29,3 @@ class AliyunAugmentedLLM(GroqAugmentedLLM):
29
29
  base_url = self.context.config.aliyun.base_url
30
30
 
31
31
  return base_url if base_url else ALIYUN_BASE_URL
32
-
@@ -1,8 +1,8 @@
1
1
  from openai import AsyncAzureOpenAI, AsyncOpenAI, AuthenticationError
2
2
 
3
- from mcp_agent.core.exceptions import ProviderKeyError
4
- from mcp_agent.llm.provider_types import Provider
5
- from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
3
+ from fast_agent.core.exceptions import ProviderKeyError
4
+ from fast_agent.llm.provider.openai.llm_openai import OpenAILLM
5
+ from fast_agent.llm.provider_types import Provider
6
6
 
7
7
  try:
8
8
  from azure.identity import DefaultAzureCredential
@@ -21,7 +21,7 @@ def _extract_resource_name(url: str) -> str | None:
21
21
  DEFAULT_AZURE_API_VERSION = "2024-10-21"
22
22
 
23
23
 
24
- class AzureOpenAIAugmentedLLM(OpenAIAugmentedLLM):
24
+ class AzureOpenAILLM(OpenAILLM):
25
25
  """
26
26
  Azure OpenAI implementation extending OpenAIAugmentedLLM.
27
27
  Handles both API Key and DefaultAzureCredential authentication.