fast-agent-mcp 0.3.15__py3-none-any.whl → 0.3.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of fast-agent-mcp might be problematic.

Files changed (47)
  1. fast_agent/__init__.py +2 -0
  2. fast_agent/agents/agent_types.py +5 -0
  3. fast_agent/agents/llm_agent.py +7 -0
  4. fast_agent/agents/llm_decorator.py +6 -0
  5. fast_agent/agents/mcp_agent.py +134 -10
  6. fast_agent/cli/__main__.py +35 -0
  7. fast_agent/cli/commands/check_config.py +85 -0
  8. fast_agent/cli/commands/go.py +100 -36
  9. fast_agent/cli/constants.py +15 -1
  10. fast_agent/cli/main.py +2 -1
  11. fast_agent/config.py +39 -10
  12. fast_agent/constants.py +8 -0
  13. fast_agent/context.py +24 -15
  14. fast_agent/core/direct_decorators.py +9 -0
  15. fast_agent/core/fastagent.py +101 -1
  16. fast_agent/core/logging/listeners.py +8 -0
  17. fast_agent/interfaces.py +12 -0
  18. fast_agent/llm/fastagent_llm.py +45 -0
  19. fast_agent/llm/memory.py +26 -1
  20. fast_agent/llm/model_database.py +4 -1
  21. fast_agent/llm/model_factory.py +4 -2
  22. fast_agent/llm/model_info.py +19 -43
  23. fast_agent/llm/provider/anthropic/llm_anthropic.py +112 -0
  24. fast_agent/llm/provider/google/llm_google_native.py +238 -7
  25. fast_agent/llm/provider/openai/llm_openai.py +382 -19
  26. fast_agent/llm/provider/openai/responses.py +133 -0
  27. fast_agent/resources/setup/agent.py +2 -0
  28. fast_agent/resources/setup/fastagent.config.yaml +6 -0
  29. fast_agent/skills/__init__.py +9 -0
  30. fast_agent/skills/registry.py +208 -0
  31. fast_agent/tools/shell_runtime.py +404 -0
  32. fast_agent/ui/console_display.py +47 -996
  33. fast_agent/ui/elicitation_form.py +76 -24
  34. fast_agent/ui/elicitation_style.py +2 -2
  35. fast_agent/ui/enhanced_prompt.py +107 -37
  36. fast_agent/ui/history_display.py +20 -5
  37. fast_agent/ui/interactive_prompt.py +108 -3
  38. fast_agent/ui/markdown_helpers.py +104 -0
  39. fast_agent/ui/markdown_truncator.py +103 -45
  40. fast_agent/ui/message_primitives.py +50 -0
  41. fast_agent/ui/streaming.py +638 -0
  42. fast_agent/ui/tool_display.py +417 -0
  43. {fast_agent_mcp-0.3.15.dist-info → fast_agent_mcp-0.3.17.dist-info}/METADATA +8 -7
  44. {fast_agent_mcp-0.3.15.dist-info → fast_agent_mcp-0.3.17.dist-info}/RECORD +47 -39
  45. {fast_agent_mcp-0.3.15.dist-info → fast_agent_mcp-0.3.17.dist-info}/WHEEL +0 -0
  46. {fast_agent_mcp-0.3.15.dist-info → fast_agent_mcp-0.3.17.dist-info}/entry_points.txt +0 -0
  47. {fast_agent_mcp-0.3.15.dist-info → fast_agent_mcp-0.3.17.dist-info}/licenses/LICENSE +0 -0
--- a/fast_agent/llm/provider/google/llm_google_native.py
+++ b/fast_agent/llm/provider/google/llm_google_native.py
@@ -1,3 +1,4 @@
+ import json
  import secrets
  from typing import Dict, List
 
@@ -49,8 +50,6 @@ class GoogleNativeLLM(FastAgentLLM[types.Content, types.Content]):
 
      def __init__(self, *args, **kwargs) -> None:
          super().__init__(*args, provider=Provider.GOOGLE, **kwargs)
-         # Initialize the google.genai client
-         self._google_client = self._initialize_google_client()
          # Initialize the converter
          self._converter = GoogleConverter()
 
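With the eager attribute removed, `_google_completion` now constructs a client per request (fourth hunk below). For orientation, a minimal sketch of what a factory like `_initialize_google_client` can return with the google-genai SDK; the helper's real body is not part of this diff, so the credential wiring shown here is an assumption:

    import os
    from google import genai

    def _initialize_google_client(self) -> genai.Client:
        # Assumption: the real helper reads fast-agent provider settings.
        # genai.Client takes api_key=... for the Gemini API, or
        # vertexai=True with project=.../location=... for Vertex AI.
        return genai.Client(api_key=os.environ["GOOGLE_API_KEY"])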
@@ -109,6 +108,218 @@ class GoogleNativeLLM(FastAgentLLM[types.Content, types.Content]):
              # Include other relevant default parameters
          )
 
+     async def _stream_generate_content(
+         self,
+         *,
+         model: str,
+         contents: List[types.Content],
+         config: types.GenerateContentConfig,
+         client: genai.Client,
+     ) -> types.GenerateContentResponse | None:
+         """Stream Gemini responses and return the final aggregated completion."""
+         try:
+             response_stream = await client.aio.models.generate_content_stream(
+                 model=model,
+                 contents=contents,
+                 config=config,
+             )
+         except AttributeError:
+             # Older SDKs might not expose streaming; fall back to non-streaming.
+             return None
+         except errors.APIError:
+             raise
+         except Exception as exc:  # pragma: no cover - defensive fallback
+             self.logger.warning(
+                 "Google streaming failed during setup; falling back to non-streaming",
+                 exc_info=exc,
+             )
+             return None
+
+         return await self._consume_google_stream(response_stream, model=model)
+
+     async def _consume_google_stream(
+         self,
+         response_stream,
+         *,
+         model: str,
+     ) -> types.GenerateContentResponse | None:
+         """Consume the async streaming iterator and aggregate the final response."""
+         estimated_tokens = 0
+         timeline: List[tuple[str, int | None, str]] = []
+         tool_streams: Dict[int, Dict[str, str]] = {}
+         active_tool_index: int | None = None
+         tool_counter = 0
+         usage_metadata = None
+         last_chunk: types.GenerateContentResponse | None = None
+
+         try:
+             async for chunk in response_stream:
+                 last_chunk = chunk
+                 if getattr(chunk, "usage_metadata", None):
+                     usage_metadata = chunk.usage_metadata
+
+                 if not getattr(chunk, "candidates", None):
+                     continue
+
+                 candidate = chunk.candidates[0]
+                 content = getattr(candidate, "content", None)
+                 if content is None or not getattr(content, "parts", None):
+                     continue
+
+                 for part in content.parts:
+                     if getattr(part, "text", None):
+                         text = part.text or ""
+                         if text:
+                             if timeline and timeline[-1][0] == "text":
+                                 prev_type, prev_index, prev_text = timeline[-1]
+                                 timeline[-1] = (prev_type, prev_index, prev_text + text)
+                             else:
+                                 timeline.append(("text", None, text))
+                             estimated_tokens = self._update_streaming_progress(
+                                 text,
+                                 model,
+                                 estimated_tokens,
+                             )
+                             self._notify_tool_stream_listeners(
+                                 "text",
+                                 {
+                                     "chunk": text,
+                                     "streams_arguments": False,
+                                 },
+                             )
+
+                     if getattr(part, "function_call", None):
+                         function_call = part.function_call
+                         name = getattr(function_call, "name", None) or "tool"
+                         args = getattr(function_call, "args", None) or {}
+
+                         if active_tool_index is None:
+                             active_tool_index = tool_counter
+                             tool_counter += 1
+                             tool_use_id = f"tool_{self.chat_turn()}_{active_tool_index}"
+                             tool_streams[active_tool_index] = {
+                                 "name": name,
+                                 "tool_use_id": tool_use_id,
+                                 "buffer": "",
+                             }
+                             self._notify_tool_stream_listeners(
+                                 "start",
+                                 {
+                                     "tool_name": name,
+                                     "tool_use_id": tool_use_id,
+                                     "index": active_tool_index,
+                                     "streams_arguments": False,
+                                 },
+                             )
+                             timeline.append(("tool_call", active_tool_index, ""))
+
+                         stream_info = tool_streams.get(active_tool_index)
+                         if not stream_info:
+                             continue
+
+                         try:
+                             serialized_args = json.dumps(args, separators=(",", ":"))
+                         except Exception:
+                             serialized_args = str(args)
+
+                         previous = stream_info.get("buffer", "")
+                         if isinstance(previous, str) and serialized_args.startswith(previous):
+                             delta = serialized_args[len(previous) :]
+                         else:
+                             delta = serialized_args
+                         stream_info["buffer"] = serialized_args
+
+                         if delta:
+                             self._notify_tool_stream_listeners(
+                                 "delta",
+                                 {
+                                     "tool_name": stream_info["name"],
+                                     "tool_use_id": stream_info["tool_use_id"],
+                                     "index": active_tool_index,
+                                     "chunk": delta,
+                                     "streams_arguments": False,
+                                 },
+                             )
+
+                 finish_reason = getattr(candidate, "finish_reason", None)
+                 if finish_reason:
+                     finish_value = str(finish_reason).split(".")[-1].upper()
+                     if finish_value in {"FUNCTION_CALL", "STOP"} and active_tool_index is not None:
+                         stream_info = tool_streams.get(active_tool_index)
+                         if stream_info:
+                             self._notify_tool_stream_listeners(
+                                 "stop",
+                                 {
+                                     "tool_name": stream_info["name"],
+                                     "tool_use_id": stream_info["tool_use_id"],
+                                     "index": active_tool_index,
+                                     "streams_arguments": False,
+                                 },
+                             )
+                         active_tool_index = None
+         finally:
+             stream_close = getattr(response_stream, "aclose", None)
+             if callable(stream_close):
+                 try:
+                     await stream_close()
+                 except Exception:
+                     pass
+
+         if active_tool_index is not None:
+             stream_info = tool_streams.get(active_tool_index)
+             if stream_info:
+                 self._notify_tool_stream_listeners(
+                     "stop",
+                     {
+                         "tool_name": stream_info["name"],
+                         "tool_use_id": stream_info["tool_use_id"],
+                         "index": active_tool_index,
+                         "streams_arguments": False,
+                     },
+                 )
+
+         if not timeline and last_chunk is None:
+             return None
+
+         final_parts: List[types.Part] = []
+         for entry_type, index, payload in timeline:
+             if entry_type == "text":
+                 final_parts.append(types.Part.from_text(text=payload))
+             elif entry_type == "tool_call" and index is not None:
+                 stream_info = tool_streams.get(index)
+                 if not stream_info:
+                     continue
+                 buffer = stream_info.get("buffer", "")
+                 try:
+                     args_obj = json.loads(buffer) if buffer else {}
+                 except json.JSONDecodeError:
+                     args_obj = {"__raw": buffer}
+                 final_parts.append(
+                     types.Part.from_function_call(
+                         name=str(stream_info.get("name") or "tool"),
+                         args=args_obj,
+                     )
+                 )
+
+         final_content = types.Content(role="model", parts=final_parts)
+
+         if last_chunk is not None:
+             final_response = last_chunk.model_copy(deep=True)
+             if getattr(final_response, "candidates", None):
+                 final_candidate = final_response.candidates[0]
+                 final_candidate.content = final_content
+             else:
+                 final_response.candidates = [types.Candidate(content=final_content)]
+         else:
+             final_response = types.GenerateContentResponse(
+                 candidates=[types.Candidate(content=final_content)]
+             )
+
+         if usage_metadata:
+             final_response.usage_metadata = usage_metadata
+
+         return final_response
+
      async def _google_completion(
          self,
          message: List[types.Content] | None,
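One subtlety in the hunk above: Gemini delivers `function_call.args` as a complete object in each chunk, so `_consume_google_stream` serializes the full arguments every time and forwards only the unseen suffix as the streamed delta. A tiny self-contained illustration of that suffix computation (the JSON values are invented for demonstration):

    previous = '{"city":"Par'     # serialized buffer after the last chunk
    current = '{"city":"Paris"}'  # serialization of the latest, fuller args
    # Forward only the new suffix when the fresh serialization extends the
    # buffer; otherwise resend everything. Identical payloads yield an empty
    # delta, so repeated chunks emit nothing.
    delta = current[len(previous):] if current.startswith(previous) else current
    print(delta)  # -> 'is"}'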
@@ -163,13 +374,24 @@ class GoogleNativeLLM(FastAgentLLM[types.Content, types.Content]):
          )
 
          # 3. Call the google.genai API
+         client = self._initialize_google_client()
          try:
              # Use the async client
-             api_response = await self._google_client.aio.models.generate_content(
-                 model=request_params.model,
-                 contents=conversation_history,  # Full conversational context for this turn
-                 config=generate_content_config,
-             )
+             api_response = None
+             streaming_supported = response_schema is None and response_mime_type is None
+             if streaming_supported:
+                 api_response = await self._stream_generate_content(
+                     model=request_params.model,
+                     contents=conversation_history,
+                     config=generate_content_config,
+                     client=client,
+                 )
+             if api_response is None:
+                 api_response = await client.aio.models.generate_content(
+                     model=request_params.model,
+                     contents=conversation_history,  # Full conversational context for this turn
+                     config=generate_content_config,
+                 )
              self.logger.debug("Google generate_content response:", data=api_response)
 
              # Track usage if response is valid and has usage data
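Outside fast-agent, the same stream-then-fallback shape can be exercised directly against the google-genai SDK. A minimal sketch, assuming a `GOOGLE_API_KEY` in the environment and the `gemini-2.0-flash` model name (both assumptions, not taken from this diff):

    import asyncio
    from google import genai

    async def main() -> None:
        client = genai.Client()  # assumption: reads GOOGLE_API_KEY from the environment
        try:
            stream = await client.aio.models.generate_content_stream(
                model="gemini-2.0-flash",
                contents="Say hello.",
            )
            # Collect the text of each streamed chunk, skipping tool-only chunks.
            pieces = [chunk.text async for chunk in stream if chunk.text]
            print("".join(pieces))
        except AttributeError:
            # Mirror the diff's fallback path for SDKs without streaming support.
            response = await client.aio.models.generate_content(
                model="gemini-2.0-flash",
                contents="Say hello.",
            )
            print(response.text)

    asyncio.run(main())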
@@ -195,6 +417,15 @@ class GoogleNativeLLM(FastAgentLLM[types.Content, types.Content]):
              self.logger.error(f"Error during Google generate_content call: {e}")
              # Decide how to handle other exceptions - potentially re-raise or return an error message
              raise e
+         finally:
+             try:
+                 await client.aio.aclose()
+             except Exception:
+                 pass
+             try:
+                 client.close()
+             except Exception:
+                 pass
 
          # 4. Process the API response
          if not api_response.candidates:
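The aggregation step at the end of the streaming hunk rebuilds a normal `GenerateContentResponse` from the collected timeline, so the post-processing below the call site is unchanged. A minimal sketch of that part assembly using the google-genai `types` constructors the diff relies on (tool name and arguments are invented for illustration):

    from google.genai import types

    # Rebuild a model turn from an aggregated timeline: text first, then the
    # function call reconstructed from the streamed argument buffer.
    parts = [
        types.Part.from_text(text="Checking the weather."),
        types.Part.from_function_call(name="get_weather", args={"city": "Paris"}),
    ]
    content = types.Content(role="model", parts=parts)
    response = types.GenerateContentResponse(
        candidates=[types.Candidate(content=content)]
    )
    print(response.candidates[0].content.parts)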