fast-agent-mcp 0.2.47__py3-none-any.whl → 0.2.48__py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that public registry.

Note: this release of fast-agent-mcp has been flagged as potentially problematic.

fast_agent_mcp-0.2.48.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fast-agent-mcp
-Version: 0.2.47
+Version: 0.2.48
 Summary: Define, Prompt and Test MCP enabled Agents and Workflows
 Author-email: Shaun Smith <fastagent@llmindset.co.uk>
 License: Apache License
fast_agent_mcp-0.2.48.dist-info/RECORD CHANGED
@@ -1,6 +1,6 @@
 mcp_agent/__init__.py,sha256=HWWxZeB-VxrUNNXZnu4duzKGwdfCdD2M_O6drN4kfs8,2389
 mcp_agent/app.py,sha256=3mtHP1nRQcRaKhhxgTmCOv00alh70nT7UxNA8bN47QE,5560
-mcp_agent/config.py,sha256=RjwgvR-Sys4JIzhNyEsaS_NarCe157RenJLOsioUtDk,18980
+mcp_agent/config.py,sha256=GBmdsNlW8YqL8Dp-6Ke4ufQkFuLiaUSq_b8mciQ6sOQ,19382
 mcp_agent/console.py,sha256=Gjf2QLFumwG1Lav__c07X_kZxxEUSkzV-1_-YbAwcwo,813
 mcp_agent/context.py,sha256=lzz_Fyf9lz9BBAUt1bRVBlyyHjLkyeuyIziAi4qXYUk,7639
 mcp_agent/context_dependent.py,sha256=QXfhw3RaQCKfscEEBRGuZ3sdMWqkgShz2jJ1ivGGX1I,1455
@@ -23,10 +23,10 @@ mcp_agent/agents/workflow/router_agent.py,sha256=DYxld96C_xy2TXtZDHPB0CaJqyX-7p0
 mcp_agent/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/cli/__main__.py,sha256=KyZnfXkml0KsOnfy8T9JDYNVNynKix9cslwuafmKNbc,1089
 mcp_agent/cli/constants.py,sha256=KawdkaN289nVB02DKPB4IVUJ8-fohIUD0gLfOp0P7B8,551
-mcp_agent/cli/main.py,sha256=Hfa6yn47gfx_d6TUEbzDz68k2gaIS9vvN0yDbVOUgnc,3181
+mcp_agent/cli/main.py,sha256=Oo13X7LB0Cf7JrkilQXz8Eqi_48cE0Rr2qqDUOQifEQ,3175
 mcp_agent/cli/terminal.py,sha256=GRwD-RGW7saIz2IOWZn5vD6JjiArscELBThm1GTFkuI,1065
 mcp_agent/cli/commands/check_config.py,sha256=15YK0mtDQbVopnMm3HBjOeY2-00FUHj6tt8RvaemKmI,21081
-mcp_agent/cli/commands/go.py,sha256=ydVEyLrMxkp0ZuiPbcOhxUuxBx5FE2RJAV02dppSkaU,14121
+mcp_agent/cli/commands/go.py,sha256=SHFJlO3SeZzAWvx_Gz72zXKA8n7XfaZ49W6UNS5mbV4,14668
 mcp_agent/cli/commands/quickstart.py,sha256=lcozUGP9RRO8xZaayJg4pQNeY5zDQs-eg-ABm0A15cI,16471
 mcp_agent/cli/commands/server_helpers.py,sha256=x5tD_qhf1W4D2li09sfOyfRWCOCa6lmpumYAPsEfIQs,3649
 mcp_agent/cli/commands/setup.py,sha256=eOEd4TL-b0DaDeSJMGOfNOsTEItoZ67W88eTP4aP-bo,6482
@@ -67,11 +67,11 @@ mcp_agent/llm/augmented_llm_playback.py,sha256=rLzgai496e2RlxqQp_Bp0U-Y1FF1SGsWl
 mcp_agent/llm/augmented_llm_silent.py,sha256=IUnK_1Byy4D9TG0Pj46LFeNezgSTQ8d6MQIHWAImBwE,1846
 mcp_agent/llm/augmented_llm_slow.py,sha256=DDSD8bL2flmQrVHZm-UDs7sR8aHRWkDOcOW-mX_GPok,2067
 mcp_agent/llm/memory.py,sha256=pTOaTDV3EA3X68yKwEtUAu7s0xGIQQ_cKBhfYUnfR0w,8614
-mcp_agent/llm/model_database.py,sha256=wUWFJuBvJc4vcy52fdtlZQO1cKs5lNd3t81LX783TII,9316
-mcp_agent/llm/model_factory.py,sha256=Vr7Ypt2laE_gPPdLWOFAQSXZkD7p3D5KKSvH3ijDwWw,11616
+mcp_agent/llm/model_database.py,sha256=7puiFtc1tH25M5JEnKdAxsEk9OeAMJHamXA1c37tdjw,9490
+mcp_agent/llm/model_factory.py,sha256=2Ii81GrFJ9FvyjVb4OUyb0FRT59iAi9Vido5v0kZa7w,11732
 mcp_agent/llm/prompt_utils.py,sha256=yWQHykoK13QRF7evHUKxVF0SpVLN-Bsft0Yixzvn0g0,4825
 mcp_agent/llm/provider_key_manager.py,sha256=LSWIgcXlrUS4sfBvQBCya82qC6NcXQPYLtDHwHNOXR4,3394
-mcp_agent/llm/provider_types.py,sha256=XK9IwmJND7Gd-WvggRXKovBSV9tTwtLD2BSUs86epp4,1091
+mcp_agent/llm/provider_types.py,sha256=LfuVuFjcM_lMED0lkrNfKK8s8Fs1vbxugQsrcBE2CIY,1119
 mcp_agent/llm/sampling_converter.py,sha256=C7wPBlmT0eD90XWabC22zkxsrVHKCrjwIwg6cG628cI,2926
 mcp_agent/llm/sampling_format_converter.py,sha256=xGz4odHpOcP7--eFaJaFtUR8eR9jxZS7MnLH6J7n0EU,1263
 mcp_agent/llm/usage_tracking.py,sha256=rF6v8QQDam8QbvlP4jzHljKqvuNHExeYDLkUMI86czY,16073
@@ -85,7 +85,8 @@ mcp_agent/llm/providers/augmented_llm_deepseek.py,sha256=YokCz22qzbq5DyFCzviB8L7
 mcp_agent/llm/providers/augmented_llm_generic.py,sha256=5Uq8ZBhcFuQTt7koP_5ykolREh2iWu8zKhNbh3pM9lQ,1210
 mcp_agent/llm/providers/augmented_llm_google_native.py,sha256=c6zczfs-Iw70j3OYELHJ4S7CRwAddkeXinex_yLMhmU,22194
 mcp_agent/llm/providers/augmented_llm_google_oai.py,sha256=g_g46h-YuxqbRZiO_dVo5zO2OkX1yx7nb6xDaQbOvWs,1137
-mcp_agent/llm/providers/augmented_llm_openai.py,sha256=cxctddW9a-5Kv3yGSMBudQh_L7pc9yV5fEOhT7J8ho0,24169
+mcp_agent/llm/providers/augmented_llm_groq.py,sha256=MEk_1elggzIGlMIrnRP5bQ7kcBvu5jCwK0AoImkpb7c,1050
+mcp_agent/llm/providers/augmented_llm_openai.py,sha256=faleOhWV9ShJ3QW8VhaHOcQsRpYHU1W44eVk6uEcguA,24359
 mcp_agent/llm/providers/augmented_llm_openrouter.py,sha256=xKhEfjQEOnjMjbHT2Dc0ZD0_Cyx7EfFz1wA3Uh9S6Xk,2341
 mcp_agent/llm/providers/augmented_llm_tensorzero.py,sha256=wxNffqdM1Psvfyya6Pmo__BZjyrek55_8iTRoH4TboI,20577
 mcp_agent/llm/providers/augmented_llm_xai.py,sha256=PpztvX26mTl2GPTCzoI24ugxlm5BPpqByHnKO19ej28,1332
@@ -167,8 +168,8 @@ mcp_agent/resources/examples/workflows/short_story.txt,sha256=X3y_1AyhLFN2AKzCKv
 mcp_agent/tools/tool_definition.py,sha256=L3Pxl-uLEXqlVoo-bYuFTFALeI-2pIU44YgFhsTKEtM,398
 mcp_agent/ui/console_display.py,sha256=XXrHr950wSBSedEKUaaGkXjOzuFpQYzUKKiyaZ58Mps,28280
 mcp_agent/ui/console_display_legacy.py,sha256=sm2v61-IPVafbF7uUaOyhO2tW_zgFWOjNS83IEWqGgI,14931
-fast_agent_mcp-0.2.47.dist-info/METADATA,sha256=p1oY0KA-3ubpyz2UUIHWURuKn84XeIRI1roY6hSBAqU,31048
-fast_agent_mcp-0.2.47.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-fast_agent_mcp-0.2.47.dist-info/entry_points.txt,sha256=QaX5kLdI0VdMPRdPUF1nkG_WdLUTNjp_icW6e3EhNYU,232
-fast_agent_mcp-0.2.47.dist-info/licenses/LICENSE,sha256=Gx1L3axA4PnuK4FxsbX87jQ1opoOkSFfHHSytW6wLUU,10935
-fast_agent_mcp-0.2.47.dist-info/RECORD,,
+fast_agent_mcp-0.2.48.dist-info/METADATA,sha256=urEeemSyTBTUjlI5CtGjrqLD8SZWgYx1ph9GeUhp3J8,31048
+fast_agent_mcp-0.2.48.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+fast_agent_mcp-0.2.48.dist-info/entry_points.txt,sha256=QaX5kLdI0VdMPRdPUF1nkG_WdLUTNjp_icW6e3EhNYU,232
+fast_agent_mcp-0.2.48.dist-info/licenses/LICENSE,sha256=Gx1L3axA4PnuK4FxsbX87jQ1opoOkSFfHHSytW6wLUU,10935
+fast_agent_mcp-0.2.48.dist-info/RECORD,,
mcp_agent/cli/commands/go.py CHANGED
@@ -19,7 +19,7 @@ app = typer.Typer(
 
 
 async def _run_agent(
-    name: str = "FastAgent CLI",
+    name: str = "fast-agent cli",
     instruction: str = "You are a helpful AI Agent.",
     config_path: Optional[str] = None,
     server_list: Optional[List[str]] = None,
@@ -28,6 +28,7 @@ async def _run_agent(
     prompt_file: Optional[str] = None,
     url_servers: Optional[Dict[str, Dict[str, str]]] = None,
     stdio_servers: Optional[Dict[str, Dict[str, str]]] = None,
+    agent_name: Optional[str] = "agent",
 ) -> None:
     """Async implementation to run an interactive agent."""
     from pathlib import Path
@@ -35,6 +36,7 @@ async def _run_agent(
     from mcp_agent.mcp.prompts.prompt_load import load_prompt_multipart
 
     # Create the FastAgent instance
+
     fast_kwargs = {
         "name": name,
         "config_path": config_path,
@@ -103,6 +105,8 @@ async def _run_agent(
         # Single model - use original behavior
         # Define the agent with specified parameters
         agent_kwargs = {"instruction": instruction}
+        if agent_name:
+            agent_kwargs["name"] = agent_name
         if server_list:
             agent_kwargs["servers"] = server_list
         if model:
@@ -117,7 +121,7 @@ async def _run_agent(
             print(response)
         elif prompt_file:
             prompt = load_prompt_multipart(Path(prompt_file))
-            response = await agent.default.generate(prompt)
+            response = await agent.generate(prompt)
             # Print the response text and exit
             print(response.last_text())
         else:
@@ -138,6 +142,7 @@ def run_async_agent(
     message: Optional[str] = None,
     prompt_file: Optional[str] = None,
     stdio_commands: Optional[List[str]] = None,
+    agent_name: Optional[str] = None,
 ):
     """Run the async agent function with proper loop handling."""
     server_list = servers.split(",") if servers else None
@@ -237,6 +242,7 @@ def run_async_agent(
                 prompt_file=prompt_file,
                 url_servers=url_servers,
                 stdio_servers=stdio_servers,
+                agent_name=agent_name,
             )
         )
     finally:
@@ -258,7 +264,7 @@ def run_async_agent(
 @app.callback(invoke_without_command=True, no_args_is_help=False)
 def go(
     ctx: typer.Context,
-    name: str = typer.Option("FastAgent CLI", "--name", help="Name for the agent"),
+    name: str = typer.Option("fast-agent", "--name", help="Name for the agent"),
     instruction: Optional[str] = typer.Option(
         None, "--instruction", "-i", help="Path to file or URL containing instruction for the agent"
     ),
@@ -338,6 +344,8 @@ def go(
 
     # Resolve instruction from file/URL or use default
     resolved_instruction = "You are a helpful AI Agent."  # Default
+    agent_name = "agent"
+
     if instruction:
         try:
             from pathlib import Path
@@ -352,6 +360,11 @@ def go(
             else:
                 # Treat as file path
                 resolved_instruction = _resolve_instruction(Path(instruction))
+                # Extract filename without extension to use as agent name
+                instruction_path = Path(instruction)
+                if instruction_path.exists() and instruction_path.is_file():
+                    # Get filename without extension
+                    agent_name = instruction_path.stem
         except Exception as e:
             typer.echo(f"Error loading instruction from {instruction}: {e}", err=True)
             raise typer.Exit(1)
@@ -367,4 +380,5 @@ def go(
         message=message,
         prompt_file=prompt_file,
         stdio_commands=stdio_commands,
+        agent_name=agent_name,
     )
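
The net effect of the go.py changes is that the interactive agent is now named after the instruction file instead of always being "FastAgent CLI". A minimal Python sketch of the derivation added above (the file path is a made-up example):

    from pathlib import Path

    instruction = "prompts/researcher.md"   # hypothetical value passed via --instruction / -i
    instruction_path = Path(instruction)

    agent_name = "agent"                    # default when no instruction file is given
    if instruction_path.exists() and instruction_path.is_file():
        agent_name = instruction_path.stem  # -> "researcher"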
mcp_agent/cli/main.py CHANGED
@@ -8,7 +8,7 @@ from mcp_agent.cli.commands import check_config, go, quickstart, setup
 from mcp_agent.cli.terminal import Application
 
 app = typer.Typer(
-    help="FastAgent CLI - Build effective agents using Model Context Protocol",
+    help="fast-agent - Build effective agents using Model Context Protocol",
     add_completion=False,  # We'll add this later when we have more commands
 )
 
@@ -60,7 +60,7 @@ def main(
     color: bool = typer.Option(True, "--color/--no-color", help="Enable/disable color output"),
     version: bool = typer.Option(False, "--version", help="Show version and exit"),
 ) -> None:
-    """FastAgent CLI - Build effective agents using Model Context Protocol (MCP).
+    """fast-agent - Build effective agents using Model Context Protocol (MCP).
 
     Use --help with any command for detailed usage information.
     """
mcp_agent/config.py CHANGED
@@ -224,6 +224,17 @@ class AzureSettings(BaseModel):
     model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)
 
 
+class GroqSettings(BaseModel):
+    """
+    Settings for using Groq-hosted models in the fast-agent application.
+    """
+
+    api_key: str | None = None
+    base_url: str | None = "https://api.groq.com/openai/v1"
+
+    model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)
+
+
 class OpenTelemetrySettings(BaseModel):
     """
     OTEL settings for the fast-agent application.
@@ -441,6 +452,9 @@ class Settings(BaseSettings):
     huggingface: HuggingFaceSettings | None = None
     """Settings for HuggingFace authentication (used for MCP connections)"""
 
+    groq: GroqSettings | None = None
+    """Settings for using the Groq provider in the fast-agent application"""
+
     logger: LoggerSettings | None = LoggerSettings()
     """Logger settings for the fast-agent application"""
mcp_agent/llm/model_database.py CHANGED
@@ -129,6 +129,10 @@ class ModelDatabase:
         context_window=2097152, max_output_tokens=8192, tokenizes=GOOGLE_MULTIMODAL
     )
 
+    KIMI_MOONSHOT = ModelParameters(
+        context_window=131072, max_output_tokens=16384, tokenizes=TEXT_ONLY
+    )
+
     # FIXME: xAI has not documented the max output tokens for Grok 4. Using Grok 3 as a placeholder. Will need to update when available (if ever)
     GROK_4 = ModelParameters(context_window=256000, max_output_tokens=16385, tokenizes=XAI_VISION)
 
@@ -209,6 +213,7 @@ class ModelDatabase:
         "grok-3-mini": GROK_3,
         "grok-3-fast": GROK_3,
         "grok-3-mini-fast": GROK_3,
+        "moonshotai/kimi-k2-instruct": KIMI_MOONSHOT,
     }
 
     @classmethod
mcp_agent/llm/model_factory.py CHANGED
@@ -19,6 +19,7 @@ from mcp_agent.llm.providers.augmented_llm_deepseek import DeepSeekAugmentedLLM
 from mcp_agent.llm.providers.augmented_llm_generic import GenericAugmentedLLM
 from mcp_agent.llm.providers.augmented_llm_google_native import GoogleNativeAugmentedLLM
 from mcp_agent.llm.providers.augmented_llm_google_oai import GoogleOaiAugmentedLLM
+from mcp_agent.llm.providers.augmented_llm_groq import GroqAugmentedLLM
 from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
 from mcp_agent.llm.providers.augmented_llm_openrouter import OpenRouterAugmentedLLM
 from mcp_agent.llm.providers.augmented_llm_tensorzero import TensorZeroAugmentedLLM
@@ -43,6 +44,7 @@ LLMClass = Union[
     Type[GenericAugmentedLLM],
     Type[AzureOpenAIAugmentedLLM],
     Type[BedrockAugmentedLLM],
+    Type[GroqAugmentedLLM],
 ]
 
 
@@ -122,7 +124,6 @@ class ModelFactory:
         "qwen-plus": Provider.ALIYUN,
         "qwen-max": Provider.ALIYUN,
         "qwen-long": Provider.ALIYUN,
-
     }
 
     MODEL_ALIASES = {
@@ -159,6 +160,7 @@ class ModelFactory:
         Provider.AZURE: AzureOpenAIAugmentedLLM,
         Provider.ALIYUN: AliyunAugmentedLLM,
         Provider.BEDROCK: BedrockAugmentedLLM,
+        Provider.GROQ: GroqAugmentedLLM,
     }
 
     # Mapping of special model names to their specific LLM classes
@@ -213,11 +215,11 @@ class ModelFactory:
         # If provider still None, try to get from DEFAULT_PROVIDERS using the model_name_str
         if provider is None:
             provider = cls.DEFAULT_PROVIDERS.get(model_name_str)
-
+
         # If still None, try pattern matching for Bedrock models
         if provider is None and BedrockAugmentedLLM.matches_model_pattern(model_name_str):
             provider = Provider.BEDROCK
-
+
         if provider is None:
             raise ModelConfigError(
                 f"Unknown model or provider for: {model_string}. Model name parsed as '{model_name_str}'"
mcp_agent/llm/provider_types.py CHANGED
@@ -28,3 +28,4 @@ class Provider(Enum):
     HUGGINGFACE = ("huggingface", "HuggingFace")  # For HuggingFace MCP connections
     XAI = ("xai", "XAI")  # For xAI Grok models
     BEDROCK = ("bedrock", "Bedrock")
+    GROQ = ("groq", "Groq")
mcp_agent/llm/providers/augmented_llm_groq.py ADDED
@@ -0,0 +1,30 @@
+from mcp_agent.core.request_params import RequestParams
+from mcp_agent.llm.provider_types import Provider
+from mcp_agent.llm.providers.augmented_llm_openai import OpenAIAugmentedLLM
+
+GROQ_BASE_URL = "https://api.groq.com/openai/v1"
+DEFAULT_GROQ_MODEL = ""
+
+
+class GroqAugmentedLLM(OpenAIAugmentedLLM):
+    def __init__(self, *args, **kwargs) -> None:
+        super().__init__(*args, provider=Provider.GROQ, **kwargs)
+
+    def _initialize_default_params(self, kwargs: dict) -> RequestParams:
+        """Initialize Groq default parameters"""
+        chosen_model = kwargs.get("model", DEFAULT_GROQ_MODEL)
+
+        return RequestParams(
+            model=chosen_model,
+            systemPrompt=self.instruction,
+            parallel_tool_calls=False,
+            max_iterations=20,
+            use_history=True,
+        )
+
+    def _base_url(self) -> str:
+        base_url = None
+        if self.context.config and self.context.config.groq:
+            base_url = self.context.config.groq.base_url
+
+        return base_url if base_url else GROQ_BASE_URL
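
A hedged usage sketch for the new provider, assuming fast-agent's usual decorator API and provider-prefixed model strings; the exact model string below is an assumption built from the model_database entry and is not shown in this diff:

    import asyncio

    from mcp_agent.core.fastagent import FastAgent

    fast = FastAgent("groq-demo")

    # Assumed "provider.model" format; needs a Groq api_key in config or environment
    @fast.agent(instruction="You are a helpful AI Agent.", model="groq.moonshotai/kimi-k2-instruct")
    async def main():
        async with fast.run() as agent:
            print(await agent.send("Say hello from Groq"))

    if __name__ == "__main__":
        asyncio.run(main())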
mcp_agent/llm/providers/augmented_llm_openai.py CHANGED
@@ -388,7 +388,10 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
 
         # ParsedChatCompletionMessage is compatible with ChatCompletionMessage
         # since it inherits from it, so we can use it directly
-        messages.append(message)
+        # Convert to dict and remove None values
+        message_dict = message.model_dump()
+        message_dict = {k: v for k, v in message_dict.items() if v is not None}
+        messages.append(message_dict)
 
         message_text = message.content
         if await self._is_tool_stop_reason(choice.finish_reason) and message.tool_calls:
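
The new None-stripping step is a plain dict comprehension over pydantic's model_dump(), likely so OpenAI-compatible backends never receive explicit null fields. A self-contained illustration of the same pattern, using a stand-in model rather than the real ParsedChatCompletionMessage:

    from pydantic import BaseModel

    class Msg(BaseModel):  # stand-in for ParsedChatCompletionMessage
        role: str
        content: str | None = None
        tool_calls: list | None = None

    message = Msg(role="assistant", content="hi")
    message_dict = {k: v for k, v in message.model_dump().items() if v is not None}
    print(message_dict)  # {'role': 'assistant', 'content': 'hi'}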