fast-agent-mcp 0.0.11__py3-none-any.whl → 0.0.13__py3-none-any.whl

This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between versions exactly as they appear in their public registries.

Potentially problematic release: this version of fast-agent-mcp might be problematic.

Files changed (30)
  1. {fast_agent_mcp-0.0.11.dist-info → fast_agent_mcp-0.0.13.dist-info}/METADATA +9 -1
  2. {fast_agent_mcp-0.0.11.dist-info → fast_agent_mcp-0.0.13.dist-info}/RECORD +30 -25
  3. mcp_agent/agents/agent.py +48 -8
  4. mcp_agent/cli/commands/bootstrap.py +2 -5
  5. mcp_agent/cli/commands/setup.py +1 -1
  6. mcp_agent/cli/main.py +6 -6
  7. mcp_agent/core/enhanced_prompt.py +358 -0
  8. mcp_agent/core/exceptions.py +17 -0
  9. mcp_agent/core/fastagent.py +108 -34
  10. mcp_agent/human_input/handler.py +43 -18
  11. mcp_agent/mcp/mcp_connection_manager.py +14 -12
  12. mcp_agent/resources/examples/internal/agent.py +17 -0
  13. mcp_agent/resources/examples/internal/job.py +1 -1
  14. mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +1 -1
  15. mcp_agent/resources/examples/researcher/fastagent.config.yaml +53 -0
  16. mcp_agent/resources/examples/researcher/researcher-eval.py +53 -0
  17. mcp_agent/resources/examples/workflows/chaining.py +5 -1
  18. mcp_agent/resources/examples/workflows/evaluator.py +7 -4
  19. mcp_agent/resources/examples/workflows/fastagent.config.yaml +24 -0
  20. mcp_agent/resources/examples/workflows/orchestrator.py +3 -2
  21. mcp_agent/resources/examples/workflows/parallel.py +2 -1
  22. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +31 -30
  23. mcp_agent/workflows/llm/augmented_llm.py +8 -2
  24. mcp_agent/workflows/llm/augmented_llm_anthropic.py +3 -1
  25. mcp_agent/workflows/llm/augmented_llm_openai.py +20 -9
  26. mcp_agent/workflows/llm/model_factory.py +7 -4
  27. {fast_agent_mcp-0.0.11.dist-info → fast_agent_mcp-0.0.13.dist-info}/WHEEL +0 -0
  28. {fast_agent_mcp-0.0.11.dist-info → fast_agent_mcp-0.0.13.dist-info}/entry_points.txt +0 -0
  29. {fast_agent_mcp-0.0.11.dist-info → fast_agent_mcp-0.0.13.dist-info}/licenses/LICENSE +0 -0
  30. /mcp_agent/resources/examples/{mcp_researcher → researcher}/researcher.py +0 -0
mcp_agent/workflows/llm/augmented_llm_openai.py

@@ -97,12 +97,7 @@ class OpenAIAugmentedLLM(
         use_history=True,
     )

-    async def generate(self, message, request_params: RequestParams | None = None):
-        """
-        Process a query using an LLM and available tools.
-        The default implementation uses OpenAI's ChatCompletion as the LLM.
-        Override this method to use a different LLM.
-        """
+    def _api_key(self) -> str:
         config = self.context.config
         api_key = None

@@ -121,9 +116,22 @@ class OpenAIAugmentedLLM(
                 "Add it to your configuration file under openai.api_key\n"
                 "Or set the OPENAI_API_KEY environment variable",
             )
+        return api_key
+
+    def _base_url(self) -> str:
+        return (
+            self.context.config.openai.base_url if self.context.config.openai else None
+        )
+
+    async def generate(self, message, request_params: RequestParams | None = None):
+        """
+        Process a query using an LLM and available tools.
+        The default implementation uses OpenAI's ChatCompletion as the LLM.
+        Override this method to use a different LLM.
+        """

         try:
-            openai_client = OpenAI(api_key=api_key, base_url=config.openai.base_url)
+            openai_client = OpenAI(api_key=self._api_key(), base_url=self._base_url())
             messages: List[ChatCompletionMessageParam] = []
             params = self.get_request_params(request_params)
         except AuthenticationError as e:
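Taken together, the two hunks above pull the credential lookup out of generate() into the helpers _api_key() and _base_url(), so the interactive path and the structured-output path construct their OpenAI client identically. A minimal standalone sketch of the pattern (the settings class here is a hypothetical stand-in inferred from the diff context, not the package's real config type):

import os

class OpenAISettings:
    # Hypothetical stand-in for the openai section of fastagent.config.yaml.
    def __init__(self, api_key=None, base_url=None):
        self.api_key = api_key
        self.base_url = base_url

def resolve_api_key(settings: OpenAISettings | None) -> str:
    # Prefer the configured key, fall back to the environment, fail loudly otherwise.
    api_key = settings.api_key if settings else None
    api_key = api_key or os.environ.get("OPENAI_API_KEY")
    if not api_key:
        raise RuntimeError(
            "OpenAI API key not found: add openai.api_key to the configuration "
            "file or set the OPENAI_API_KEY environment variable."
        )
    return api_key

def resolve_base_url(settings: OpenAISettings | None):
    # None is safe here: the OpenAI SDK falls back to its default endpoint.
    return settings.base_url if settings else None

Returning None from the base-URL helper is deliberate: OpenAI(base_url=None) uses the SDK's default endpoint, whereas the old unconditional config.openai.base_url access would fail when no openai section was configured.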
@@ -356,8 +364,8 @@ class OpenAIAugmentedLLM(
         # Next we pass the text through instructor to extract structured data
         client = instructor.from_openai(
             OpenAI(
-                api_key=self.context.config.openai.api_key,
-                base_url=self.context.config.openai.base_url,
+                api_key=self._api_key(),
+                base_url=self._base_url(),
             ),
             mode=instructor.Mode.TOOLS_STRICT,
         )
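For context, instructor.from_openai() patches the OpenAI client so that a Pydantic model can be passed as response_model, and Mode.TOOLS_STRICT drives the extraction through OpenAI's strict tool calling. A hedged usage sketch (the model name and response type are illustrative, not taken from this package):

import instructor
from openai import OpenAI
from pydantic import BaseModel

class CityWeather(BaseModel):  # illustrative response model
    city: str
    temperature_c: float

client = instructor.from_openai(OpenAI(), mode=instructor.Mode.TOOLS_STRICT)
result = client.chat.completions.create(
    model="gpt-4o",
    response_model=CityWeather,  # instructor validates the completion into this type
    messages=[{"role": "user", "content": "It is 21C in Paris right now."}],
)
print(result.city, result.temperature_c)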
@@ -373,6 +381,9 @@ class OpenAIAugmentedLLM(
                 {"role": "user", "content": response},
             ],
         )
+        await self.show_assistant_message(
+            str(structured_response), title="ASSISTANT/STRUCTURED"
+        )

         return structured_response

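The added show_assistant_message call echoes the structured result to the interactive display under an ASSISTANT/STRUCTURED title, so structured completions are no longer returned silently.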
mcp_agent/workflows/llm/model_factory.py

@@ -3,6 +3,7 @@ from enum import Enum, auto
 from typing import Optional, Type, Dict, Union, Callable

 from mcp_agent.agents.agent import Agent
+from mcp_agent.core.exceptions import ModelConfigError
 from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM
 from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM
 from mcp_agent.workflows.llm.augmented_llm import RequestParams
@@ -53,6 +54,7 @@ class ModelFactory:

     # TODO -- add context window size information for display/mmanagement
     # TODO -- add audio supporting got-4o-audio-preview
+    # TODO -- bring model parameter configuration here
     # Mapping of model names to their default providers
     DEFAULT_PROVIDERS = {
         "gpt-4o": Provider.OPENAI,
@@ -80,6 +82,7 @@ class ModelFactory:
         "claude": "claude-3-5-sonnet-latest",
         "haiku": "claude-3-5-haiku-latest",
         "haiku3": "claude-3-haiku-20240307",
+        "haiku35": "claude-3-5-haiku-latest",
         "opus": "claude-3-opus-latest",
         "opus3": "claude-3-opus-latest",
     }
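The table maps short aliases to full model identifiers; "haiku35" now resolves to the same model as "haiku". A minimal sketch of how such an alias lookup behaves (simplified from the factory shown in the diff):

MODEL_ALIASES = {
    "haiku": "claude-3-5-haiku-latest",
    "haiku3": "claude-3-haiku-20240307",
    "haiku35": "claude-3-5-haiku-latest",
}

def resolve_alias(name: str) -> str:
    # Aliases expand to full identifiers; unknown names pass through unchanged
    # so fully qualified model strings keep working.
    return MODEL_ALIASES.get(name, name)

assert resolve_alias("haiku35") == "claude-3-5-haiku-latest"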
@@ -121,7 +124,7 @@ class ModelFactory:
         if provider is None:
             provider = cls.DEFAULT_PROVIDERS.get(model_name)
         if provider is None:
-            raise ValueError(f"Unknown model: {model_name}")
+            raise ModelConfigError(f"Unknown model: {model_name}")

         return ModelConfig(
             provider=provider, model_name=model_name, reasoning_effort=reasoning_effort
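Raising the library's own ModelConfigError (imported in the hunk above) instead of a bare ValueError lets callers separate configuration mistakes from unrelated value errors. A self-contained sketch of the distinction (the lookup helper below is hypothetical, not the factory's real method):

class ModelConfigError(Exception):
    # Stand-in for mcp_agent.core.exceptions.ModelConfigError.
    pass

def lookup_provider(model_name: str, providers: dict) -> str:
    provider = providers.get(model_name)
    if provider is None:
        raise ModelConfigError(f"Unknown model: {model_name}")
    return provider

try:
    lookup_provider("not-a-real-model", {"gpt-4o": "openai"})
except ModelConfigError as exc:
    print(f"Bad model configuration: {exc}")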
@@ -173,16 +176,16 @@ class ModelFactory:
             "request_params": factory_params,
             "name": kwargs.get("name"),
         }
-
+
         # Add reasoning effort if available
         if config.reasoning_effort:
             llm_args["reasoning_effort"] = config.reasoning_effort.value
-
+
         # Forward all other kwargs (including verb)
         for key, value in kwargs.items():
             if key not in ["agent", "default_request_params", "name"]:
                 llm_args[key] = value
-
+
         llm = llm_class(**llm_args)
         return llm

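Whitespace-only changes aside, this block assembles the constructor arguments for the selected LLM class and forwards any keyword arguments the factory itself does not consume. A simplified sketch of that forwarding pattern (names taken from the diff):

RESERVED_KEYS = {"agent", "default_request_params", "name"}

def build_llm_args(factory_params, **kwargs):
    llm_args = {
        "request_params": factory_params,
        "name": kwargs.get("name"),
    }
    # Pass everything else straight through to the LLM class (e.g. verb),
    # so new constructor options need no factory changes.
    for key, value in kwargs.items():
        if key not in RESERVED_KEYS:
            llm_args[key] = value
    return llm_args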