fast-agent-mcp 0.0.9__py3-none-any.whl → 0.0.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of fast-agent-mcp has been flagged as a potentially problematic release.

Files changed (37)
  1. {fast_agent_mcp-0.0.9.dist-info → fast_agent_mcp-0.0.12.dist-info}/METADATA +17 -11
  2. {fast_agent_mcp-0.0.9.dist-info → fast_agent_mcp-0.0.12.dist-info}/RECORD +36 -28
  3. mcp_agent/app.py +4 -4
  4. mcp_agent/cli/commands/bootstrap.py +2 -5
  5. mcp_agent/cli/commands/setup.py +1 -1
  6. mcp_agent/cli/main.py +4 -4
  7. mcp_agent/core/enhanced_prompt.py +315 -0
  8. mcp_agent/core/fastagent.py +520 -388
  9. mcp_agent/event_progress.py +5 -2
  10. mcp_agent/human_input/handler.py +6 -2
  11. mcp_agent/logging/rich_progress.py +10 -5
  12. mcp_agent/mcp/mcp_aggregator.py +2 -1
  13. mcp_agent/mcp/mcp_connection_manager.py +67 -37
  14. mcp_agent/resources/examples/internal/agent.py +17 -0
  15. mcp_agent/resources/examples/internal/job.py +83 -0
  16. mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +1 -1
  17. mcp_agent/resources/examples/researcher/fastagent.config.yaml +53 -0
  18. mcp_agent/resources/examples/researcher/researcher-eval.py +53 -0
  19. mcp_agent/resources/examples/researcher/researcher.py +38 -0
  20. mcp_agent/resources/examples/workflows/agent.py +17 -0
  21. mcp_agent/resources/examples/workflows/agent_build.py +61 -0
  22. mcp_agent/resources/examples/workflows/chaining.py +0 -1
  23. mcp_agent/resources/examples/workflows/evaluator.py +6 -3
  24. mcp_agent/resources/examples/workflows/fastagent.py +22 -0
  25. mcp_agent/resources/examples/workflows/orchestrator.py +1 -1
  26. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +91 -92
  27. mcp_agent/workflows/llm/augmented_llm.py +14 -3
  28. mcp_agent/workflows/llm/augmented_llm_anthropic.py +8 -5
  29. mcp_agent/workflows/llm/augmented_llm_openai.py +20 -9
  30. mcp_agent/workflows/llm/model_factory.py +25 -11
  31. mcp_agent/workflows/orchestrator/orchestrator.py +68 -7
  32. mcp_agent/workflows/orchestrator/orchestrator_prompts.py +11 -6
  33. mcp_agent/workflows/router/router_llm.py +13 -2
  34. mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -9
  35. {fast_agent_mcp-0.0.9.dist-info → fast_agent_mcp-0.0.12.dist-info}/WHEEL +0 -0
  36. {fast_agent_mcp-0.0.9.dist-info → fast_agent_mcp-0.0.12.dist-info}/entry_points.txt +0 -0
  37. {fast_agent_mcp-0.0.9.dist-info → fast_agent_mcp-0.0.12.dist-info}/licenses/LICENSE +0 -0
--- a/mcp_agent/resources/examples/workflows/evaluator.py
+++ b/mcp_agent/resources/examples/workflows/evaluator.py
@@ -11,7 +11,7 @@ fast = FastAgent("Evaluator-Optimizer")
 
 # Define optimizer agent
 @fast.agent(
-    name="optimizer",
+    name="generator",
     instruction="""You are a career coach specializing in cover letter writing.
     You are tasked with generating a compelling cover letter given the job posting,
     candidate details, and company information. Tailor the response to the company and job requirements.
@@ -38,12 +38,13 @@ fast = FastAgent("Evaluator-Optimizer")
     Summarize your evaluation as a structured response with:
    - Overall quality rating.
    - Specific feedback and areas for improvement.""",
-    model="sonnet",
+    # instructor doesn't seem to work for sonnet37
+    # model="sonnet35",
 )
 # Define the evaluator-optimizer workflow
 @fast.evaluator_optimizer(
     name="cover_letter_writer",
-    optimizer="optimizer",  # Reference to optimizer agent
+    generator="generator",  # Reference to optimizer agent
     evaluator="evaluator",  # Reference to evaluator agent
     min_rating="EXCELLENT",  # Strive for excellence
     max_refinements=3,  # Maximum iterations
@@ -70,6 +71,8 @@ async def main():
             f"Company information: {company_information}",
         )
 
+        await agent()
+
 
 if __name__ == "__main__":
     asyncio.run(main())
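Taken together, the three hunks above rename the example's workflow wiring from optimizer to generator and add an interactive turn at the end. For orientation, a consolidated sketch of the 0.0.12 shape, with instructions abridged and the stacked-decorator layout assumed from the surrounding examples:

    import asyncio
    from mcp_agent.core.fastagent import FastAgent

    fast = FastAgent("Evaluator-Optimizer")

    # Generator agent (named "optimizer" before 0.0.12)
    @fast.agent(
        name="generator",
        instruction="Generate a compelling cover letter from the job posting, "
        "candidate details, and company information.",
    )
    # Evaluator agent (full instruction abridged here)
    @fast.agent(
        name="evaluator",
        instruction="Rate the letter and give specific feedback for improvement.",
    )
    # The workflow keyword is now generator=, not optimizer=
    @fast.evaluator_optimizer(
        name="cover_letter_writer",
        generator="generator",
        evaluator="evaluator",
        min_rating="EXCELLENT",
        max_refinements=3,
    )
    async def main():
        async with fast.run() as agent:
            await agent()  # drop into the interactive prompt, as the diff adds

    if __name__ == "__main__":
        asyncio.run(main())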
--- /dev/null
+++ b/mcp_agent/resources/examples/workflows/fastagent.py
@@ -0,0 +1,22 @@
+import asyncio
+from mcp_agent.core.fastagent import FastAgent
+
+# Create the application
+agent_app = FastAgent("FastAgent Example")
+# Uncomment the below to disable human input callback tool
+# agent_app.app._human_input_callback = None
+
+
+# Define the agent
+@agent_app.agent(
+    instruction="You are a helpful AI Agent",
+    servers=[],
+)
+async def main():
+    # use the --model= command line switch to specify model
+    async with agent_app.run() as agent:
+        await agent()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
--- a/mcp_agent/resources/examples/workflows/orchestrator.py
+++ b/mcp_agent/resources/examples/workflows/orchestrator.py
@@ -24,7 +24,7 @@ fast = FastAgent("Orchestrator-Workers")
     the closest match to a user's request, make the appropriate tool calls,
     and return the URI and CONTENTS of the closest match.""",
     servers=["fetch", "filesystem"],
-    model="gpt-4o-mini",
+    model="gpt-4o",
 )
 @fast.agent(
     name="writer",
--- a/mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py
+++ b/mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py
@@ -66,18 +66,19 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
 
     def __init__(
         self,
-        optimizer: Agent | AugmentedLLM,
+        generator: Agent | AugmentedLLM,
         evaluator: str | Agent | AugmentedLLM,
         min_rating: QualityRating = QualityRating.GOOD,
         max_refinements: int = 3,
-        llm_factory: Callable[[Agent], AugmentedLLM] | None = None,  # TODO: Remove legacy - factory should only be needed for str evaluator
+        llm_factory: Callable[[Agent], AugmentedLLM]
+        | None = None,  # TODO: Remove legacy - factory should only be needed for str evaluator
         context: Optional["Context"] = None,
     ):
         """
         Initialize the evaluator-optimizer workflow.
 
         Args:
-            optimizer: The agent/LLM/workflow that generates responses. Can be:
+            generator: The agent/LLM/workflow that generates responses. Can be:
                 - An Agent that will be converted to an AugmentedLLM
                 - An AugmentedLLM instance
                 - An Orchestrator/Router/ParallelLLM workflow
@@ -90,38 +91,38 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
         super().__init__(context=context)
 
         # Set up the optimizer
-        self.name = optimizer.name
+        self.name = generator.name
         self.llm_factory = llm_factory
-        self.optimizer = optimizer
+        self.generator = generator
         self.evaluator = evaluator
 
         # TODO: Remove legacy - optimizer should always be an AugmentedLLM, no conversion needed
-        if isinstance(optimizer, Agent):
+        if isinstance(generator, Agent):
             if not llm_factory:
                 raise ValueError("llm_factory is required when using an Agent")
 
             # Only create new LLM if agent doesn't have one
-            if hasattr(optimizer, "_llm") and optimizer._llm:
-                self.optimizer_llm = optimizer._llm
+            if hasattr(generator, "_llm") and generator._llm:
+                self.generator_llm = generator._llm
             else:
-                self.optimizer_llm = llm_factory(agent=optimizer)
-
-            self.aggregator = optimizer
+                self.generator_llm = llm_factory(agent=generator)
+
+            self.aggregator = generator
             self.instruction = (
-                optimizer.instruction
-                if isinstance(optimizer.instruction, str)
+                generator.instruction
+                if isinstance(generator.instruction, str)
                 else None
             )
 
-        elif isinstance(optimizer, AugmentedLLM):
-            self.optimizer_llm = optimizer
-            self.aggregator = optimizer.aggregator
-            self.instruction = optimizer.instruction
+        elif isinstance(generator, AugmentedLLM):
+            self.generator_llm = generator
+            self.aggregator = generator.aggregator
+            self.instruction = generator.instruction
 
         else:
-            raise ValueError(f"Unsupported optimizer type: {type(optimizer)}")
+            raise ValueError(f"Unsupported optimizer type: {type(generator)}")
 
-        self.history = self.optimizer_llm.history
+        self.history = self.generator_llm.history
 
         # Set up the evaluator
         if isinstance(evaluator, AugmentedLLM):
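For readers migrating call sites, a small self-contained sketch of the Agent-to-LLM setup rule the constructor applies: reuse an already-attached LLM, otherwise require a factory. FakeAgent and attach_llm are hypothetical stand-ins, not library API:

    class FakeAgent:
        def __init__(self, name, llm=None):
            self.name = name
            self._llm = llm

    def attach_llm(generator, llm_factory=None):
        """Reuse an already-attached LLM, else build one via the factory."""
        if getattr(generator, "_llm", None):
            return generator._llm
        if llm_factory is None:
            raise ValueError("llm_factory is required when using an Agent")
        return llm_factory(agent=generator)

    print(attach_llm(FakeAgent("generator", llm="existing-llm")))
    print(attach_llm(FakeAgent("generator"), llm_factory=lambda agent: f"llm-for-{agent.name}"))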
@@ -170,97 +171,95 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
         best_rating = QualityRating.POOR
         self.refinement_history = []
 
-        # Initial generation
+        # Use a single AsyncExitStack for the entire method to maintain connections
         async with contextlib.AsyncExitStack() as stack:
-            if isinstance(self.optimizer, Agent):
-                await stack.enter_async_context(self.optimizer)
-            response = await self.optimizer_llm.generate(
+            # Enter all agent contexts once at the beginning
+            if isinstance(self.generator, Agent):
+                await stack.enter_async_context(self.generator)
+            if isinstance(self.evaluator, Agent):
+                await stack.enter_async_context(self.evaluator)
+
+            # Initial generation
+            response = await self.generator_llm.generate(
                 message=message,
                 request_params=request_params,
             )
 
-        best_response = response
-
-        while refinement_count < self.max_refinements:
-            logger.debug("Optimizer result:", data=response)
+            best_response = response
 
-            # Evaluate current response
-            eval_prompt = self._build_eval_prompt(
-                original_request=str(message),
-                current_response="\n".join(str(r) for r in response)
-                if isinstance(response, list)
-                else str(response),
-                iteration=refinement_count,
-            )
+            while refinement_count < self.max_refinements:
+                logger.debug("Optimizer result:", data=response)
 
-            evaluation_result = None
-            async with contextlib.AsyncExitStack() as stack:
-                if isinstance(self.evaluator, Agent):
-                    await stack.enter_async_context(self.evaluator)
+                # Evaluate current response
+                eval_prompt = self._build_eval_prompt(
+                    original_request=str(message),
+                    current_response="\n".join(str(r) for r in response)
+                    if isinstance(response, list)
+                    else str(response),
+                    iteration=refinement_count,
+                )
 
+                # No need for nested AsyncExitStack here - using the outer one
                 evaluation_result = await self.evaluator_llm.generate_structured(
                     message=eval_prompt,
                     response_model=EvaluationResult,
                     request_params=request_params,
                 )
 
-            # Track iteration
-            self.refinement_history.append(
-                {
-                    "attempt": refinement_count + 1,
-                    "response": response,
-                    "evaluation_result": evaluation_result,
-                }
-            )
-
-            logger.debug("Evaluator result:", data=evaluation_result)
-
-            # Track best response (using enum ordering)
-            if evaluation_result.rating.value > best_rating.value:
-                best_rating = evaluation_result.rating
-                best_response = response
-                logger.debug(
-                    "New best response:",
-                    data={"rating": best_rating, "response": best_response},
+                # Track iteration
+                self.refinement_history.append(
+                    {
+                        "attempt": refinement_count + 1,
+                        "response": response,
+                        "evaluation_result": evaluation_result,
+                    }
                 )
 
-            # Check if we've reached acceptable quality
-            if (
-                evaluation_result.rating.value >= self.min_rating.value
-                or not evaluation_result.needs_improvement
-            ):
-                logger.debug(
-                    f"Acceptable quality {evaluation_result.rating.value} reached",
-                    data={
-                        "rating": evaluation_result.rating.value,
-                        "needs_improvement": evaluation_result.needs_improvement,
-                        "min_rating": self.min_rating.value,
-                    },
+                logger.debug("Evaluator result:", data=evaluation_result)
+
+                # Track best response (using enum ordering)
+                if evaluation_result.rating.value > best_rating.value:
+                    best_rating = evaluation_result.rating
+                    best_response = response
+                    logger.debug(
+                        "New best response:",
+                        data={"rating": best_rating, "response": best_response},
+                    )
+
+                # Check if we've reached acceptable quality
+                if (
+                    evaluation_result.rating.value >= self.min_rating.value
+                    or not evaluation_result.needs_improvement
+                ):
+                    logger.debug(
+                        f"Acceptable quality {evaluation_result.rating.value} reached",
+                        data={
+                            "rating": evaluation_result.rating.value,
+                            "needs_improvement": evaluation_result.needs_improvement,
+                            "min_rating": self.min_rating.value,
+                        },
+                    )
+                    break
+
+                # Generate refined response
+                refinement_prompt = self._build_refinement_prompt(
+                    original_request=str(message),
+                    current_response="\n".join(str(r) for r in response)
+                    if isinstance(response, list)
+                    else str(response),
+                    feedback=evaluation_result,
+                    iteration=refinement_count,
                 )
-                break
-
-            # Generate refined response
-            refinement_prompt = self._build_refinement_prompt(
-                original_request=str(message),
-                current_response="\n".join(str(r) for r in response)
-                if isinstance(response, list)
-                else str(response),
-                feedback=evaluation_result,
-                iteration=refinement_count,
-            )
-
-            async with contextlib.AsyncExitStack() as stack:
-                if isinstance(self.optimizer, Agent):
-                    await stack.enter_async_context(self.optimizer)
 
-                response = await self.optimizer_llm.generate(
+                # No nested AsyncExitStack here either
+                response = await self.generator_llm.generate(
                     message=refinement_prompt,
                     request_params=request_params,
                 )
 
-            refinement_count += 1
+                refinement_count += 1
 
-        return best_response
+            return best_response
 
     async def generate_str(
         self,
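The substantive change in this hunk is connection lifetime: one outer AsyncExitStack now holds both agents open across every refinement turn, where the 0.0.9 code re-entered contexts inside the loop. A runnable toy illustration, with Session as a hypothetical stand-in for an Agent whose server connections live only while its async context is open:

    import asyncio
    import contextlib

    class Session:
        # Hypothetical stand-in: connections live only inside the context.
        def __init__(self, name: str):
            self.name = name

        async def __aenter__(self):
            print(f"{self.name}: connected")
            return self

        async def __aexit__(self, *exc):
            print(f"{self.name}: disconnected")

    async def refine():
        # One outer stack keeps both sessions open across every iteration,
        # instead of reconnecting inside the loop as the old code did.
        async with contextlib.AsyncExitStack() as stack:
            generator = await stack.enter_async_context(Session("generator"))
            evaluator = await stack.enter_async_context(Session("evaluator"))
            for turn in range(3):
                print(f"turn {turn}: {generator.name} drafts, {evaluator.name} scores")

    asyncio.run(refine())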
@@ -276,13 +275,13 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
         # Handle case where response is a single message
         if not isinstance(response, list):
             return str(response)
-        
+
         # Convert all messages to strings, handling different message types
         result_strings = []
         for r in response:
-            if hasattr(r, 'text'):
+            if hasattr(r, "text"):
                 result_strings.append(r.text)
-            elif hasattr(r, 'content'):
+            elif hasattr(r, "content"):
                 # Handle ToolUseBlock and similar
                 if isinstance(r.content, list):
                     # Typically content is a list of blocks
@@ -292,7 +291,7 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
             else:
                 # Fallback to string representation
                 result_strings.append(str(r))
-        
+
         return "\n".join(result_strings)
 
     async def generate_structured(
@@ -306,7 +305,7 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
             message=message, request_params=request_params
         )
 
-        return await self.optimizer.generate_structured(
+        return await self.generator.generate_structured(
             message=response_str,
             response_model=response_model,
             request_params=request_params,
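generate_str's coercion rules are easier to see outside the diff: prefer a .text attribute, then flatten .content block lists, else fall back to str(). A self-contained sketch under those rules; TextBlock and ToolBlock are hypothetical stand-ins, and the block handling inside the truncated branch above may differ:

    from dataclasses import dataclass

    @dataclass
    class TextBlock:
        text: str

    @dataclass
    class ToolBlock:
        content: list

    def flatten(response) -> str:
        # Single message: plain string conversion
        if not isinstance(response, list):
            return str(response)
        parts = []
        for r in response:
            if hasattr(r, "text"):
                parts.append(r.text)
            elif hasattr(r, "content"):
                if isinstance(r.content, list):
                    # Assumed handling of the block list (branch is truncated above)
                    parts.extend(
                        b.text if hasattr(b, "text") else str(b) for b in r.content
                    )
                else:
                    parts.append(str(r.content))
            else:
                parts.append(str(r))
        return "\n".join(parts)

    print(flatten([TextBlock("hello"), ToolBlock([TextBlock("world")])]))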
--- a/mcp_agent/workflows/llm/augmented_llm.py
+++ b/mcp_agent/workflows/llm/augmented_llm.py
@@ -238,6 +238,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, MessageT]):
 
         self.model_selector = self.context.model_selector
         self.type_converter = type_converter
+        self.verb = kwargs.get("verb")
 
     @abstractmethod
     async def generate(
@@ -494,7 +495,10 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, MessageT]):
         console.console.print("\n")
 
     async def show_assistant_message(
-        self, message_text: str | Text, highlight_namespaced_tool: str = ""
+        self,
+        message_text: str | Text,
+        highlight_namespaced_tool: str = "",
+        title: str = "ASSISTANT",
     ):
         """Display an assistant message in a formatted panel."""
 
@@ -524,7 +528,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, MessageT]):
 
         panel = Panel(
             message_text,
-            title=f"[ASSISTANT]{f' ({self.name})' if self.name else ''}",
+            title=f"[{title}]{f' ({self.name})' if self.name else ''}",
             title_align="left",
             style="green",
             border_style="bold white",
@@ -627,8 +631,15 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, MessageT]):
         self, chat_turn: Optional[int] = None, model: Optional[str] = None
     ):
         """Log a chat progress event"""
+        # Determine action type based on verb
+        if hasattr(self, "verb") and self.verb:
+            # Use verb directly regardless of type
+            act = self.verb
+        else:
+            act = ProgressAction.CHATTING
+
         data = {
-            "progress_action": ProgressAction.CHATTING,
+            "progress_action": act,
             "model": model,
             "agent_name": self.name,
             "chat_turn": chat_turn if chat_turn is not None else None,
--- a/mcp_agent/workflows/llm/augmented_llm_anthropic.py
+++ b/mcp_agent/workflows/llm/augmented_llm_anthropic.py
@@ -52,11 +52,12 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
     """
 
     def __init__(self, *args, **kwargs):
-        super().__init__(*args, type_converter=AnthropicMCPTypeConverter, **kwargs)
-
         self.provider = "Anthropic"
-        # Initialize logger with name if available
-        self.logger = get_logger(f"{__name__}.{self.name}" if self.name else __name__)
+        # Initialize logger - keep it simple without name reference
+        self.logger = get_logger(__name__)
+
+        # Now call super().__init__
+        super().__init__(*args, type_converter=AnthropicMCPTypeConverter, **kwargs)
 
     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
         """Initialize Anthropic-specific default parameters"""
@@ -330,7 +331,9 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
             messages=[{"role": "user", "content": response}],
             max_tokens=params.maxTokens,
         )
-
+        await self.show_assistant_message(
+            str(structured_response), title="ASSISTANT/STRUCTURED"
+        )
         return structured_response
 
     @classmethod
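The reordering in the first hunk matters when the base __init__ does work that reads subclass attributes: provider and logger must exist before super().__init__ runs. An illustrative, library-free example of the failure mode the swap avoids; Base, Before, and After are hypothetical:

    class Base:
        def __init__(self, **kwargs):
            # Base may log or display progress during construction
            print(f"initializing, provider={getattr(self, 'provider', None)}")

    class Before(Base):
        def __init__(self, **kwargs):
            super().__init__(**kwargs)   # provider not set yet, reads None
            self.provider = "Anthropic"

    class After(Base):
        def __init__(self, **kwargs):
            self.provider = "Anthropic"  # set first, as in 0.0.12
            super().__init__(**kwargs)

    Before()  # initializing, provider=None
    After()   # initializing, provider=Anthropic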
--- a/mcp_agent/workflows/llm/augmented_llm_openai.py
+++ b/mcp_agent/workflows/llm/augmented_llm_openai.py
@@ -97,12 +97,7 @@ class OpenAIAugmentedLLM(
             use_history=True,
         )
 
-    async def generate(self, message, request_params: RequestParams | None = None):
-        """
-        Process a query using an LLM and available tools.
-        The default implementation uses OpenAI's ChatCompletion as the LLM.
-        Override this method to use a different LLM.
-        """
+    def _api_key(self) -> str:
         config = self.context.config
         api_key = None
 
@@ -121,9 +116,22 @@ class OpenAIAugmentedLLM(
                 "Add it to your configuration file under openai.api_key\n"
                 "Or set the OPENAI_API_KEY environment variable",
             )
+        return api_key
+
+    def _base_url(self) -> str:
+        return (
+            self.context.config.openai.base_url if self.context.config.openai else None
+        )
+
+    async def generate(self, message, request_params: RequestParams | None = None):
+        """
+        Process a query using an LLM and available tools.
+        The default implementation uses OpenAI's ChatCompletion as the LLM.
+        Override this method to use a different LLM.
+        """
 
         try:
-            openai_client = OpenAI(api_key=api_key, base_url=config.openai.base_url)
+            openai_client = OpenAI(api_key=self._api_key(), base_url=self._base_url())
             messages: List[ChatCompletionMessageParam] = []
             params = self.get_request_params(request_params)
         except AuthenticationError as e:
@@ -356,8 +364,8 @@ class OpenAIAugmentedLLM(
         # Next we pass the text through instructor to extract structured data
         client = instructor.from_openai(
             OpenAI(
-                api_key=self.context.config.openai.api_key,
-                base_url=self.context.config.openai.base_url,
+                api_key=self._api_key(),
+                base_url=self._base_url(),
             ),
             mode=instructor.Mode.TOOLS_STRICT,
         )
@@ -373,6 +381,9 @@ class OpenAIAugmentedLLM(
                 {"role": "user", "content": response},
             ],
         )
+        await self.show_assistant_message(
+            str(structured_response), title="ASSISTANT/STRUCTURED"
+        )
 
         return structured_response
 
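The extracted _api_key and _base_url helpers centralize credential lookup so generate and generate_structured no longer duplicate it. A hedged sketch of the precedence the error message implies, config value first and then the environment variable; resolve_openai_api_key is a hypothetical stand-alone version, and the first message line is assumed (the diff truncates it):

    import os

    def resolve_openai_api_key(config_api_key: str | None) -> str:
        # Assumed precedence: explicit config value, then OPENAI_API_KEY
        api_key = config_api_key or os.environ.get("OPENAI_API_KEY")
        if not api_key:
            raise ValueError(
                "No OpenAI API key configured.\n"  # leading text assumed
                "Add it to your configuration file under openai.api_key\n"
                "Or set the OPENAI_API_KEY environment variable"
            )
        return api_key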
--- a/mcp_agent/workflows/llm/model_factory.py
+++ b/mcp_agent/workflows/llm/model_factory.py
@@ -62,18 +62,23 @@ class ModelFactory:
         "o1-preview": Provider.OPENAI,
         "o3-mini": Provider.OPENAI,
         "claude-3-haiku-20240307": Provider.ANTHROPIC,
+        "claude-3-5-haiku-20241022": Provider.ANTHROPIC,
+        "claude-3-5-haiku-latest": Provider.ANTHROPIC,
         "claude-3-5-sonnet-20240620": Provider.ANTHROPIC,
         "claude-3-5-sonnet-20241022": Provider.ANTHROPIC,
         "claude-3-5-sonnet-latest": Provider.ANTHROPIC,
+        "claude-3-7-sonnet-20250219": Provider.ANTHROPIC,
+        "claude-3-7-sonnet-latest": Provider.ANTHROPIC,
         "claude-3-opus-20240229": Provider.ANTHROPIC,
         "claude-3-opus-latest": Provider.ANTHROPIC,
     }
 
     MODEL_ALIASES = {
-        "sonnet": "claude-3-5-sonnet-latest",
+        "sonnet": "claude-3-7-sonnet-latest",
         "sonnet35": "claude-3-5-sonnet-latest",
+        "sonnet37": "claude-3-7-sonnet-latest",
         "claude": "claude-3-5-sonnet-latest",
-        "haiku": "claude-3-haiku-20240307",
+        "haiku": "claude-3-5-haiku-latest",
         "haiku3": "claude-3-haiku-20240307",
         "opus": "claude-3-opus-latest",
         "opus3": "claude-3-opus-latest",
@@ -161,15 +166,24 @@ class ModelFactory:
             config.model_name
         )  # Ensure parsed model name isn't overwritten
 
-        llm = llm_class(
-            agent=agent,
-            model=config.model_name,
-            reasoning_effort=config.reasoning_effort.value
-            if config.reasoning_effort
-            else None,
-            request_params=factory_params,
-            name=kwargs.get("name"),
-        )
+        # Forward all keyword arguments to LLM constructor
+        llm_args = {
+            "agent": agent,
+            "model": config.model_name,
+            "request_params": factory_params,
+            "name": kwargs.get("name"),
+        }
+
+        # Add reasoning effort if available
+        if config.reasoning_effort:
+            llm_args["reasoning_effort"] = config.reasoning_effort.value
+
+        # Forward all other kwargs (including verb)
+        for key, value in kwargs.items():
+            if key not in ["agent", "default_request_params", "name"]:
+                llm_args[key] = value
+
+        llm = llm_class(**llm_args)
         return llm
 
     return factory
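The rewritten factory forwards arbitrary keyword arguments to the LLM constructor, which is how the new verb option reaches AugmentedLLM. A simplified, runnable stand-in; make_factory and EchoLLM are hypothetical, mirroring the kwargs filter above:

    def make_factory(llm_class, **kwargs):
        def factory(agent, model_name, request_params=None):
            llm_args = {
                "agent": agent,
                "model": model_name,
                "request_params": request_params,
                "name": kwargs.get("name"),
            }
            # Pass through everything except the reserved keys
            for key, value in kwargs.items():
                if key not in ("agent", "default_request_params", "name"):
                    llm_args[key] = value  # e.g. verb= reaches the constructor
            return llm_class(**llm_args)
        return factory

    class EchoLLM:
        def __init__(self, **kwargs):
            self.kwargs = kwargs

    llm = make_factory(EchoLLM, name="writer", verb="Planning")("agent-stub", "gpt-4o")
    print(llm.kwargs["verb"])  # Planning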