fast-agent-mcp 0.2.18__py3-none-any.whl → 0.2.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_agent_mcp-0.2.18.dist-info → fast_agent_mcp-0.2.20.dist-info}/METADATA +15 -15
- {fast_agent_mcp-0.2.18.dist-info → fast_agent_mcp-0.2.20.dist-info}/RECORD +20 -21
- mcp_agent/__init__.py +1 -2
- mcp_agent/agents/base_agent.py +6 -2
- mcp_agent/agents/workflow/parallel_agent.py +53 -38
- mcp_agent/agents/workflow/router_agent.py +22 -17
- mcp_agent/config.py +5 -4
- mcp_agent/context.py +15 -11
- mcp_agent/core/fastagent.py +248 -217
- mcp_agent/executor/executor.py +8 -9
- mcp_agent/llm/augmented_llm.py +37 -3
- mcp_agent/llm/providers/augmented_llm_anthropic.py +1 -1
- mcp_agent/llm/providers/augmented_llm_openai.py +5 -2
- mcp_agent/mcp/mcp_aggregator.py +114 -119
- mcp_agent/mcp/mcp_connection_manager.py +2 -1
- mcp_agent/mcp_server/agent_server.py +4 -1
- mcp_agent/mcp_server_registry.py +1 -0
- mcp_agent/logging/tracing.py +0 -138
- {fast_agent_mcp-0.2.18.dist-info → fast_agent_mcp-0.2.20.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.2.18.dist-info → fast_agent_mcp-0.2.20.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.2.18.dist-info → fast_agent_mcp-0.2.20.dist-info}/licenses/LICENSE +0 -0
mcp_agent/core/fastagent.py
CHANGED
@@ -13,6 +13,7 @@ from pathlib import Path
 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, TypeVar
 
 import yaml
+from opentelemetry import trace
 
 from mcp_agent import config
 from mcp_agent.app import MCPApp
@@ -76,81 +77,97 @@ class FastAgent:
         name: str,
         config_path: str | None = None,
         ignore_unknown_args: bool = False,
+        parse_cli_args: bool = True,  # Add new parameter with default True
     ) -> None:
         """
-        Initialize the
+        Initialize the fast-agent application.
 
         Args:
             name: Name of the application
             config_path: Optional path to config file
             ignore_unknown_args: Whether to ignore unknown command line arguments
+                when parse_cli_args is True.
+            parse_cli_args: If True, parse command line arguments using argparse.
+                Set to False when embedding FastAgent in another framework
+                (like FastAPI/Uvicorn) that handles its own arguments.
         """
- … (removed lines not shown in this diff view)
-        else:
-            self.args = parser.parse_args()
+        self.args = argparse.Namespace()  # Initialize args always
+
+        # --- Wrap argument parsing logic ---
+        if parse_cli_args:
+            # Setup command line argument parsing
+            parser = argparse.ArgumentParser(description="DirectFastAgent Application")
+            parser.add_argument(
+                "--model",
+                help="Override the default model for all agents",
+            )
+            parser.add_argument(
+                "--agent",
+                default="default",
+                help="Specify the agent to send a message to (used with --message)",
+            )
+            parser.add_argument(
+                "-m",
+                "--message",
+                help="Message to send to the specified agent",
+            )
+            parser.add_argument(
+                "-p", "--prompt-file", help="Path to a prompt file to use (either text or JSON)"
+            )
+            parser.add_argument(
+                "--quiet",
+                action="store_true",
+                help="Disable progress display, tool and message logging for cleaner output",
+            )
+            parser.add_argument(
+                "--version",
+                action="store_true",
+                help="Show version and exit",
+            )
+            parser.add_argument(
+                "--server",
+                action="store_true",
+                help="Run as an MCP server",
+            )
+            parser.add_argument(
+                "--transport",
+                choices=["sse", "stdio"],
+                default="sse",
+                help="Transport protocol to use when running as a server (sse or stdio)",
+            )
+            parser.add_argument(
+                "--port",
+                type=int,
+                default=8000,
+                help="Port to use when running as a server with SSE transport",
+            )
+            parser.add_argument(
+                "--host",
+                default="0.0.0.0",
+                help="Host address to bind to when running as a server with SSE transport",
+            )
 
- … (removed lines not shown in this diff view)
+            if ignore_unknown_args:
+                known_args, _ = parser.parse_known_args()
+                self.args = known_args
+            else:
+                # Use parse_known_args here too, to avoid crashing on uvicorn args etc.
+                # even if ignore_unknown_args is False, we only care about *our* args.
+                known_args, unknown = parser.parse_known_args()
+                self.args = known_args
+                # Optionally, warn about unknown args if not ignoring?
+                # if unknown and not ignore_unknown_args:
+                #     logger.warning(f"Ignoring unknown command line arguments: {unknown}")
+
+            # Handle version flag
+            if self.args.version:
+                try:
+                    app_version = get_version("fast-agent-mcp")
+                except:  # noqa: E722
+                    app_version = "unknown"
+                print(f"fast-agent-mcp v{app_version}")
+                sys.exit(0)
+        # --- End of wrapped logic ---
 
         self.name = name
         self.config_path = config_path
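The new `parse_cli_args` flag exists so FastAgent can live inside a host process that owns the command line (Uvicorn, FastAPI, a test runner). The sketch below is illustrative only: it assumes agent registration happens at decoration time and that the `AgentApp` wrapper exposes a `send()` helper, as the fast-agent README examples suggest; the route, agent instruction, and app names are made up.

```python
# Sketch only: embedding FastAgent under Uvicorn/FastAPI.
# parse_cli_args=False stops FastAgent from consuming Uvicorn's own sys.argv.
from fastapi import FastAPI

from mcp_agent.core.fastagent import FastAgent

fast = FastAgent("embedded-demo", parse_cli_args=False)
app = FastAPI()


@fast.agent(instruction="You are a helpful assistant.")
async def setup() -> None:
    # The decorator registers the agent definition; the body is unused here.
    pass


@app.get("/ask")
async def ask(q: str) -> dict:
    # fast.run() yields the AgentApp wrapper created in the diff above.
    async with fast.run() as agents:
        answer = await agents.send(q)
    return {"answer": answer}
```

A real deployment would likely enter `fast.run()` once (for example in a FastAPI lifespan handler) rather than per request, so MCP servers are not restarted on every call.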
@@ -220,164 +237,178 @@ class FastAgent:
         had_error = False
         await self.app.initialize()
 
-        # Handle quiet mode
- … (removed lines not shown in this diff view)
-                            self.context,
-                            model=model,
-                            request_params=request_params,
-                            cli_model=self.args.model if hasattr(self, "args") else None,
-                        )
-
-                # Create all agents in dependency order
-                active_agents = await create_agents_in_dependency_order(
-                    self.app,
-                    self.agents,
-                    model_factory_func,
-                )
-
-                # Create a wrapper with all agents for simplified access
-                wrapper = AgentApp(active_agents)
-
-                # Handle command line options that should be processed after agent initialization
-
-                # Handle --server option
-                if hasattr(self, "args") and self.args.server:
-                    try:
-                        # Print info message if not in quiet mode
-                        if not quiet_mode:
-                            print(f"Starting FastAgent '{self.name}' in server mode")
-                            print(f"Transport: {self.args.transport}")
-                            if self.args.transport == "sse":
-                                print(f"Listening on {self.args.host}:{self.args.port}")
-                            print("Press Ctrl+C to stop")
-
-                        # Create the MCP server
-                        from mcp_agent.mcp_server import AgentMCPServer
-
-                        mcp_server = AgentMCPServer(
-                            agent_app=wrapper,
-                            server_name=f"{self.name}-MCP-Server",
-                        )
-
-                        # Run the server directly (this is a blocking call)
-                        await mcp_server.run_async(
-                            transport=self.args.transport, host=self.args.host, port=self.args.port
+        # Handle quiet mode and CLI model override safely
+        # Define these *before* they are used, checking if self.args exists and has the attributes
+        quiet_mode = hasattr(self.args, "quiet") and self.args.quiet
+        cli_model_override = (
+            self.args.model if hasattr(self.args, "model") and self.args.model else None
+        )  # Define cli_model_override here
+        tracer = trace.get_tracer(__name__)
+        with tracer.start_as_current_span(self.name):
+            try:
+                async with self.app.run():
+                    # Apply quiet mode if requested
+                    if (
+                        quiet_mode
+                        and hasattr(self.app.context, "config")
+                        and hasattr(self.app.context.config, "logger")
+                    ):
+                        # Update our app's config directly
+                        self.app.context.config.logger.progress_display = False
+                        self.app.context.config.logger.show_chat = False
+                        self.app.context.config.logger.show_tools = False
+
+                        # Directly disable the progress display singleton
+                        from mcp_agent.progress_display import progress_display
+
+                        progress_display.stop()
+
+                    # Pre-flight validation
+                    if 0 == len(self.agents):
+                        raise AgentConfigError(
+                            "No agents defined. Please define at least one agent."
                         )
-
- … (removed lines not shown in this diff view)
-            if self.args.message:
-                agent_name = self.args.agent
-                message = self.args.message
-
-                if agent_name not in active_agents:
-                    available_agents = ", ".join(active_agents.keys())
-                    print(
-                        f"\n\nError: Agent '{agent_name}' not found. Available agents: {available_agents}"
+                    validate_server_references(self.context, self.agents)
+                    validate_workflow_references(self.agents)
+
+                    # Get a model factory function
+                    # Now cli_model_override is guaranteed to be defined
+                    def model_factory_func(model=None, request_params=None):
+                        return get_model_factory(
+                            self.context,
+                            model=model,
+                            request_params=request_params,
+                            cli_model=cli_model_override,  # Use the variable defined above
                         )
-                    raise SystemExit(1)
 
- … (removed lines not shown in this diff view)
-                    # In quiet mode, just print the raw response
-                    # The chat display should already be turned off by the configuration
-                    if self.args.quiet:
-                        print(f"{response}")
-
-                        raise SystemExit(0)
-                except Exception as e:
-                    print(f"\n\nError sending message to agent '{agent_name}': {str(e)}")
-                    raise SystemExit(1)
-
-            if self.args.prompt_file:
-                agent_name = self.args.agent
-                prompt: List[PromptMessageMultipart] = load_prompt_multipart(
-                    Path(self.args.prompt_file)
+                    # Create all agents in dependency order
+                    active_agents = await create_agents_in_dependency_order(
+                        self.app,
+                        self.agents,
+                        model_factory_func,
                     )
-                if agent_name not in active_agents:
-                    available_agents = ", ".join(active_agents.keys())
-                    print(
-                        f"\n\nError: Agent '{agent_name}' not found. Available agents: {available_agents}"
-                    )
-                    raise SystemExit(1)
-
-                try:
-                    # Get response from the agent
-                    agent = active_agents[agent_name]
-                    response = await agent.generate(prompt)
-
-                    # In quiet mode, just print the raw response
-                    # The chat display should already be turned off by the configuration
-                    if self.args.quiet:
-                        print(f"{response.last_text()}")
 
+                    # Create a wrapper with all agents for simplified access
+                    wrapper = AgentApp(active_agents)
+
+                    # Handle command line options that should be processed after agent initialization
+
+                    # Handle --server option
+                    # Check if parse_cli_args was True before checking self.args.server
+                    if hasattr(self.args, "server") and self.args.server:
+                        try:
+                            # Print info message if not in quiet mode
+                            if not quiet_mode:
+                                print(f"Starting FastAgent '{self.name}' in server mode")
+                                print(f"Transport: {self.args.transport}")
+                                if self.args.transport == "sse":
+                                    print(f"Listening on {self.args.host}:{self.args.port}")
+                                print("Press Ctrl+C to stop")
+
+                            # Create the MCP server
+                            from mcp_agent.mcp_server import AgentMCPServer
+
+                            mcp_server = AgentMCPServer(
+                                agent_app=wrapper,
+                                server_name=f"{self.name}-MCP-Server",
+                            )
+
+                            # Run the server directly (this is a blocking call)
+                            await mcp_server.run_async(
+                                transport=self.args.transport,
+                                host=self.args.host,
+                                port=self.args.port,
+                            )
+                        except KeyboardInterrupt:
+                            if not quiet_mode:
+                                print("\nServer stopped by user (Ctrl+C)")
+                        except Exception as e:
+                            if not quiet_mode:
+                                import traceback
+
+                                traceback.print_exc()
+                                print(f"\nServer stopped with error: {e}")
+
+                        # Exit after server shutdown
                         raise SystemExit(0)
-                except Exception as e:
-                    print(f"\n\nError sending message to agent '{agent_name}': {str(e)}")
-                    raise SystemExit(1)
-
-            yield wrapper
-
-        except (
-            ServerConfigError,
-            ProviderKeyError,
-            AgentConfigError,
-            ServerInitializationError,
-            ModelConfigError,
-            CircularDependencyError,
-            PromptExitError,
-        ) as e:
-            had_error = True
-            self._handle_error(e)
-            raise SystemExit(1)
 
- … (removed lines not shown in this diff view)
+                    # Handle direct message sending if --message is provided
+                    if hasattr(self.args, "message") and self.args.message:
+                        agent_name = self.args.agent
+                        message = self.args.message
+
+                        if agent_name not in active_agents:
+                            available_agents = ", ".join(active_agents.keys())
+                            print(
+                                f"\n\nError: Agent '{agent_name}' not found. Available agents: {available_agents}"
+                            )
+                            raise SystemExit(1)
+
+                        try:
+                            # Get response from the agent
+                            agent = active_agents[agent_name]
+                            response = await agent.send(message)
+
+                            # In quiet mode, just print the raw response
+                            # The chat display should already be turned off by the configuration
+                            if self.args.quiet:
+                                print(f"{response}")
+
+                            raise SystemExit(0)
+                        except Exception as e:
+                            print(f"\n\nError sending message to agent '{agent_name}': {str(e)}")
+                            raise SystemExit(1)
+
+                    if hasattr(self.args, "prompt_file") and self.args.prompt_file:
+                        agent_name = self.args.agent
+                        prompt: List[PromptMessageMultipart] = load_prompt_multipart(
+                            Path(self.args.prompt_file)
+                        )
+                        if agent_name not in active_agents:
+                            available_agents = ", ".join(active_agents.keys())
+                            print(
+                                f"\n\nError: Agent '{agent_name}' not found. Available agents: {available_agents}"
+                            )
+                            raise SystemExit(1)
+
+                        try:
+                            # Get response from the agent
+                            agent = active_agents[agent_name]
+                            response = await agent.generate(prompt)
+
+                            # In quiet mode, just print the raw response
+                            # The chat display should already be turned off by the configuration
+                            if self.args.quiet:
+                                print(f"{response.last_text()}")
+
+                            raise SystemExit(0)
+                        except Exception as e:
+                            print(f"\n\nError sending message to agent '{agent_name}': {str(e)}")
+                            raise SystemExit(1)
+
+                    yield wrapper
+
+            except (
+                ServerConfigError,
+                ProviderKeyError,
+                AgentConfigError,
+                ServerInitializationError,
+                ModelConfigError,
+                CircularDependencyError,
+                PromptExitError,
+            ) as e:
+                had_error = True
+                self._handle_error(e)
+                raise SystemExit(1)
+
+            finally:
+                # Clean up any active agents
+                if active_agents and not had_error:
+                    for agent in active_agents.values():
+                        try:
+                            await agent.shutdown()
+                        except Exception:
+                            pass
 
     def _handle_error(self, e: Exception, error_type: Optional[str] = None) -> None:
         """
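The run path now opens an OpenTelemetry span named after the application (`trace.get_tracer(__name__)` plus `start_as_current_span`). Without a configured tracer provider this is a no-op, so nothing is exported by default. A minimal sketch of wiring a provider so those spans become visible, using only the public OpenTelemetry SDK (the span name here is illustrative):

```python
# Sketch: configure a tracer provider so spans opened inside FastAgent.run() are exported.
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor

provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(provider)

tracer = trace.get_tracer(__name__)
with tracer.start_as_current_span("fast-agent demo"):
    # Anything executed here (e.g. entering fast.run()) is recorded under this span;
    # without a provider, get_tracer() silently returns a no-op tracer.
    pass
```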
mcp_agent/executor/executor.py
CHANGED
@@ -1,4 +1,5 @@
 import asyncio
+import contextvars
 import functools
 from abc import ABC, abstractmethod
 from contextlib import asynccontextmanager
@@ -206,13 +207,13 @@ class AsyncioExecutor(Executor):
         else:
             # Execute the callable and await if it returns a coroutine
             loop = asyncio.get_running_loop()
-
+            ctx = contextvars.copy_context()
             # If kwargs are provided, wrap the function with partial
             if kwargs:
                 wrapped_task = functools.partial(task, **kwargs)
-                result = await loop.run_in_executor(None, wrapped_task)
+                result = await loop.run_in_executor(None, lambda: ctx.run(wrapped_task))
             else:
-                result = await loop.run_in_executor(None, task)
+                result = await loop.run_in_executor(None, lambda: ctx.run(task))
 
             # Handle case where the sync function returns a coroutine
             if asyncio.iscoroutine(result):
@@ -234,12 +235,10 @@ class AsyncioExecutor(Executor):
         *tasks: Callable[..., R] | Coroutine[Any, Any, R],
         **kwargs: Any,
     ) -> List[R | BaseException]:
- … (removed lines not shown in this diff view)
-            return_exceptions=True,
-        )
+        return await asyncio.gather(
+            *(self._execute_task(task, **kwargs) for task in tasks),
+            return_exceptions=True,
+        )
 
     async def execute_streaming(
         self,
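The executor change copies the current `contextvars` context before handing work to a thread, so context-local state (for example the tracing context set up above) survives `run_in_executor`. A self-contained sketch of that pattern, independent of the fast-agent classes (the variable names are illustrative):

```python
# Sketch of the contextvars pattern adopted in AsyncioExecutor._execute_task.
import asyncio
import contextvars

request_id: contextvars.ContextVar[str] = contextvars.ContextVar("request_id", default="unset")


def blocking_work() -> str:
    # Runs in a worker thread; it only sees the caller's ContextVar value if the
    # copied context is used to invoke it.
    return request_id.get()


async def main() -> None:
    request_id.set("req-42")
    loop = asyncio.get_running_loop()

    plain = await loop.run_in_executor(None, blocking_work)  # -> "unset"
    ctx = contextvars.copy_context()
    wrapped = await loop.run_in_executor(None, lambda: ctx.run(blocking_work))  # -> "req-42"
    print(plain, wrapped)


asyncio.run(main())
```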
mcp_agent/llm/augmented_llm.py
CHANGED
@@ -2,6 +2,7 @@ from abc import abstractmethod
 from typing import (
     TYPE_CHECKING,
     Any,
+    Dict,
     Generic,
     List,
     Optional,
@@ -59,7 +60,36 @@ if TYPE_CHECKING:
 HUMAN_INPUT_TOOL_NAME = "__human_input__"
 
 
-class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT, MessageT]):
+def deep_merge(dict1: Dict[Any, Any], dict2: Dict[Any, Any]) -> Dict[Any, Any]:
+    """
+    Recursively merges `dict2` into `dict1` in place.
+
+    If a key exists in both dictionaries and their values are dictionaries,
+    the function merges them recursively. Otherwise, the value from `dict2`
+    overwrites or is added to `dict1`.
+
+    Args:
+        dict1 (Dict): The dictionary to be updated.
+        dict2 (Dict): The dictionary to merge into `dict1`.
+
+    Returns:
+        Dict: The updated `dict1`.
+    """
+    for key in dict2:
+        if (
+            key in dict1
+            and isinstance(dict1[key], dict)
+            and isinstance(dict2[key], dict)
+        ):
+            deep_merge(dict1[key], dict2[key])
+        else:
+            dict1[key] = dict2[key]
+    return dict1
+
+
+class AugmentedLLM(
+    ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT, MessageT]
+):
     # Common parameter names used across providers
     PARAM_MESSAGES = "messages"
     PARAM_MODEL = "model"
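`deep_merge` folds `dict2` into `dict1` recursively, so nested keys are merged rather than replaced wholesale, and `dict1` is mutated in place. A small worked example (the parameter names are illustrative, not the real RequestParams fields):

```python
# Behaviour sketch for the deep_merge helper added above.
from mcp_agent.llm.augmented_llm import deep_merge

defaults = {"metadata": {"temperature": 0.7, "top_p": 1.0}, "use_history": True}
overrides = {"metadata": {"temperature": 0.2}, "max_iterations": 5}

merged = deep_merge(defaults, overrides)
# defaults is mutated in place and also returned:
# {"metadata": {"temperature": 0.2, "top_p": 1.0}, "use_history": True, "max_iterations": 5}
assert merged is defaults
assert merged["metadata"]["top_p"] == 1.0  # nested key from defaults survives the merge
```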
@@ -171,6 +201,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         # We never expect this for structured() calls - this is for interactive use - developers
         # can do this programatically
         # TODO -- create a "fast-agent" control role rather than magic strings
+
         if multipart_messages[-1].first_text().startswith("***SAVE_HISTORY"):
             parts: list[str] = multipart_messages[-1].first_text().split(" ", 1)
             filename: str = (
@@ -220,6 +251,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         request_params: RequestParams | None = None,
     ) -> Tuple[ModelT | None, PromptMessageMultipart]:
         """Return a structured response from the LLM using the provided messages."""
+
         self._precall(multipart_messages)
         result, assistant_response = await self._apply_prompt_provider_specific_structured(
             multipart_messages, model, request_params
@@ -355,8 +387,10 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
     ) -> RequestParams:
         """Merge default and provided request parameters"""
 
-        merged =
- … (removed line not shown in this diff view)
+        merged = deep_merge(
+            default_params.model_dump(),
+            provided_params.model_dump(exclude_unset=True),
+        )
         final_params = RequestParams(**merged)
 
         return final_params
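The merge now dumps the provided parameters with `exclude_unset=True`, so only fields the caller explicitly set override the defaults; fields left at their Pydantic class defaults no longer clobber configured values. A generic Pydantic sketch of why that matters (the `Params` model is a stand-in, not the real RequestParams, and a shallow dict merge stands in for `deep_merge`):

```python
# Sketch of why exclude_unset=True matters when merging parameter objects.
from pydantic import BaseModel


class Params(BaseModel):  # stand-in for RequestParams
    max_iterations: int = 20
    use_history: bool = True


defaults = Params(max_iterations=10)
provided = Params(use_history=False)  # max_iterations left untouched by the caller

print(provided.model_dump())                    # {'max_iterations': 20, 'use_history': False}
print(provided.model_dump(exclude_unset=True))  # {'use_history': False}

# Merging the exclude_unset dump keeps defaults.max_iterations == 10 instead of
# silently resetting it to the class default of 20. (The real code uses deep_merge
# so nested fields are merged recursively.)
merged = {**defaults.model_dump(), **provided.model_dump(exclude_unset=True)}
print(merged)  # {'max_iterations': 10, 'use_history': False}
```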
mcp_agent/llm/providers/augmented_llm_openai.py
CHANGED
@@ -95,7 +95,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
             model=chosen_model,
             systemPrompt=self.instruction,
             parallel_tool_calls=True,
-            max_iterations=
+            max_iterations=20,
             use_history=True,
         )
 
@@ -222,7 +222,10 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
                     method="tools/call",
                     params=CallToolRequestParams(
                         name=tool_call.function.name,
-                        arguments={}
+                        arguments={}
+                        if not tool_call.function.arguments
+                        or tool_call.function.arguments.strip() == ""
+                        else from_json(tool_call.function.arguments, allow_partial=True),
                     ),
                 )
                 result = await self.call_tool(tool_call_request, tool_call.id)