massgen 0.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of massgen has been flagged as potentially problematic.
- massgen/__init__.py +94 -0
- massgen/agent_config.py +507 -0
- massgen/backend/CLAUDE_API_RESEARCH.md +266 -0
- massgen/backend/Function calling openai responses.md +1161 -0
- massgen/backend/GEMINI_API_DOCUMENTATION.md +410 -0
- massgen/backend/OPENAI_RESPONSES_API_FORMAT.md +65 -0
- massgen/backend/__init__.py +25 -0
- massgen/backend/base.py +180 -0
- massgen/backend/chat_completions.py +228 -0
- massgen/backend/claude.py +661 -0
- massgen/backend/gemini.py +652 -0
- massgen/backend/grok.py +187 -0
- massgen/backend/response.py +397 -0
- massgen/chat_agent.py +440 -0
- massgen/cli.py +686 -0
- massgen/configs/README.md +293 -0
- massgen/configs/creative_team.yaml +53 -0
- massgen/configs/gemini_4o_claude.yaml +31 -0
- massgen/configs/news_analysis.yaml +51 -0
- massgen/configs/research_team.yaml +51 -0
- massgen/configs/single_agent.yaml +18 -0
- massgen/configs/single_flash2.5.yaml +44 -0
- massgen/configs/technical_analysis.yaml +51 -0
- massgen/configs/three_agents_default.yaml +31 -0
- massgen/configs/travel_planning.yaml +51 -0
- massgen/configs/two_agents.yaml +39 -0
- massgen/frontend/__init__.py +20 -0
- massgen/frontend/coordination_ui.py +945 -0
- massgen/frontend/displays/__init__.py +24 -0
- massgen/frontend/displays/base_display.py +83 -0
- massgen/frontend/displays/rich_terminal_display.py +3497 -0
- massgen/frontend/displays/simple_display.py +93 -0
- massgen/frontend/displays/terminal_display.py +381 -0
- massgen/frontend/logging/__init__.py +9 -0
- massgen/frontend/logging/realtime_logger.py +197 -0
- massgen/message_templates.py +431 -0
- massgen/orchestrator.py +1222 -0
- massgen/tests/__init__.py +10 -0
- massgen/tests/multi_turn_conversation_design.md +214 -0
- massgen/tests/multiturn_llm_input_analysis.md +189 -0
- massgen/tests/test_case_studies.md +113 -0
- massgen/tests/test_claude_backend.py +310 -0
- massgen/tests/test_grok_backend.py +160 -0
- massgen/tests/test_message_context_building.py +293 -0
- massgen/tests/test_rich_terminal_display.py +378 -0
- massgen/tests/test_v3_3agents.py +117 -0
- massgen/tests/test_v3_simple.py +216 -0
- massgen/tests/test_v3_three_agents.py +272 -0
- massgen/tests/test_v3_two_agents.py +176 -0
- massgen/utils.py +79 -0
- massgen/v1/README.md +330 -0
- massgen/v1/__init__.py +91 -0
- massgen/v1/agent.py +605 -0
- massgen/v1/agents.py +330 -0
- massgen/v1/backends/gemini.py +584 -0
- massgen/v1/backends/grok.py +410 -0
- massgen/v1/backends/oai.py +571 -0
- massgen/v1/cli.py +351 -0
- massgen/v1/config.py +169 -0
- massgen/v1/examples/fast-4o-mini-config.yaml +44 -0
- massgen/v1/examples/fast_config.yaml +44 -0
- massgen/v1/examples/production.yaml +70 -0
- massgen/v1/examples/single_agent.yaml +39 -0
- massgen/v1/logging.py +974 -0
- massgen/v1/main.py +368 -0
- massgen/v1/orchestrator.py +1138 -0
- massgen/v1/streaming_display.py +1190 -0
- massgen/v1/tools.py +160 -0
- massgen/v1/types.py +245 -0
- massgen/v1/utils.py +199 -0
- massgen-0.0.3.dist-info/METADATA +568 -0
- massgen-0.0.3.dist-info/RECORD +76 -0
- massgen-0.0.3.dist-info/WHEEL +5 -0
- massgen-0.0.3.dist-info/entry_points.txt +2 -0
- massgen-0.0.3.dist-info/licenses/LICENSE +204 -0
- massgen-0.0.3.dist-info/top_level.txt +1 -0
massgen/chat_agent.py
ADDED
@@ -0,0 +1,440 @@

```python
"""
Common chat interface for MassGen agents.

Defines the standard interface that both individual agents and the orchestrator implement,
allowing seamless interaction regardless of whether you're talking to a single agent
or a coordinated multi-agent system.
"""

import uuid
from abc import ABC, abstractmethod
from typing import Dict, List, Optional, Any, AsyncGenerator

from .backend.base import LLMBackend, StreamChunk


class ChatAgent(ABC):
    """
    Abstract base class defining the common chat interface.

    This interface is implemented by both individual agents and the MassGen orchestrator,
    providing a unified way to interact with any type of agent system.
    """

    def __init__(self, session_id: Optional[str] = None):
        self.session_id = session_id or f"chat_session_{uuid.uuid4().hex[:8]}"
        self.conversation_history: List[Dict[str, Any]] = []

    @abstractmethod
    async def chat(
        self,
        messages: List[Dict[str, Any]],
        tools: List[Dict[str, Any]] = None,
        reset_chat: bool = False,
        clear_history: bool = False,
    ) -> AsyncGenerator[StreamChunk, None]:
        """
        Enhanced chat interface supporting tool calls and responses.

        Args:
            messages: List of conversation messages including:
                - {"role": "user", "content": "..."}
                - {"role": "assistant", "content": "...", "tool_calls": [...]}
                - {"role": "tool", "tool_call_id": "...", "content": "..."}
                Or a single string for backwards compatibility
            tools: Optional tools to provide to the agent
            reset_chat: If True, reset the agent's conversation history to the provided messages
            clear_history: If True, clear history but keep system message before processing messages

        Yields:
            StreamChunk: Streaming response chunks
        """
        pass

    async def chat_simple(self, user_message: str) -> AsyncGenerator[StreamChunk, None]:
        """
        Backwards compatible simple chat interface.

        Args:
            user_message: Simple string message from user

        Yields:
            StreamChunk: Streaming response chunks
        """
        messages = [{"role": "user", "content": user_message}]
        async for chunk in self.chat(messages):
            yield chunk

    @abstractmethod
    def get_status(self) -> Dict[str, Any]:
        """Get current agent status and state."""
        pass

    @abstractmethod
    def reset(self) -> None:
        """Reset agent state for new conversation."""
        pass

    # Common conversation management
    def get_conversation_history(self) -> List[Dict[str, Any]]:
        """Get full conversation history."""
        return self.conversation_history.copy()

    def add_to_history(self, role: str, content: str, **kwargs) -> None:
        """Add message to conversation history."""
        message = {"role": role, "content": content}
        message.update(kwargs)  # Support tool_calls, tool_call_id, etc.
        self.conversation_history.append(message)

    def add_tool_message(self, tool_call_id: str, result: str) -> None:
        """Add tool result to conversation history."""
        self.add_to_history("tool", result, tool_call_id=tool_call_id)

    def get_last_tool_calls(self) -> List[Dict[str, Any]]:
        """Get tool calls from the last assistant message."""
        for message in reversed(self.conversation_history):
            if message.get("role") == "assistant" and "tool_calls" in message:
                return message["tool_calls"]
        return []

    def get_session_id(self) -> str:
        """Get session identifier."""
        return self.session_id
```
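To make the contract above concrete, here is a minimal, hypothetical sketch (not part of the package): `EchoAgent` implements `ChatAgent` and is driven through `chat_simple`. It assumes `StreamChunk` accepts the keyword arguments this file itself uses (`type`, `content`) and that a bare `type="done"` chunk is valid.

```python
import asyncio

from massgen.backend.base import StreamChunk
from massgen.chat_agent import ChatAgent


class EchoAgent(ChatAgent):
    """Toy agent that streams back the last user message."""

    async def chat(self, messages, tools=None, reset_chat=False, clear_history=False):
        self.conversation_history.extend(messages)
        text = messages[-1]["content"]
        # Stream one content chunk, then signal completion.
        yield StreamChunk(type="content", content=f"echo: {text}")
        yield StreamChunk(type="done")

    def get_status(self):
        return {"agent_type": "echo", "session_id": self.session_id}

    def reset(self):
        self.conversation_history.clear()


async def main():
    agent = EchoAgent()
    async for chunk in agent.chat_simple("hello"):
        if chunk.type == "content":
            print(chunk.content)  # -> echo: hello


asyncio.run(main())
```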
```python
class SingleAgent(ChatAgent):
    """
    Individual agent implementation with direct backend communication.

    This class wraps a single LLM backend and provides the standard chat interface,
    making it interchangeable with the MassGen orchestrator from the user's perspective.
    """

    def __init__(
        self,
        backend: LLMBackend,
        agent_id: Optional[str] = None,
        system_message: Optional[str] = None,
        session_id: Optional[str] = None,
    ):
        """
        Initialize single agent.

        Args:
            backend: LLM backend for this agent
            agent_id: Optional agent identifier
            system_message: Optional system message for the agent
            session_id: Optional session identifier
        """
        super().__init__(session_id)
        self.backend = backend
        self.agent_id = agent_id or f"agent_{uuid.uuid4().hex[:8]}"
        self.system_message = system_message

        # Add system message to history if provided
        if self.system_message:
            self.conversation_history.append(
                {"role": "system", "content": self.system_message}
            )

    async def _process_stream(
        self, backend_stream, tools: List[Dict[str, Any]] = None
    ) -> AsyncGenerator[StreamChunk, None]:
        """Common streaming logic for processing backend responses."""
        assistant_response = ""
        tool_calls = []
        complete_message = None

        try:
            async for chunk in backend_stream:
                if chunk.type == "content":
                    assistant_response += chunk.content
                    yield chunk
                elif chunk.type == "tool_calls":
                    chunk_tool_calls = getattr(chunk, "tool_calls", []) or []
                    tool_calls.extend(chunk_tool_calls)
                    yield chunk
                elif chunk.type == "complete_message":
                    # Backend provided the complete message structure
                    complete_message = chunk.complete_message
                    # Don't yield this - it's for internal use
                elif chunk.type == "complete_response":
                    # Backend provided the raw Responses API response
                    if chunk.response:
                        complete_message = chunk.response

                        # Extract and yield tool calls for orchestrator processing
                        if (
                            isinstance(chunk.response, dict)
                            and "output" in chunk.response
                        ):
                            response_tool_calls = []
                            for output_item in chunk.response["output"]:
                                if output_item.get("type") == "function_call":
                                    response_tool_calls.append(output_item)
                                    tool_calls.append(
                                        output_item
                                    )  # Also store for fallback

                            # Yield tool calls so orchestrator can process them
                            if response_tool_calls:
                                yield StreamChunk(
                                    type="tool_calls", tool_calls=response_tool_calls
                                )
                    # Complete response is for internal use - don't yield it
                elif chunk.type == "done":
                    # Add complete response to history
                    if complete_message:
                        # For Responses API: complete_message is the response object
                        # with an 'output' array; each item in output is added to
                        # conversation history individually
                        if (
                            isinstance(complete_message, dict)
                            and "output" in complete_message
                        ):
                            self.conversation_history.extend(complete_message["output"])
                        else:
                            # Fallback if it's already in message format
                            self.conversation_history.append(complete_message)
                    elif assistant_response.strip() or tool_calls:
                        # Fallback for legacy backends
                        message_data = {
                            "role": "assistant",
                            "content": assistant_response.strip(),
                        }
                        if tool_calls:
                            message_data["tool_calls"] = tool_calls
                        self.conversation_history.append(message_data)
                    yield chunk
                else:
                    yield chunk

        except Exception as e:
            error_msg = f"Error: {str(e)}"
            self.add_to_history("assistant", error_msg)
            yield StreamChunk(type="content", content=error_msg)
            yield StreamChunk(type="error", error=str(e))
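
    # Editor's note (not in the original source): the branches above define the
    # StreamChunk protocol this class expects from a backend:
    #   "content"           - incremental text; accumulated and re-yielded
    #   "tool_calls"        - tool invocations; collected and re-yielded
    #   "complete_message"  - full message structure; kept for history, not yielded
    #   "complete_response" - raw Responses API payload; its function_call items
    #                         are re-yielded as a synthetic "tool_calls" chunk
    #   "done"              - end of stream; conversation history is finalized
    #   anything else       - passed through unchanged
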
    async def chat(
        self,
        messages: List[Dict[str, Any]],
        tools: List[Dict[str, Any]] = None,
        reset_chat: bool = False,
        clear_history: bool = False,
    ) -> AsyncGenerator[StreamChunk, None]:
        """Process messages through single backend with tool support."""
        if clear_history:
            # Clear history but keep system message if it exists
            system_messages = [
                msg for msg in self.conversation_history if msg.get("role") == "system"
            ]
            self.conversation_history = system_messages.copy()

        if reset_chat:
            # Reset conversation history to the provided messages
            self.conversation_history = messages.copy()
            backend_messages = self.conversation_history.copy()
        else:
            # Regular conversation - append new messages to agent's history
            self.conversation_history.extend(messages)
            backend_messages = self.conversation_history.copy()
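
        # Editor's note (not in the original source): the history flags compose as
        #   default            -> append `messages` to the existing history
        #   clear_history=True -> first drop everything except system messages
        #   reset_chat=True    -> replace the history wholesale with `messages`
        # clear_history runs first, so setting both is equivalent to reset_chat alone.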

        # Create backend stream and process it
        backend_stream = self.backend.stream_with_tools(
            messages=backend_messages,
            tools=tools,  # Use provided tools (for MassGen workflow)
            agent_id=self.agent_id,
            session_id=self.session_id,
        )

        async for chunk in self._process_stream(backend_stream, tools):
            yield chunk

    def get_status(self) -> Dict[str, Any]:
        """Get current agent status."""
        return {
            "agent_type": "single",
            "agent_id": self.agent_id,
            "session_id": self.session_id,
            "system_message": self.system_message,
            "conversation_length": len(self.conversation_history),
        }

    def reset(self) -> None:
        """Reset conversation for new chat."""
        self.conversation_history.clear()

        # Re-add system message if it exists
        if self.system_message:
            self.conversation_history.append(
                {"role": "system", "content": self.system_message}
            )

    def set_model(self, model: str) -> None:
        """Set the model for this agent."""
        self.model = model

    def set_system_message(self, system_message: str) -> None:
        """Set or update the system message."""
        self.system_message = system_message

        # Remove old system message if exists
        if (
            self.conversation_history
            and self.conversation_history[0].get("role") == "system"
        ):
            self.conversation_history.pop(0)

        # Add new system message at the beginning
        self.conversation_history.insert(
            0, {"role": "system", "content": system_message}
        )
```
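Since `chat()` only calls `self.backend.stream_with_tools(...)` and async-iterates the result, a duck-typed stand-in is enough to exercise `SingleAgent` without network access. A hypothetical sketch (`FakeBackend` is a test double, not one of the real backends in `massgen.backend`):

```python
import asyncio

from massgen.backend.base import StreamChunk
from massgen.chat_agent import SingleAgent


class FakeBackend:
    """Test double exposing only what SingleAgent.chat() actually calls."""

    async def stream_with_tools(self, messages, tools=None, **kwargs):
        yield StreamChunk(type="content", content="Hello from the fake backend.")
        yield StreamChunk(type="done")


async def main():
    agent = SingleAgent(backend=FakeBackend(), system_message="Be terse.")
    async for chunk in agent.chat_simple("hi"):
        if chunk.type == "content":
            print(chunk.content)
    # system + user + assistant messages recorded by the "done" fallback
    print(agent.get_status()["conversation_length"])  # -> 3


asyncio.run(main())
```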
```python
class ConfigurableAgent(SingleAgent):
    """
    Single agent that uses AgentConfig for advanced configuration.

    This bridges the gap between SingleAgent and the MassGen system by supporting
    all the advanced configuration options (web search, code execution, etc.)
    while maintaining the simple chat interface.
    """

    def __init__(
        self,
        config,  # AgentConfig - avoid circular import
        backend: LLMBackend,
        session_id: Optional[str] = None,
    ):
        """
        Initialize configurable agent.

        Args:
            config: AgentConfig with all settings
            backend: LLM backend
            session_id: Optional session identifier
        """
        super().__init__(
            backend=backend,
            agent_id=config.agent_id,
            system_message=config.custom_system_instruction,
            session_id=session_id,
        )
        self.config = config

        # ConfigurableAgent relies on backend_params for model configuration

    async def chat(
        self,
        messages: List[Dict[str, Any]],
        tools: List[Dict[str, Any]] = None,
        reset_chat: bool = False,
        clear_history: bool = False,
    ) -> AsyncGenerator[StreamChunk, None]:
        """Process messages with full AgentConfig capabilities."""
        if clear_history:
            # Clear history but keep system message if it exists
            system_messages = [
                msg for msg in self.conversation_history if msg.get("role") == "system"
            ]
            self.conversation_history = system_messages.copy()

        if reset_chat:
            # Reset conversation history to the provided messages
            self.conversation_history = messages.copy()
            backend_messages = self.conversation_history.copy()
        else:
            # Regular conversation - append new messages to agent's history
            self.conversation_history.extend(messages)
            backend_messages = self.conversation_history.copy()

        # Create backend stream with config parameters and process it
        backend_params = self.config.get_backend_params()
        backend_stream = self.backend.stream_with_tools(
            messages=backend_messages,
            tools=tools,  # Use provided tools (for MassGen workflow)
            agent_id=self.agent_id,
            session_id=self.session_id,
            **backend_params,
        )

        async for chunk in self._process_stream(backend_stream, tools):
            yield chunk

    def get_status(self) -> Dict[str, Any]:
        """Get current agent status with config details."""
        status = super().get_status()
        status.update(
            {
                "agent_type": "configurable",
                "config": self.config.to_dict(),
                "capabilities": {
                    "web_search": self.config.backend_params.get(
                        "enable_web_search", False
                    ),
                    "code_execution": self.config.backend_params.get(
                        "enable_code_interpreter", False
                    ),
                },
            }
        )
        return status
```
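`ConfigurableAgent` touches only a small surface of its config object: `agent_id`, `custom_system_instruction`, `backend_params`, `get_backend_params()`, and `to_dict()`. For experimentation, a hypothetical stand-in with just those members is enough (the real `massgen.agent_config.AgentConfig` is much richer):

```python
from dataclasses import dataclass, field
from typing import Any, Dict, Optional


@dataclass
class MiniConfig:
    """Hypothetical stand-in exposing only what ConfigurableAgent uses."""

    agent_id: Optional[str] = None
    custom_system_instruction: Optional[str] = None
    backend_params: Dict[str, Any] = field(default_factory=dict)

    def get_backend_params(self) -> Dict[str, Any]:
        return dict(self.backend_params)

    def to_dict(self) -> Dict[str, Any]:
        return {"agent_id": self.agent_id, "backend_params": self.backend_params}


config = MiniConfig(
    agent_id="researcher",
    custom_system_instruction="Cite your sources.",
    backend_params={"enable_web_search": True},
)
# ConfigurableAgent(config=config, backend=...) would then report
# "web_search": True under get_status()["capabilities"].
```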
```python
# =============================================================================
# CONVENIENCE FUNCTIONS
# =============================================================================


def create_simple_agent(
    backend: LLMBackend, system_message: str = None, agent_id: str = None
) -> SingleAgent:
    """Create a simple single agent."""
    # Use MassGen evaluation system message if no custom system message provided
    if system_message is None:
        from .message_templates import MessageTemplates

        templates = MessageTemplates()
        system_message = templates.evaluation_system_message()

    return SingleAgent(
        backend=backend, agent_id=agent_id, system_message=system_message
    )

def create_expert_agent(
    domain: str, backend: LLMBackend, model: str = "gpt-4o-mini"
) -> ConfigurableAgent:
    """Create an expert agent for a specific domain."""
    from .agent_config import AgentConfig

    config = AgentConfig.for_expert_domain(domain, model=model)
    return ConfigurableAgent(config=config, backend=backend)


def create_research_agent(
    backend: LLMBackend, model: str = "gpt-4o-mini"
) -> ConfigurableAgent:
    """Create a research agent with web search capabilities."""
    from .agent_config import AgentConfig

    config = AgentConfig.for_research_task(model=model)
    return ConfigurableAgent(config=config, backend=backend)


def create_computational_agent(
    backend: LLMBackend, model: str = "gpt-4o-mini"
) -> ConfigurableAgent:
    """Create a computational agent with code execution."""
    from .agent_config import AgentConfig

    config = AgentConfig.for_computational_task(model=model)
    return ConfigurableAgent(config=config, backend=backend)
```