massgen 0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of massgen might be problematic. Click here for more details.

Files changed (76)
  1. massgen/__init__.py +94 -0
  2. massgen/agent_config.py +507 -0
  3. massgen/backend/CLAUDE_API_RESEARCH.md +266 -0
  4. massgen/backend/Function calling openai responses.md +1161 -0
  5. massgen/backend/GEMINI_API_DOCUMENTATION.md +410 -0
  6. massgen/backend/OPENAI_RESPONSES_API_FORMAT.md +65 -0
  7. massgen/backend/__init__.py +25 -0
  8. massgen/backend/base.py +180 -0
  9. massgen/backend/chat_completions.py +228 -0
  10. massgen/backend/claude.py +661 -0
  11. massgen/backend/gemini.py +652 -0
  12. massgen/backend/grok.py +187 -0
  13. massgen/backend/response.py +397 -0
  14. massgen/chat_agent.py +440 -0
  15. massgen/cli.py +686 -0
  16. massgen/configs/README.md +293 -0
  17. massgen/configs/creative_team.yaml +53 -0
  18. massgen/configs/gemini_4o_claude.yaml +31 -0
  19. massgen/configs/news_analysis.yaml +51 -0
  20. massgen/configs/research_team.yaml +51 -0
  21. massgen/configs/single_agent.yaml +18 -0
  22. massgen/configs/single_flash2.5.yaml +44 -0
  23. massgen/configs/technical_analysis.yaml +51 -0
  24. massgen/configs/three_agents_default.yaml +31 -0
  25. massgen/configs/travel_planning.yaml +51 -0
  26. massgen/configs/two_agents.yaml +39 -0
  27. massgen/frontend/__init__.py +20 -0
  28. massgen/frontend/coordination_ui.py +945 -0
  29. massgen/frontend/displays/__init__.py +24 -0
  30. massgen/frontend/displays/base_display.py +83 -0
  31. massgen/frontend/displays/rich_terminal_display.py +3497 -0
  32. massgen/frontend/displays/simple_display.py +93 -0
  33. massgen/frontend/displays/terminal_display.py +381 -0
  34. massgen/frontend/logging/__init__.py +9 -0
  35. massgen/frontend/logging/realtime_logger.py +197 -0
  36. massgen/message_templates.py +431 -0
  37. massgen/orchestrator.py +1222 -0
  38. massgen/tests/__init__.py +10 -0
  39. massgen/tests/multi_turn_conversation_design.md +214 -0
  40. massgen/tests/multiturn_llm_input_analysis.md +189 -0
  41. massgen/tests/test_case_studies.md +113 -0
  42. massgen/tests/test_claude_backend.py +310 -0
  43. massgen/tests/test_grok_backend.py +160 -0
  44. massgen/tests/test_message_context_building.py +293 -0
  45. massgen/tests/test_rich_terminal_display.py +378 -0
  46. massgen/tests/test_v3_3agents.py +117 -0
  47. massgen/tests/test_v3_simple.py +216 -0
  48. massgen/tests/test_v3_three_agents.py +272 -0
  49. massgen/tests/test_v3_two_agents.py +176 -0
  50. massgen/utils.py +79 -0
  51. massgen/v1/README.md +330 -0
  52. massgen/v1/__init__.py +91 -0
  53. massgen/v1/agent.py +605 -0
  54. massgen/v1/agents.py +330 -0
  55. massgen/v1/backends/gemini.py +584 -0
  56. massgen/v1/backends/grok.py +410 -0
  57. massgen/v1/backends/oai.py +571 -0
  58. massgen/v1/cli.py +351 -0
  59. massgen/v1/config.py +169 -0
  60. massgen/v1/examples/fast-4o-mini-config.yaml +44 -0
  61. massgen/v1/examples/fast_config.yaml +44 -0
  62. massgen/v1/examples/production.yaml +70 -0
  63. massgen/v1/examples/single_agent.yaml +39 -0
  64. massgen/v1/logging.py +974 -0
  65. massgen/v1/main.py +368 -0
  66. massgen/v1/orchestrator.py +1138 -0
  67. massgen/v1/streaming_display.py +1190 -0
  68. massgen/v1/tools.py +160 -0
  69. massgen/v1/types.py +245 -0
  70. massgen/v1/utils.py +199 -0
  71. massgen-0.0.3.dist-info/METADATA +568 -0
  72. massgen-0.0.3.dist-info/RECORD +76 -0
  73. massgen-0.0.3.dist-info/WHEEL +5 -0
  74. massgen-0.0.3.dist-info/entry_points.txt +2 -0
  75. massgen-0.0.3.dist-info/licenses/LICENSE +204 -0
  76. massgen-0.0.3.dist-info/top_level.txt +1 -0
massgen/__init__.py ADDED
@@ -0,0 +1,94 @@
1
+ """
2
+ MassGen - Multi-Agent System Generator (Foundation Release)
3
+
4
+ Built on the proven MassGen framework with working tool message handling,
5
+ async generator patterns, and reliable multi-agent coordination.
6
+
7
+ Key Features:
8
+ - Multi-backend support: Response API (standard format), Claude (Messages API), Grok (Chat API)
9
+ - Builtin tools: Code execution and web search with streaming results
10
+ - Async streaming with proper chat agent interfaces and tool result handling
11
+ - Multi-agent orchestration with voting and consensus mechanisms
12
+ - Real-time frontend displays with multi-region terminal UI
13
+ - CLI with file-based YAML configuration and interactive mode
14
+ - Proper StreamChunk architecture separating tool_calls from builtin_tool_results
15
+
16
+ TODO - Missing Features (to be added in future releases):
17
+ - ✅ Grok backend testing and fixes (COMPLETED)
18
+ - ✅ CLI interface for MassGen (COMPLETED - file-based config, interactive mode, slash commands)
19
+ - ✅ Missing test files recovery (COMPLETED - two agents, three agents)
20
+ - ✅ Multi-turn conversation support (COMPLETED - dynamic context reconstruction)
21
+ - ✅ Chat interface with orchestrator (COMPLETED - async streaming with context)
22
+ - ✅ Fix CLI multi-turn conversation display (COMPLETED - coordination UI integration)
23
+ - ✅ Case study configurations and test commands (COMPLETED - specialized YAML configs)
24
+ - ✅ Claude backend support (COMPLETED - production-ready multi-tool API with streaming)
25
+ - ✅ Claude streaming handler fixes (COMPLETED - proper tool argument capture)
26
+ - ✅ OpenAI builtin tools support (COMPLETED - code execution and web search streaming)
27
+ - ✅ CLI backend parameter passing (COMPLETED - proper ConfigurableAgent integration)
28
+ - ✅ StreamChunk builtin_tool_results support (COMPLETED - separate from regular tool_calls)
29
+ - ✅ Gemini backend support (COMPLETED - streaming with function calling and builtin tools)
30
+ - Orchestrator final_answer_agent configuration support (MEDIUM PRIORITY)
31
+ - Configuration options for voting info in user messages (MEDIUM PRIORITY)
32
+ - Enhanced frontend features from v0.0.1 (MEDIUM PRIORITY)
33
+ - Advanced logging and monitoring capabilities
34
+ - Tool execution with custom functions
35
+ - Performance optimizations
36
+
37
+ Usage:
38
+ from massgen import ResponseBackend, create_simple_agent, Orchestrator
39
+
40
+ backend = ResponseBackend()
41
+ agent = create_simple_agent(backend, "You are a helpful assistant")
42
+ orchestrator = Orchestrator(agents={"agent1": agent})
43
+
44
+ async for chunk in orchestrator.chat_simple("Your question"):
45
+ if chunk.type == "content":
46
+ print(chunk.content, end="")
47
+ """
48
+
49
+ # Import main classes for convenience
50
+ from .backend.response import ResponseBackend
51
+ from .backend.claude import ClaudeBackend
52
+ from .backend.gemini import GeminiBackend
53
+ from .backend.grok import GrokBackend
54
+ from .chat_agent import (
55
+ ChatAgent,
56
+ SingleAgent,
57
+ ConfigurableAgent,
58
+ create_simple_agent,
59
+ create_expert_agent,
60
+ create_research_agent,
61
+ create_computational_agent,
62
+ )
63
+ from .orchestrator import Orchestrator, create_orchestrator
64
+ from .message_templates import MessageTemplates, get_templates
65
+ from .agent_config import AgentConfig
66
+
67
+ __version__ = "0.0.3"
68
+ __author__ = "MassGen Contributors"
69
+
70
+ __all__ = [
71
+ # Backends
72
+ "ResponseBackend",
73
+ "ClaudeBackend",
74
+ "GeminiBackend",
75
+ "GrokBackend",
76
+ # Agents
77
+ "ChatAgent",
78
+ "SingleAgent",
79
+ "ConfigurableAgent",
80
+ "create_simple_agent",
81
+ "create_expert_agent",
82
+ "create_research_agent",
83
+ "create_computational_agent",
84
+ # Orchestrator
85
+ "Orchestrator",
86
+ "create_orchestrator",
87
+ # Configuration
88
+ "AgentConfig",
89
+ "MessageTemplates",
90
+ "get_templates",
91
+ # Metadata
92
+ "__version__",
93
+ "__author__",
94
+ ]
@@ -0,0 +1,507 @@
1
+ """
2
+ Agent configuration for MassGen framework following input_cases_reference.md
3
+ Simplified configuration focused on the proven binary decision approach.
4
+ """
5
+
6
+ from dataclasses import dataclass, field
7
+ from typing import Dict, Optional, Any, TYPE_CHECKING
8
+
9
+ if TYPE_CHECKING:
10
+ from .message_templates import MessageTemplates
11
+
12
+
13
@dataclass
class AgentConfig:
    """Configuration for MassGen agents using the proven binary decision framework.

    This configuration implements the simplified approach from input_cases_reference.md
    that eliminates perfectionism loops through clear binary decisions.

    Args:
        backend_params: Settings passed directly to LLM backend (includes tool enablement)
        message_templates: Custom message templates (None=default)
        agent_id: Optional agent identifier for this configuration
        custom_system_instruction: Additional system instruction prepended to evaluation message
    """

    # Core backend configuration (includes tool enablement flags such as
    # enable_web_search / enable_code_execution, passed through verbatim).
    backend_params: Dict[str, Any] = field(default_factory=dict)

    # Framework configuration; None means "use the package default templates".
    message_templates: Optional["MessageTemplates"] = None

    # Agent customization
    agent_id: Optional[str] = None
    custom_system_instruction: Optional[str] = None

    @classmethod
    def create_openai_config(
        cls,
        model: str = "gpt-4o-mini",
        enable_web_search: bool = False,
        enable_code_interpreter: bool = False,
        **kwargs,
    ) -> "AgentConfig":
        """Create OpenAI configuration following proven patterns.

        Args:
            model: OpenAI model name
            enable_web_search: Enable web search via Responses API
            enable_code_interpreter: Enable code execution for computational tasks
            **kwargs: Additional backend parameters

        Examples:
            # Basic configuration
            config = AgentConfig.create_openai_config("gpt-4o-mini")

            # Research task with web search
            config = AgentConfig.create_openai_config("gpt-4o", enable_web_search=True)

            # Computational task with code execution
            config = AgentConfig.create_openai_config("gpt-4o", enable_code_interpreter=True)
        """
        backend_params = {"model": model, **kwargs}

        # Tool enablement is carried inside backend_params (only when turned on,
        # so the backend sees no spurious False flags).
        if enable_web_search:
            backend_params["enable_web_search"] = True
        if enable_code_interpreter:
            backend_params["enable_code_interpreter"] = True

        return cls(backend_params=backend_params)

    @classmethod
    def create_claude_config(
        cls,
        model: str = "claude-3-sonnet-20240229",
        enable_web_search: bool = False,
        enable_code_execution: bool = False,
        **kwargs,
    ) -> "AgentConfig":
        """Create Anthropic Claude configuration.

        Args:
            model: Claude model name
            enable_web_search: Enable builtin web search tool
            enable_code_execution: Enable builtin code execution tool
            **kwargs: Additional backend parameters
        """
        backend_params = {"model": model, **kwargs}

        if enable_web_search:
            backend_params["enable_web_search"] = True

        if enable_code_execution:
            backend_params["enable_code_execution"] = True

        return cls(backend_params=backend_params)

    @classmethod
    def create_grok_config(
        cls, model: str = "grok-2-1212", enable_web_search: bool = False, **kwargs
    ) -> "AgentConfig":
        """Create xAI Grok configuration.

        Args:
            model: Grok model name
            enable_web_search: Enable Live Search feature
            **kwargs: Additional backend parameters
        """
        backend_params = {"model": model, **kwargs}

        # Grok only exposes web search (no builtin code execution).
        if enable_web_search:
            backend_params["enable_web_search"] = True

        return cls(backend_params=backend_params)

    @classmethod
    def create_gemini_config(
        cls,
        model: str = "gemini-2.5-flash",
        enable_web_search: bool = False,
        enable_code_execution: bool = False,
        **kwargs,
    ) -> "AgentConfig":
        """Create Google Gemini configuration.

        Args:
            model: Gemini model name
            enable_web_search: Enable Google Search retrieval tool
            enable_code_execution: Enable code execution tool
            **kwargs: Additional backend parameters
        """
        backend_params = {"model": model, **kwargs}

        if enable_web_search:
            backend_params["enable_web_search"] = True
        if enable_code_execution:
            backend_params["enable_code_execution"] = True

        return cls(backend_params=backend_params)

    # =============================================================================
    # AGENT CUSTOMIZATION
    # =============================================================================

    def with_custom_instruction(self, instruction: str) -> "AgentConfig":
        """Create a deep copy with the given custom system instruction.

        Deep copy (not dataclasses.replace) so the new config does not share
        the mutable backend_params dict with the original.
        """
        import copy

        new_config = copy.deepcopy(self)
        new_config.custom_system_instruction = instruction
        return new_config

    def with_agent_id(self, agent_id: str) -> "AgentConfig":
        """Create a deep copy with the specified agent ID."""
        import copy

        new_config = copy.deepcopy(self)
        new_config.agent_id = agent_id
        return new_config

    # =============================================================================
    # PROVEN PATTERN CONFIGURATIONS
    # =============================================================================

    @classmethod
    def for_research_task(
        cls, model: str = "gpt-4o", backend: str = "openai"
    ) -> "AgentConfig":
        """Create configuration optimized for research tasks.

        Based on econometrics test success patterns:
        - Enables web search for literature review
        - Uses proven model defaults

        Raises:
            ValueError: If the backend does not support a research configuration.
        """
        if backend == "openai":
            return cls.create_openai_config(model, enable_web_search=True)
        elif backend == "grok":
            return cls.create_grok_config(model, enable_web_search=True)
        elif backend == "claude":
            return cls.create_claude_config(model, enable_web_search=True)
        elif backend == "gemini":
            return cls.create_gemini_config(model, enable_web_search=True)
        else:
            raise ValueError(
                f"Research configuration not available for backend: {backend}"
            )

    @classmethod
    def for_computational_task(
        cls, model: str = "gpt-4o", backend: str = "openai"
    ) -> "AgentConfig":
        """Create configuration optimized for computational tasks.

        Based on Tower of Hanoi test success patterns:
        - Enables code execution for calculations
        - Uses proven model defaults

        Note: Grok is intentionally absent — it has no builtin code execution.

        Raises:
            ValueError: If the backend does not support code execution.
        """
        if backend == "openai":
            return cls.create_openai_config(model, enable_code_interpreter=True)
        elif backend == "claude":
            return cls.create_claude_config(model, enable_code_execution=True)
        elif backend == "gemini":
            return cls.create_gemini_config(model, enable_code_execution=True)
        else:
            raise ValueError(
                f"Computational configuration not available for backend: {backend}"
            )

    @classmethod
    def for_analytical_task(
        cls, model: str = "gpt-4o-mini", backend: str = "openai"
    ) -> "AgentConfig":
        """Create configuration optimized for analytical tasks.

        Based on general reasoning test patterns:
        - No special tools needed
        - Uses efficient model defaults

        Raises:
            ValueError: If the backend is not recognized.
        """
        if backend == "openai":
            return cls.create_openai_config(model)
        elif backend == "claude":
            return cls.create_claude_config(model)
        elif backend == "grok":
            return cls.create_grok_config(model)
        elif backend == "gemini":
            return cls.create_gemini_config(model)
        else:
            raise ValueError(
                f"Analytical configuration not available for backend: {backend}"
            )

    @classmethod
    def for_expert_domain(
        cls,
        domain: str,
        expertise_level: str = "expert",
        model: str = "gpt-4o",
        backend: str = "openai",
    ) -> "AgentConfig":
        """Create configuration for domain expertise.

        Args:
            domain: Domain of expertise (e.g., "econometrics", "computer science")
            expertise_level: Level of expertise ("expert", "specialist", "researcher")
            model: Model to use
            backend: Backend provider

        Raises:
            ValueError: If the backend does not support a domain-expert configuration.
        """
        instruction = f"You are a {expertise_level} in {domain}. Apply your deep domain knowledge and methodological expertise when evaluating answers and providing solutions."

        if backend == "openai":
            config = cls.create_openai_config(model, enable_web_search=True)
        elif backend == "grok":
            config = cls.create_grok_config(model, enable_web_search=True)
        elif backend == "claude":
            # Claude supports builtin web search too; previously missing from
            # this dispatch even though for_research_task handles it.
            config = cls.create_claude_config(model, enable_web_search=True)
        elif backend == "gemini":
            config = cls.create_gemini_config(model, enable_web_search=True)
        else:
            raise ValueError(
                f"Domain expert configuration not available for backend: {backend}"
            )

        config.custom_system_instruction = instruction
        return config

    # =============================================================================
    # CONVERSATION BUILDING
    # =============================================================================

    def build_conversation(
        self,
        task: str,
        agent_summaries: Optional[Dict[str, str]] = None,
        session_id: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Build conversation using the proven MassGen approach.

        Returns complete conversation configuration ready for backend.
        Automatically determines Case 1 vs Case 2 based on agent_summaries.
        """
        from .message_templates import get_templates

        templates = self.message_templates or get_templates()

        # Derive valid agent IDs from agent summaries (None when no summaries).
        valid_agent_ids = list(agent_summaries.keys()) if agent_summaries else None

        # Build base conversation
        conversation = templates.build_initial_conversation(
            task=task, agent_summaries=agent_summaries, valid_agent_ids=valid_agent_ids
        )

        # Prepend the custom system instruction (if any) to the template's
        # system message so domain expertise comes first.
        if self.custom_system_instruction:
            base_system = conversation["system_message"]
            conversation["system_message"] = (
                f"{self.custom_system_instruction}\n\n{base_system}"
            )

        # Add backend configuration
        conversation.update(
            {
                "backend_params": self.get_backend_params(),
                "session_id": session_id,
                "agent_id": self.agent_id,
            }
        )

        return conversation

    def add_enforcement_message(self, conversation_messages: list) -> list:
        """Add enforcement message to conversation (Case 3 handling).

        Args:
            conversation_messages: Existing conversation messages

        Returns:
            Updated conversation messages with enforcement
        """
        from .message_templates import get_templates

        templates = self.message_templates or get_templates()
        return templates.add_enforcement_message(conversation_messages)

    def continue_conversation(
        self,
        existing_messages: list,
        additional_message: Any = None,
        additional_message_role: str = "user",
        enforce_tools: bool = False,
    ) -> Dict[str, Any]:
        """Continue an existing conversation (Cases 3 & 4).

        Args:
            existing_messages: Previous conversation messages
            additional_message: Additional message (str or dict for tool results)
            additional_message_role: Role for additional message ("user", "tool", "assistant")
            enforce_tools: Whether to add tool enforcement message

        Returns:
            Updated conversation configuration
        """
        # Shallow copy so the caller's list is never mutated.
        messages = existing_messages.copy()

        # Add additional message if provided
        if additional_message is not None:
            if isinstance(additional_message, dict):
                # Full message object provided (e.g. a tool-result message)
                messages.append(additional_message)
            else:
                # String content provided
                messages.append(
                    {
                        "role": additional_message_role,
                        "content": str(additional_message),
                    }
                )

        # Add enforcement if requested (Case 3)
        if enforce_tools:
            messages = self.add_enforcement_message(messages)

        # Build conversation with continued messages
        from .message_templates import get_templates

        templates = self.message_templates or get_templates()

        return {
            "messages": messages,
            "tools": templates.get_standard_tools(),  # Same tools as initial
            "backend_params": self.get_backend_params(),
            "session_id": None,  # Maintain existing session
            "agent_id": self.agent_id,
        }

    def handle_case3_enforcement(self, existing_messages: list) -> Dict[str, Any]:
        """Handle Case 3: Non-workflow response requiring enforcement.

        Args:
            existing_messages: Messages from agent that didn't use tools

        Returns:
            Conversation with enforcement message added
        """
        return self.continue_conversation(
            existing_messages=existing_messages, enforce_tools=True
        )

    def add_tool_result(
        self, existing_messages: list, tool_call_id: str, result: str
    ) -> Dict[str, Any]:
        """Add tool result to conversation.

        Args:
            existing_messages: Previous conversation messages
            tool_call_id: ID of the tool call this responds to
            result: Tool execution result (success or error)

        Returns:
            Conversation with tool result added
        """
        tool_message = {"role": "tool", "tool_call_id": tool_call_id, "content": result}

        return self.continue_conversation(
            existing_messages=existing_messages, additional_message=tool_message
        )

    def handle_case4_error_recovery(
        self, existing_messages: list, clarification: Optional[str] = None
    ) -> Dict[str, Any]:
        """Handle Case 4: Error recovery after tool failure.

        Args:
            existing_messages: Messages including tool error response
            clarification: Optional clarification message

        Returns:
            Conversation ready for retry
        """
        return self.continue_conversation(
            existing_messages=existing_messages,
            additional_message=clarification,
            additional_message_role="user",
            enforce_tools=False,  # Agent should retry naturally
        )

    def get_backend_params(self) -> Dict[str, Any]:
        """Return a copy of backend parameters (already includes tool enablement).

        A copy is returned so callers can mutate the result without
        affecting this configuration.
        """
        return self.backend_params.copy()

    # =============================================================================
    # SERIALIZATION
    # =============================================================================

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for serialization.

        message_templates is only included when set; callable template
        overrides cannot be serialized and are replaced with a placeholder.
        """
        result = {
            "backend_params": self.backend_params,
            "agent_id": self.agent_id,
            "custom_system_instruction": self.custom_system_instruction,
        }

        # Handle message_templates serialization
        if self.message_templates is not None:
            try:
                if hasattr(self.message_templates, "_template_overrides"):
                    overrides = self.message_templates._template_overrides
                    if all(not callable(v) for v in overrides.values()):
                        result["message_templates"] = overrides
                    else:
                        result["message_templates"] = "<contains_callable_functions>"
                else:
                    result["message_templates"] = "<custom_message_templates>"
            except (AttributeError, TypeError):
                result["message_templates"] = "<non_serializable>"

        return result

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "AgentConfig":
        """Create from dictionary (for deserialization).

        Only dict-valued message_templates entries are reconstructed; the
        string placeholders written by to_dict() are ignored.
        """
        # Extract basic fields
        backend_params = data.get("backend_params", {})
        agent_id = data.get("agent_id")
        custom_system_instruction = data.get("custom_system_instruction")

        # Handle message_templates
        message_templates = None
        template_data = data.get("message_templates")
        if isinstance(template_data, dict):
            from .message_templates import MessageTemplates

            message_templates = MessageTemplates(**template_data)

        return cls(
            backend_params=backend_params,
            message_templates=message_templates,
            agent_id=agent_id,
            custom_system_instruction=custom_system_instruction,
        )
482
+
483
+
484
+ # =============================================================================
485
+ # CONVENIENCE FUNCTIONS
486
+ # =============================================================================
487
+
488
+
489
def create_research_config(model: str = "gpt-4o", backend: str = "openai") -> AgentConfig:
    """Shortcut for a research-task configuration (web search enabled)."""
    return AgentConfig.for_research_task(model=model, backend=backend)
494
+
495
+
496
def create_computational_config(model: str = "gpt-4o", backend: str = "openai") -> AgentConfig:
    """Shortcut for a computational-task configuration (code execution enabled)."""
    return AgentConfig.for_computational_task(model=model, backend=backend)
501
+
502
+
503
def create_analytical_config(model: str = "gpt-4o-mini", backend: str = "openai") -> AgentConfig:
    """Shortcut for an analytical-task configuration (no special tools)."""
    return AgentConfig.for_analytical_task(model=model, backend=backend)