massgen 0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of massgen might be problematic. Click here for more details.

Files changed (76)
  1. massgen/__init__.py +94 -0
  2. massgen/agent_config.py +507 -0
  3. massgen/backend/CLAUDE_API_RESEARCH.md +266 -0
  4. massgen/backend/Function calling openai responses.md +1161 -0
  5. massgen/backend/GEMINI_API_DOCUMENTATION.md +410 -0
  6. massgen/backend/OPENAI_RESPONSES_API_FORMAT.md +65 -0
  7. massgen/backend/__init__.py +25 -0
  8. massgen/backend/base.py +180 -0
  9. massgen/backend/chat_completions.py +228 -0
  10. massgen/backend/claude.py +661 -0
  11. massgen/backend/gemini.py +652 -0
  12. massgen/backend/grok.py +187 -0
  13. massgen/backend/response.py +397 -0
  14. massgen/chat_agent.py +440 -0
  15. massgen/cli.py +686 -0
  16. massgen/configs/README.md +293 -0
  17. massgen/configs/creative_team.yaml +53 -0
  18. massgen/configs/gemini_4o_claude.yaml +31 -0
  19. massgen/configs/news_analysis.yaml +51 -0
  20. massgen/configs/research_team.yaml +51 -0
  21. massgen/configs/single_agent.yaml +18 -0
  22. massgen/configs/single_flash2.5.yaml +44 -0
  23. massgen/configs/technical_analysis.yaml +51 -0
  24. massgen/configs/three_agents_default.yaml +31 -0
  25. massgen/configs/travel_planning.yaml +51 -0
  26. massgen/configs/two_agents.yaml +39 -0
  27. massgen/frontend/__init__.py +20 -0
  28. massgen/frontend/coordination_ui.py +945 -0
  29. massgen/frontend/displays/__init__.py +24 -0
  30. massgen/frontend/displays/base_display.py +83 -0
  31. massgen/frontend/displays/rich_terminal_display.py +3497 -0
  32. massgen/frontend/displays/simple_display.py +93 -0
  33. massgen/frontend/displays/terminal_display.py +381 -0
  34. massgen/frontend/logging/__init__.py +9 -0
  35. massgen/frontend/logging/realtime_logger.py +197 -0
  36. massgen/message_templates.py +431 -0
  37. massgen/orchestrator.py +1222 -0
  38. massgen/tests/__init__.py +10 -0
  39. massgen/tests/multi_turn_conversation_design.md +214 -0
  40. massgen/tests/multiturn_llm_input_analysis.md +189 -0
  41. massgen/tests/test_case_studies.md +113 -0
  42. massgen/tests/test_claude_backend.py +310 -0
  43. massgen/tests/test_grok_backend.py +160 -0
  44. massgen/tests/test_message_context_building.py +293 -0
  45. massgen/tests/test_rich_terminal_display.py +378 -0
  46. massgen/tests/test_v3_3agents.py +117 -0
  47. massgen/tests/test_v3_simple.py +216 -0
  48. massgen/tests/test_v3_three_agents.py +272 -0
  49. massgen/tests/test_v3_two_agents.py +176 -0
  50. massgen/utils.py +79 -0
  51. massgen/v1/README.md +330 -0
  52. massgen/v1/__init__.py +91 -0
  53. massgen/v1/agent.py +605 -0
  54. massgen/v1/agents.py +330 -0
  55. massgen/v1/backends/gemini.py +584 -0
  56. massgen/v1/backends/grok.py +410 -0
  57. massgen/v1/backends/oai.py +571 -0
  58. massgen/v1/cli.py +351 -0
  59. massgen/v1/config.py +169 -0
  60. massgen/v1/examples/fast-4o-mini-config.yaml +44 -0
  61. massgen/v1/examples/fast_config.yaml +44 -0
  62. massgen/v1/examples/production.yaml +70 -0
  63. massgen/v1/examples/single_agent.yaml +39 -0
  64. massgen/v1/logging.py +974 -0
  65. massgen/v1/main.py +368 -0
  66. massgen/v1/orchestrator.py +1138 -0
  67. massgen/v1/streaming_display.py +1190 -0
  68. massgen/v1/tools.py +160 -0
  69. massgen/v1/types.py +245 -0
  70. massgen/v1/utils.py +199 -0
  71. massgen-0.0.3.dist-info/METADATA +568 -0
  72. massgen-0.0.3.dist-info/RECORD +76 -0
  73. massgen-0.0.3.dist-info/WHEEL +5 -0
  74. massgen-0.0.3.dist-info/entry_points.txt +2 -0
  75. massgen-0.0.3.dist-info/licenses/LICENSE +204 -0
  76. massgen-0.0.3.dist-info/top_level.txt +1 -0
massgen/v1/cli.py ADDED
@@ -0,0 +1,351 @@
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ """
4
+ MassGen (Multi-Agent Scaling System) - Command Line Interface
5
+
6
+ This provides a clean command-line interface for the MassGen system.
7
+
8
+ Usage examples:
9
+ # Use YAML configuration file
10
+ uv run python -m massgen.v1.cli "What is 2+2?" --config examples/production.yaml
11
+
12
+ # Use model names directly (single or multiple agents)
13
+ uv run python -m massgen.v1.cli "What is 2+2?" --models gpt-4o gemini-2.5-flash
14
+ uv run python -m massgen.v1.cli "What is 2+2?" --models gpt-4o # Single agent mode
15
+
16
+ # Interactive mode (no question provided)
17
+ uv run python -m massgen.v1.cli --models gpt-4o grok-4
18
+ """
19
+
20
+ import argparse
21
+ import sys
22
+ import os
23
+ from pathlib import Path
24
+
25
# NOTE(review): this inserts massgen/v1 (this file's own directory) onto
# sys.path, not the massgen package root as the original comment suggested —
# confirm which directory is actually intended.
sys.path.insert(0, str(Path(__file__).parent))
27
+
28
+ from massgen.v1 import (
29
+ run_mass_with_config,
30
+ load_config_from_yaml,
31
+ create_config_from_models,
32
+ ConfigurationError,
33
+ )
34
+
35
# ANSI escape sequences for colorized terminal output.
# "Bright" foreground colors use the 90–97 SGR range.
BRIGHT_CYAN = "\033[96m"
BRIGHT_BLUE = "\033[94m"
BRIGHT_GREEN = "\033[92m"
BRIGHT_YELLOW = "\033[93m"
BRIGHT_MAGENTA = "\033[95m"
BRIGHT_RED = "\033[91m"
BRIGHT_WHITE = "\033[97m"
RESET = "\033[0m"  # reset all text attributes
BOLD = "\033[1m"
DIM = "\033[2m"
46
+
47
+
48
def display_vote_distribution(vote_distribution):
    """Print each agent's vote count, one line per agent, ordered by agent id."""
    for agent_id, votes in sorted(vote_distribution.items()):
        print(
            f" {BRIGHT_CYAN}Agent {agent_id}{RESET}: {BRIGHT_GREEN}{votes}{RESET} votes"
        )
56
+
57
+
58
def _print_interactive_config(config):
    """Print the interactive-mode banner and a summary of the active configuration."""
    print("\n🤖 MassGen Interactive Mode")
    print("=" * 60)

    # Display current configuration
    print("📋 Current Configuration:")
    print("-" * 30)

    # Show models/agents (guarded with hasattr so partially-built configs still print)
    if hasattr(config, "agents") and config.agents:
        print(f"🤖 Agents ({len(config.agents)}):")
        for i, agent in enumerate(config.agents, 1):
            model_name = (
                getattr(agent.model_config, "model", "Unknown")
                if hasattr(agent, "model_config")
                else "Unknown"
            )
            agent_type = getattr(agent, "agent_type", "Unknown")
            tools = (
                getattr(agent.model_config, "tools", [])
                if hasattr(agent, "model_config")
                else []
            )
            tools_str = ", ".join(tools) if tools else "None"
            print(f" {i}. {model_name} ({agent_type})")
            print(f" Tools: {tools_str}")
    else:
        print("🤖 Single Agent Mode")

    # Show orchestrator settings
    if hasattr(config, "orchestrator"):
        orch = config.orchestrator
        print(f"⚙️ Orchestrator:")
        print(f" • Duration: {getattr(orch, 'max_duration', 'Default')}s")
        print(f" • Consensus: {getattr(orch, 'consensus_threshold', 'Default')}")
        print(
            f" • Max Debate Rounds: {getattr(orch, 'max_debate_rounds', 'Default')}"
        )

    # Show model parameters (from first agent as representative)
    if (
        hasattr(config, "agents")
        and config.agents
        and hasattr(config.agents[0], "model_config")
    ):
        model_config = config.agents[0].model_config
        print(f"🔧 Model Config:")
        temp = getattr(model_config, "temperature", "Default")
        timeout = getattr(model_config, "inference_timeout", "Default")
        max_rounds = getattr(model_config, "max_rounds", "Default")
        print(f" • Temperature: {temp}")
        print(f" • Timeout: {timeout}s")
        print(f" • Max Debate Rounds: {max_rounds}")

    # Show display settings
    if hasattr(config, "streaming_display"):
        display = config.streaming_display
        display_status = (
            "✅ Enabled" if getattr(display, "display_enabled", True) else "❌ Disabled"
        )
        logs_status = (
            "✅ Enabled" if getattr(display, "save_logs", True) else "❌ Disabled"
        )
        print(f"📺 Display: {display_status}")
        print(f"📁 Logs: {logs_status}")

    print("-" * 30)
    print("💬 Type your questions below. Type 'quit', 'exit', or press Ctrl+C to stop.")
    print("=" * 60)


def _print_exchange(question, response, result, config):
    """Pretty-print one user/assistant exchange plus the run metadata."""
    print(f"\n{BRIGHT_CYAN}{'='*80}{RESET}")
    print(f"{BOLD}{BRIGHT_WHITE}💬 CONVERSATION EXCHANGE{RESET}")
    print(f"{BRIGHT_CYAN}{'='*80}{RESET}")

    # User input section with simple indentation
    print(f"\n{BRIGHT_BLUE}👤 User:{RESET}")
    print(f" {BRIGHT_WHITE}{question}{RESET}")

    # Assistant response section
    print(f"\n{BRIGHT_GREEN}🤖 Assistant:{RESET}")

    agents = {
        f"Agent {agent.agent_id}": agent.model_config.model
        for agent in config.agents
    }

    # Show metadata with clean indentation
    if result.get("single_agent_mode", False):
        print(f" {BRIGHT_YELLOW}📋 Mode:{RESET} Single Agent")
        print(f" {BRIGHT_MAGENTA}🤖 Agents:{RESET} {agents}")
        print(
            f" {BRIGHT_CYAN}🎯 Representative:{RESET} {result['representative_agent_id']}"
        )
        print(
            f" {BRIGHT_GREEN}🔧 Model:{RESET} {result.get('model_used', 'Unknown')}"
        )
        print(
            f" {BRIGHT_BLUE}⏱️ Duration:{RESET} {result['session_duration']:.1f}s"
        )
        if result.get("citations"):
            print(
                f" {BRIGHT_WHITE}📚 Citations:{RESET} {len(result['citations'])}"
            )
        if result.get("code"):
            print(
                f" {BRIGHT_WHITE}💻 Code blocks:{RESET} {len(result['code'])}"
            )
    else:
        print(f" {BRIGHT_YELLOW}📋 Mode:{RESET} Multi-Agent")
        print(f" {BRIGHT_MAGENTA}🤖 Agents:{RESET} {agents}")
        print(
            f" {BRIGHT_CYAN}🎯 Representative:{RESET} {result['representative_agent_id']}"
        )
        print(
            f" {BRIGHT_GREEN}✅ Consensus:{RESET} {result['consensus_reached']}"
        )
        print(
            f" {BRIGHT_BLUE}⏱️ Duration:{RESET} {result['session_duration']:.1f}s"
        )
        print(f" {BRIGHT_YELLOW}📊 Vote Distribution:{RESET}")
        display_vote_distribution(
            result["summary"]["final_vote_distribution"]
        )

    # Print the response with simple indentation
    print(f"\n {BRIGHT_RED}💡 Response:{RESET}")
    # Indent the response content
    for line in response.split("\n"):
        print(f" {line}")

    print(f"\n{BRIGHT_CYAN}{'='*80}{RESET}")


def run_interactive_mode(config):
    """Run MassGen in interactive mode, asking for questions repeatedly.

    Keeps a rolling plain-text chat history ("User: ...\\n" / "Assistant: ...\\n")
    that is passed to ``run_mass_with_config`` on every turn.

    Bug fix vs. the original: the user's input is appended to the chat history
    only AFTER it passes validation, so 'quit'/'exit' commands and empty lines
    no longer pollute the context sent to the agents.
    """
    _print_interactive_config(config)

    chat_history = ""
    try:
        while True:
            try:
                question = input("\n👤 User: ").strip()

                if question.lower() in ["quit", "exit", "q"]:
                    print("👋 Goodbye!")
                    break

                if not question:
                    print("Please enter a question or type 'quit' to exit.")
                    continue

                # Record the question only once we know it is a real question.
                chat_history += f"User: {question}\n"

                print("\n🔄 Processing your question...")

                # Run MassGen on the accumulated conversation context
                result = run_mass_with_config(chat_history, config)

                response = result["answer"]
                chat_history += f"Assistant: {response}\n"

                # Display complete conversation exchange
                _print_exchange(question, response, result, config)

            except KeyboardInterrupt:
                print("\n👋 Goodbye!")
                break
            except Exception as e:
                # Best-effort loop: report the failure and keep the session alive.
                print(f"❌ Error processing question: {e}")
                print("Please try again or type 'quit' to exit.")

    except KeyboardInterrupt:
        print("\n👋 Goodbye!")
225
+
226
+
227
def main():
    """Clean CLI interface for MassGen.

    Parses command-line arguments, builds a configuration (from a YAML file or
    from model names), applies any CLI overrides, then either answers a single
    question or enters interactive mode. Exits with status 1 on errors.
    """
    parser = argparse.ArgumentParser(
        description="MassGen (Multi-Agent Scaling System) - Clean CLI",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Use YAML configuration
  uv run python -m massgen.v1.cli "What is the capital of France?" --config examples/production.yaml

  # Use model names directly (single or multiple agents)
  uv run python -m massgen.v1.cli "What is 2+2?" --models gpt-4o gemini-2.5-flash
  uv run python -m massgen.v1.cli "What is 2+2?" --models gpt-4o  # Single agent mode

  # Interactive mode (no question provided)
  uv run python -m massgen.v1.cli --models gpt-4o grok-4

  # Override parameters
  uv run python -m massgen.v1.cli "Question" --models gpt-4o gemini-2.5-flash --max-duration 1200 --consensus 0.8
        """,
    )

    # Task input (now optional for interactive mode)
    parser.add_argument(
        "question",
        nargs="?",
        help="Question to solve (optional - if not provided, enters interactive mode)",
    )

    # Configuration options (mutually exclusive): exactly one source of config.
    config_group = parser.add_mutually_exclusive_group(required=True)
    config_group.add_argument(
        "--config", type=str, help="Path to YAML configuration file"
    )
    config_group.add_argument(
        "--models", nargs="+", help="Model names (e.g., gpt-4o gemini-2.5-flash)"
    )

    # Configuration overrides (None means "keep the value from the config source")
    parser.add_argument(
        "--max-duration", type=int, default=None, help="Max duration in seconds"
    )
    parser.add_argument(
        "--consensus", type=float, default=None, help="Consensus threshold (0.0-1.0)"
    )
    parser.add_argument(
        "--max-debates", type=int, default=None, help="Maximum debate rounds"
    )
    parser.add_argument(
        "--no-display", action="store_true", help="Disable streaming display"
    )
    parser.add_argument("--no-logs", action="store_true", help="Disable file logging")

    args = parser.parse_args()

    # Load configuration
    try:
        if args.config:
            config = load_config_from_yaml(args.config)
        else:  # args.models (guaranteed by the required mutually exclusive group)
            config = create_config_from_models(args.models)

        # Apply command-line overrides on top of the loaded configuration
        if args.max_duration is not None:
            config.orchestrator.max_duration = args.max_duration
        if args.consensus is not None:
            config.orchestrator.consensus_threshold = args.consensus
        if args.max_debates is not None:
            config.orchestrator.max_debate_rounds = args.max_debates
        if args.no_display:
            config.streaming_display.display_enabled = False
        if args.no_logs:
            config.streaming_display.save_logs = False

        # Validate final configuration (raises ConfigurationError on problems)
        config.validate()

        # The used models, keyed by a human-readable agent label
        agents = {
            f"Agent {agent.agent_id}": agent.model_config.model
            for agent in config.agents
        }

        # Check if question was provided
        if args.question:
            # Single question mode
            result = run_mass_with_config(args.question, config)

            # Display results
            print("\n" + "=" * 60)
            print(f"🎯 FINAL ANSWER (Agent {result['representative_agent_id']}):")
            print("=" * 60)
            print(result["answer"])
            print("\n" + "=" * 60)

            # Show different metadata based on single vs multi-agent mode
            if result.get("single_agent_mode", False):
                print("🤖 Single Agent Mode")
                print(f"🤖 Agents: {agents}")
                print(f"⏱️ Duration: {result['session_duration']:.1f}s")
                if result.get("citations"):
                    print(f"📚 Citations: {len(result['citations'])}")
                if result.get("code"):
                    print(f"💻 Code blocks: {len(result['code'])}")
            else:
                print(f"🤖 Agents: {agents}")
                print(f"🎯 Representative Agent: {result['representative_agent_id']}")
                print(f"✅ Consensus: {result['consensus_reached']}")
                print(f"⏱️ Duration: {result['session_duration']:.1f}s")
                print(f"📊 Votes:")
                display_vote_distribution(result["summary"]["final_vote_distribution"])
        else:
            # Interactive mode
            run_interactive_mode(config)

    except ConfigurationError as e:
        print(f"❌ Configuration error: {e}")
        sys.exit(1)
    except Exception as e:
        # Top-level CLI boundary: report any unexpected failure and exit nonzero.
        print(f"❌ Error: {e}")
        sys.exit(1)
348
+
349
+
350
# Script entry point: run the CLI only when executed directly (python -m massgen.v1.cli).
if __name__ == "__main__":
    main()
massgen/v1/config.py ADDED
@@ -0,0 +1,169 @@
1
+ """
2
+ MassGen Configuration System
3
+
4
+ This module provides configuration management for the MassGen system,
5
+ supporting YAML file loading and programmatic configuration creation.
6
+ """
7
+
8
+ import yaml
9
+ import os
10
+ from pathlib import Path
11
+ from typing import Dict, List, Any, Optional, Union
12
+ from dataclasses import asdict
13
+
14
+ from .types import (
15
+ MassConfig,
16
+ OrchestratorConfig,
17
+ AgentConfig,
18
+ ModelConfig,
19
+ StreamingDisplayConfig,
20
+ LoggingConfig,
21
+ )
22
+
23
+
24
class ConfigurationError(Exception):
    """Exception raised for configuration-related errors."""
28
+
29
+
30
def load_config_from_yaml(config_path: Union[str, Path]) -> MassConfig:
    """
    Load MassGen configuration from a YAML file.

    Args:
        config_path: Path to the YAML configuration file

    Returns:
        MassConfig object with loaded configuration

    Raises:
        ConfigurationError: If the file is missing, unreadable, not valid
            YAML, or empty.
    """
    config_path = Path(config_path)

    if not config_path.exists():
        raise ConfigurationError(f"Configuration file not found: {config_path}")

    try:
        with open(config_path, "r", encoding="utf-8") as f:
            yaml_data = yaml.safe_load(f)
    except yaml.YAMLError as e:
        # Chain the original error so the YAML parse location (line/column)
        # is preserved in tracebacks instead of being discarded.
        raise ConfigurationError(f"Invalid YAML format: {e}") from e
    except Exception as e:
        raise ConfigurationError(f"Failed to read configuration file: {e}") from e

    # safe_load returns None for an empty document; treat that as an error.
    if not yaml_data:
        raise ConfigurationError("Empty configuration file")

    return _dict_to_config(yaml_data)
60
+
61
+
62
def create_config_from_models(
    models: List[str],
    orchestrator_config: Optional[Dict[str, Any]] = None,
    streaming_config: Optional[Dict[str, Any]] = None,
    logging_config: Optional[Dict[str, Any]] = None,
) -> MassConfig:
    """
    Create a MassGen configuration from a list of model names.

    Args:
        models: List of model names (e.g., ["gpt-4o", "gemini-2.5-flash"])
        orchestrator_config: Optional orchestrator configuration overrides
        streaming_config: Optional streaming display configuration overrides
        logging_config: Optional logging configuration overrides

    Returns:
        MassConfig object ready to use
    """
    from .utils import get_agent_type_from_model

    # Build one agent per model, with 1-based agent ids and default settings.
    agents = [
        AgentConfig(
            agent_id=index,
            agent_type=get_agent_type_from_model(model_name),
            model_config=ModelConfig(
                model=model_name,
                tools=["live_search", "code_execution"],  # Default tools
                max_retries=10,
                max_rounds=10,
                temperature=None,
                inference_timeout=180,
            ),
        )
        for index, model_name in enumerate(models, start=1)
    ]

    # Assemble the full configuration, applying any caller-supplied overrides.
    config = MassConfig(
        orchestrator=OrchestratorConfig(**(orchestrator_config or {})),
        agents=agents,
        streaming_display=StreamingDisplayConfig(**(streaming_config or {})),
        logging=LoggingConfig(**(logging_config or {})),
    )

    config.validate()
    return config
114
+
115
+
116
def _dict_to_config(data: Dict[str, Any]) -> MassConfig:
    """Convert dictionary data to a validated MassConfig object.

    Args:
        data: Parsed configuration dict with optional "orchestrator",
            "streaming_display", "logging" and "task" sections and a
            required non-empty "agents" list (each entry needs "agent_id",
            "agent_type" and optionally "model_config").

    Returns:
        MassConfig built from *data*, after ``config.validate()``.

    Raises:
        ConfigurationError: If required keys are missing, values have the
            wrong type, or any other parsing/validation failure occurs.
    """
    try:
        # Parse orchestrator configuration
        orchestrator = OrchestratorConfig(**data.get("orchestrator", {}))

        # Parse agents configuration
        agents_data = data.get("agents", [])
        if not agents_data:
            raise ConfigurationError("No agents specified in configuration")

        agents = []
        for agent_data in agents_data:
            # Parse per-agent model configuration (empty dict -> all defaults)
            model_config = ModelConfig(**agent_data.get("model_config", {}))

            # agent_id / agent_type are required; a missing key raises KeyError
            # which is converted to ConfigurationError below.
            agents.append(
                AgentConfig(
                    agent_id=agent_data["agent_id"],
                    agent_type=agent_data["agent_type"],
                    model_config=model_config,
                )
            )

        # Parse streaming display and logging configuration
        streaming_display = StreamingDisplayConfig(**data.get("streaming_display", {}))
        logging = LoggingConfig(**data.get("logging", {}))

        config = MassConfig(
            orchestrator=orchestrator,
            agents=agents,
            streaming_display=streaming_display,
            logging=logging,
            task=data.get("task"),
        )

        config.validate()
        return config

    except ConfigurationError:
        # Bug fix: don't let the catch-all below re-wrap our own errors
        # (e.g. "No agents specified in configuration") as a generic
        # "Configuration parsing error".
        raise
    except KeyError as e:
        raise ConfigurationError(f"Missing required configuration key: {e}") from e
    except TypeError as e:
        raise ConfigurationError(f"Invalid configuration value: {e}") from e
    except Exception as e:
        raise ConfigurationError(f"Configuration parsing error: {e}") from e
@@ -0,0 +1,44 @@
1
+ orchestrator:
2
+ max_duration: 600
3
+ consensus_threshold: 0.0
4
+ max_debate_rounds: 1
5
+ status_check_interval: 1.0
6
+ thread_pool_timeout: 5
7
+ agents:
8
+ - agent_id: 1
9
+ agent_type: openai
10
+ model_config:
11
+ model: gpt-4o-mini
12
+ tools:
13
+ - live_search
14
+ - code_execution
15
+ max_retries: 10
16
+ max_rounds: 5
17
+ inference_timeout: 150
18
+ stream: true
19
+ - agent_id: 2
20
+ agent_type: gemini
21
+ model_config:
22
+ model: gemini-2.5-flash
23
+ tools:
24
+ - live_search
25
+ max_retries: 10
26
+ max_rounds: 5
27
+ inference_timeout: 150
28
+ stream: true
29
+ - agent_id: 3
30
+ agent_type: "grok"
31
+ model_config:
32
+ model: "grok-3-mini"
33
+ tools: ["live_search"]
34
+ max_retries: 10
35
+ max_rounds: 5
36
+ inference_timeout: 150
37
+ stream: true
38
+ streaming_display:
39
+ display_enabled: true
40
+ max_lines: 10
41
+ save_logs: true
42
+ logging:
43
+ log_dir: logs
44
+ non_blocking: false
@@ -0,0 +1,44 @@
1
+ orchestrator:
2
+ max_duration: 600
3
+ consensus_threshold: 0.0
4
+ max_debate_rounds: 1
5
+ status_check_interval: 1.0
6
+ thread_pool_timeout: 5
7
+ agents:
8
+ - agent_id: 1
9
+ agent_type: openai
10
+ model_config:
11
+ model: gpt-4o
12
+ tools:
13
+ - live_search
14
+ - code_execution
15
+ max_retries: 10
16
+ max_rounds: 5
17
+ inference_timeout: 150
18
+ stream: true
19
+ - agent_id: 2
20
+ agent_type: gemini
21
+ model_config:
22
+ model: gemini-2.5-flash
23
+ tools:
24
+ - live_search
25
+ max_retries: 10
26
+ max_rounds: 5
27
+ inference_timeout: 150
28
+ stream: true
29
+ - agent_id: 3
30
+ agent_type: "grok"
31
+ model_config:
32
+ model: "grok-3-mini"
33
+ tools: ["live_search"]
34
+ max_retries: 10
35
+ max_rounds: 5
36
+ inference_timeout: 150
37
+ stream: true
38
+ streaming_display:
39
+ display_enabled: true
40
+ max_lines: 10
41
+ save_logs: true
42
+ logging:
43
+ log_dir: logs
44
+ non_blocking: false
@@ -0,0 +1,70 @@
1
+ # MassGen Configuration: Production
2
+ #
3
+ # Optimized for production use with reliable, high-quality results.
4
+ # Uses robust models with strict consensus requirements and comprehensive logging.
5
+ #
6
+ # Usage:
7
+ # python -m massgen --config examples/production.yaml "Production question"
8
+
9
+ orchestrator:
10
+ max_duration: 900 # 15 minutes for thorough analysis
11
+ consensus_threshold: 0.5 # 50% agreement for high confidence
12
+ max_debate_rounds: 2 # Standard debate rounds
13
+ status_check_interval: 1.0 # Balanced responsiveness
14
+ thread_pool_timeout: 10 # Longer timeout for stability
15
+
16
+ agents:
17
+ # Agent 1: OpenAI o4-mini (Primary analyst)
18
+ - agent_id: 1
19
+ agent_type: "openai"
20
+ model_config:
21
+ model: "o4-mini"
22
+ tools:
23
+ - live_search
24
+ - code_execution
25
+ max_retries: 15 # More retries for reliability
26
+ max_rounds: 5 # Allow thorough analysis
27
+ inference_timeout: 180 # Longer timeout for complex tasks
28
+ stream: true
29
+
30
+ # Agent 2: Google Gemini Pro (Research specialist)
31
+ - agent_id: 2
32
+ agent_type: "gemini"
33
+ model_config:
34
+ model: "gemini-2.5-pro"
35
+ tools:
36
+ - live_search
37
+ - code_execution
38
+ max_retries: 10
39
+ max_rounds: 5
40
+ temperature: 0.4 # Balanced creativity and reliability
41
+ top_p: 0.9
42
+ inference_timeout: 180
43
+ stream: true
44
+
45
+ # Agent 3: xAI Grok (Alternative perspective)
46
+ - agent_id: 3
47
+ agent_type: "grok"
48
+ model_config:
49
+ model: "grok-4"
50
+ tools: ["live_search"]
51
+ max_retries: 12
52
+ max_rounds: 10
53
+ temperature: 0.2 # Very low temperature for accuracy
54
+ top_p: 0.9
55
+ inference_timeout: 180
56
+ stream: true
57
+
58
+ streaming_display:
59
+ display_enabled: true
60
+ max_lines: 10 # More lines for detailed output
61
+ save_logs: true
62
+
63
+ logging:
64
+ log_dir: "logs/production"
65
+ non_blocking: false # Ensure all logs are saved
66
+
67
+ task:
68
+ category: "production"
69
+ domain: "business"
70
+ complexity: "high"