massgen-0.0.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of massgen might be problematic.

Files changed (76)
  1. massgen/__init__.py +94 -0
  2. massgen/agent_config.py +507 -0
  3. massgen/backend/CLAUDE_API_RESEARCH.md +266 -0
  4. massgen/backend/Function calling openai responses.md +1161 -0
  5. massgen/backend/GEMINI_API_DOCUMENTATION.md +410 -0
  6. massgen/backend/OPENAI_RESPONSES_API_FORMAT.md +65 -0
  7. massgen/backend/__init__.py +25 -0
  8. massgen/backend/base.py +180 -0
  9. massgen/backend/chat_completions.py +228 -0
  10. massgen/backend/claude.py +661 -0
  11. massgen/backend/gemini.py +652 -0
  12. massgen/backend/grok.py +187 -0
  13. massgen/backend/response.py +397 -0
  14. massgen/chat_agent.py +440 -0
  15. massgen/cli.py +686 -0
  16. massgen/configs/README.md +293 -0
  17. massgen/configs/creative_team.yaml +53 -0
  18. massgen/configs/gemini_4o_claude.yaml +31 -0
  19. massgen/configs/news_analysis.yaml +51 -0
  20. massgen/configs/research_team.yaml +51 -0
  21. massgen/configs/single_agent.yaml +18 -0
  22. massgen/configs/single_flash2.5.yaml +44 -0
  23. massgen/configs/technical_analysis.yaml +51 -0
  24. massgen/configs/three_agents_default.yaml +31 -0
  25. massgen/configs/travel_planning.yaml +51 -0
  26. massgen/configs/two_agents.yaml +39 -0
  27. massgen/frontend/__init__.py +20 -0
  28. massgen/frontend/coordination_ui.py +945 -0
  29. massgen/frontend/displays/__init__.py +24 -0
  30. massgen/frontend/displays/base_display.py +83 -0
  31. massgen/frontend/displays/rich_terminal_display.py +3497 -0
  32. massgen/frontend/displays/simple_display.py +93 -0
  33. massgen/frontend/displays/terminal_display.py +381 -0
  34. massgen/frontend/logging/__init__.py +9 -0
  35. massgen/frontend/logging/realtime_logger.py +197 -0
  36. massgen/message_templates.py +431 -0
  37. massgen/orchestrator.py +1222 -0
  38. massgen/tests/__init__.py +10 -0
  39. massgen/tests/multi_turn_conversation_design.md +214 -0
  40. massgen/tests/multiturn_llm_input_analysis.md +189 -0
  41. massgen/tests/test_case_studies.md +113 -0
  42. massgen/tests/test_claude_backend.py +310 -0
  43. massgen/tests/test_grok_backend.py +160 -0
  44. massgen/tests/test_message_context_building.py +293 -0
  45. massgen/tests/test_rich_terminal_display.py +378 -0
  46. massgen/tests/test_v3_3agents.py +117 -0
  47. massgen/tests/test_v3_simple.py +216 -0
  48. massgen/tests/test_v3_three_agents.py +272 -0
  49. massgen/tests/test_v3_two_agents.py +176 -0
  50. massgen/utils.py +79 -0
  51. massgen/v1/README.md +330 -0
  52. massgen/v1/__init__.py +91 -0
  53. massgen/v1/agent.py +605 -0
  54. massgen/v1/agents.py +330 -0
  55. massgen/v1/backends/gemini.py +584 -0
  56. massgen/v1/backends/grok.py +410 -0
  57. massgen/v1/backends/oai.py +571 -0
  58. massgen/v1/cli.py +351 -0
  59. massgen/v1/config.py +169 -0
  60. massgen/v1/examples/fast-4o-mini-config.yaml +44 -0
  61. massgen/v1/examples/fast_config.yaml +44 -0
  62. massgen/v1/examples/production.yaml +70 -0
  63. massgen/v1/examples/single_agent.yaml +39 -0
  64. massgen/v1/logging.py +974 -0
  65. massgen/v1/main.py +368 -0
  66. massgen/v1/orchestrator.py +1138 -0
  67. massgen/v1/streaming_display.py +1190 -0
  68. massgen/v1/tools.py +160 -0
  69. massgen/v1/types.py +245 -0
  70. massgen/v1/utils.py +199 -0
  71. massgen-0.0.3.dist-info/METADATA +568 -0
  72. massgen-0.0.3.dist-info/RECORD +76 -0
  73. massgen-0.0.3.dist-info/WHEEL +5 -0
  74. massgen-0.0.3.dist-info/entry_points.txt +2 -0
  75. massgen-0.0.3.dist-info/licenses/LICENSE +204 -0
  76. massgen-0.0.3.dist-info/top_level.txt +1 -0
massgen/cli.py ADDED
@@ -0,0 +1,686 @@
#!/usr/bin/env python3
"""
MassGen Command Line Interface

A clean CLI for MassGen with file-based configuration support.
Supports both interactive mode and single-question mode.

Usage examples:
    # Use YAML/JSON configuration file
    python -m massgen.cli --config config.yaml "What is the capital of France?"

    # Quick setup with backend and model
    python -m massgen.cli --backend openai --model gpt-4o-mini "What is 2+2?"

    # Interactive mode
    python -m massgen.cli --config config.yaml

    # Multiple agents from config
    python -m massgen.cli --config multi_agent.yaml "Compare different approaches to renewable energy"
"""

import argparse
import asyncio
import json
import os
import sys
import yaml
from pathlib import Path
from typing import Dict, Any, Optional, List

from .utils import MODEL_MAPPINGS, get_backend_type_from_model


# Load environment variables from .env file
def load_env_file():
    """Load environment variables from .env file if it exists."""
    env_file = Path(".env")
    if env_file.exists():
        with open(env_file, "r") as f:
            for line in f:
                line = line.strip()
                if line and not line.startswith("#") and "=" in line:
                    key, value = line.split("=", 1)
                    # Remove quotes if present
                    value = value.strip("\"'")
                    os.environ[key] = value


# Load .env file at module import
load_env_file()

# Add project root to path for imports
project_root = Path(__file__).parent.parent.parent.parent
sys.path.insert(0, str(project_root))

from massgen.backend.response import ResponseBackend
from massgen.backend.grok import GrokBackend
from massgen.backend.claude import ClaudeBackend
from massgen.backend.gemini import GeminiBackend
from massgen.chat_agent import SingleAgent, ConfigurableAgent
from massgen.agent_config import AgentConfig
from massgen.orchestrator import Orchestrator
from massgen.frontend.coordination_ui import CoordinationUI

# Color constants for terminal output
BRIGHT_CYAN = "\033[96m"
BRIGHT_BLUE = "\033[94m"
BRIGHT_GREEN = "\033[92m"
BRIGHT_YELLOW = "\033[93m"
BRIGHT_MAGENTA = "\033[95m"
BRIGHT_RED = "\033[91m"
BRIGHT_WHITE = "\033[97m"
RESET = "\033[0m"
BOLD = "\033[1m"

class ConfigurationError(Exception):
    """Configuration error for CLI."""

    pass


def load_config_file(config_path: str) -> Dict[str, Any]:
    """Load configuration from YAML or JSON file."""
    path = Path(config_path)

    # If file doesn't exist in current path, try massgen/configs/ directory
    if not path.exists():
        configs_path = Path(__file__).parent / "configs" / path.name
        if configs_path.exists():
            path = configs_path
        else:
            raise ConfigurationError(
                f"Configuration file not found: {config_path} (also checked {configs_path})"
            )

    try:
        with open(path, "r", encoding="utf-8") as f:
            if path.suffix.lower() in [".yaml", ".yml"]:
                return yaml.safe_load(f)
            elif path.suffix.lower() == ".json":
                return json.load(f)
            else:
                raise ConfigurationError(
                    f"Unsupported config file format: {path.suffix}"
                )
    except Exception as e:
        raise ConfigurationError(f"Error reading config file: {e}")

113
+ """Create backend instance from type and parameters."""
114
+ backend_type = backend_type.lower()
115
+
116
+ if backend_type == "openai":
117
+ api_key = kwargs.get("api_key") or os.getenv("OPENAI_API_KEY")
118
+ if not api_key:
119
+ raise ConfigurationError(
120
+ "OpenAI API key not found. Set OPENAI_API_KEY or provide in config."
121
+ )
122
+ return ResponseBackend(api_key=api_key)
123
+
124
+ elif backend_type == "grok":
125
+ api_key = kwargs.get("api_key") or os.getenv("XAI_API_KEY")
126
+ if not api_key:
127
+ raise ConfigurationError(
128
+ "Grok API key not found. Set XAI_API_KEY or provide in config."
129
+ )
130
+ return GrokBackend(api_key=api_key)
131
+
132
+ elif backend_type == "claude":
133
+ api_key = kwargs.get("api_key") or os.getenv("ANTHROPIC_API_KEY")
134
+ if not api_key:
135
+ raise ConfigurationError(
136
+ "Claude API key not found. Set ANTHROPIC_API_KEY or provide in config."
137
+ )
138
+ return ClaudeBackend(api_key=api_key)
139
+
140
+ elif backend_type == "gemini":
141
+ api_key = (
142
+ kwargs.get("api_key")
143
+ or os.getenv("GOOGLE_API_KEY")
144
+ or os.getenv("GEMINI_API_KEY")
145
+ )
146
+ if not api_key:
147
+ raise ConfigurationError(
148
+ "Gemini API key not found. Set GOOGLE_API_KEY or provide in config."
149
+ )
150
+ return GeminiBackend(api_key=api_key)
151
+
152
+ else:
153
+ raise ConfigurationError(f"Unsupported backend type: {backend_type}")
154
+
155
+
def create_agents_from_config(config: Dict[str, Any]) -> Dict[str, ConfigurableAgent]:
    """Create agents from configuration."""
    agents = {}

    # Handle single agent configuration
    if "agent" in config:
        agent_config_data = config["agent"]
        backend_config = agent_config_data.get("backend", {})

        # Infer backend type from model if not explicitly provided
        if "type" not in backend_config and "model" in backend_config:
            backend_type = get_backend_type_from_model(backend_config["model"])
        else:
            backend_type = backend_config.get("type")
            if not backend_type:
                raise ConfigurationError(
                    "Backend type must be specified or inferrable from model"
                )

        backend = create_backend(backend_type, **backend_config)

        # Create proper AgentConfig with backend_params
        if backend_type.lower() == "openai":
            agent_config = AgentConfig.create_openai_config(
                **{k: v for k, v in backend_config.items() if k != "type"}
            )
        elif backend_type.lower() == "claude":
            agent_config = AgentConfig.create_claude_config(
                **{k: v for k, v in backend_config.items() if k != "type"}
            )
        elif backend_type.lower() == "grok":
            agent_config = AgentConfig.create_grok_config(
                **{k: v for k, v in backend_config.items() if k != "type"}
            )
        elif backend_type.lower() == "gemini":
            agent_config = AgentConfig.create_gemini_config(
                **{k: v for k, v in backend_config.items() if k != "type"}
            )
        else:
            # Fallback to basic config
            agent_config = AgentConfig(backend_params=backend_config)

        # Set agent ID and system message
        agent_config.agent_id = agent_config_data.get("id", "agent1")
        agent_config.custom_system_instruction = agent_config_data.get("system_message")

        agent = ConfigurableAgent(config=agent_config, backend=backend)
        agents[agent.agent_id] = agent

    # Handle multiple agents configuration
    elif "agents" in config:
        for agent_config_data in config["agents"]:
            backend_config = agent_config_data.get("backend", {})

            # Infer backend type from model if not explicitly provided
            if "type" not in backend_config and "model" in backend_config:
                backend_type = get_backend_type_from_model(backend_config["model"])
            else:
                backend_type = backend_config.get("type")
                if not backend_type:
                    raise ConfigurationError(
                        "Backend type must be specified or inferrable from model"
                    )

            backend = create_backend(backend_type, **backend_config)

            # Create proper AgentConfig with backend_params
            if backend_type.lower() == "openai":
                agent_config = AgentConfig.create_openai_config(
                    **{k: v for k, v in backend_config.items() if k != "type"}
                )
            elif backend_type.lower() == "claude":
                agent_config = AgentConfig.create_claude_config(
                    **{k: v for k, v in backend_config.items() if k != "type"}
                )
            elif backend_type.lower() == "grok":
                agent_config = AgentConfig.create_grok_config(
                    **{k: v for k, v in backend_config.items() if k != "type"}
                )
            else:
                # Fallback to basic config
                agent_config = AgentConfig(backend_params=backend_config)

            # Set agent ID and system message
            agent_config.agent_id = agent_config_data.get("id", f"agent{len(agents)+1}")
            agent_config.custom_system_instruction = agent_config_data.get(
                "system_message"
            )

            agent = ConfigurableAgent(config=agent_config, backend=backend)
            agents[agent.agent_id] = agent

    else:
        raise ConfigurationError(
            "Configuration must contain either 'agent' or 'agents' section"
        )

    return agents

def create_simple_config(
    backend_type: str, model: str, system_message: Optional[str] = None
) -> Dict[str, Any]:
    """Create a simple single-agent configuration."""
    return {
        "agent": {
            "id": "agent1",
            "backend": {"type": backend_type, "model": model},
            "system_message": system_message or "You are a helpful AI assistant.",
        },
        "ui": {"display_type": "rich_terminal", "logging_enabled": True},
    }

async def run_question_with_history(
    question: str,
    agents: Dict[str, SingleAgent],
    ui_config: Dict[str, Any],
    history: List[Dict[str, Any]],
) -> str:
    """Run MassGen with a question and conversation history."""
    # Build messages including history
    messages = history.copy()
    messages.append({"role": "user", "content": question})

    # Check whether to route single agents through the orchestrator (default: True)
    use_orchestrator_for_single = ui_config.get(
        "use_orchestrator_for_single_agent", True
    )

    if len(agents) == 1 and not use_orchestrator_for_single:
        # Single agent mode with history
        agent = next(iter(agents.values()))
        print(f"\nšŸ¤– {BRIGHT_CYAN}Single Agent Mode{RESET}", flush=True)
        print(f"Agent: {agent.agent_id}", flush=True)
        if history:
            print(f"History: {len(history)//2} previous exchanges", flush=True)
        print(f"Question: {BRIGHT_WHITE}{question}{RESET}", flush=True)
        print("\n" + "=" * 60, flush=True)

        response_content = ""

        async for chunk in agent.chat(messages):
            if chunk.type == "content" and chunk.content:
                response_content += chunk.content
                print(chunk.content, end="", flush=True)
            elif chunk.type == "builtin_tool_results":
                # Skip builtin_tool_results to avoid duplication with real-time streaming
                # The backends already show tool status during execution
                continue
            elif chunk.type == "error":
                print(f"\nāŒ Error: {chunk.error}", flush=True)
                return ""

        print("\n" + "=" * 60, flush=True)
        return response_content

    else:
        # Multi-agent mode with history
        orchestrator = Orchestrator(agents=agents)
        # Create a fresh UI instance for each question to ensure clean state
        ui = CoordinationUI(
            display_type=ui_config.get("display_type", "rich_terminal"),
            logging_enabled=ui_config.get("logging_enabled", True),
        )

        print(f"\nšŸ¤– {BRIGHT_CYAN}Multi-Agent Mode{RESET}", flush=True)
        print(f"Agents: {', '.join(agents.keys())}", flush=True)
        if history:
            print(f"History: {len(history)//2} previous exchanges", flush=True)
        print(f"Question: {BRIGHT_WHITE}{question}{RESET}", flush=True)
        print("\n" + "=" * 60, flush=True)

        # For multi-agent with history, use an approach that keeps the coordination
        # UI display while supporting conversation context

        if history and len(history) > 0:
            # Use coordination UI with conversation context
            # Extract current question from messages
            current_question = (
                messages[-1].get("content", question) if messages else question
            )

            # Pass the full message context to the UI coordination
            response_content = await ui.coordinate_with_context(
                orchestrator, current_question, messages
            )
        else:
            # Standard coordination for new conversations
            response_content = await ui.coordinate(orchestrator, question)

        return response_content

async def run_single_question(
    question: str, agents: Dict[str, SingleAgent], ui_config: Dict[str, Any]
) -> str:
    """Run MassGen with a single question."""
    # Check whether to route single agents through the orchestrator (default: True)
    use_orchestrator_for_single = ui_config.get(
        "use_orchestrator_for_single_agent", True
    )

    if len(agents) == 1 and not use_orchestrator_for_single:
        # Single agent mode with existing SimpleDisplay frontend
        agent = next(iter(agents.values()))

        print(f"\nšŸ¤– {BRIGHT_CYAN}Single Agent Mode{RESET}", flush=True)
        print(f"Agent: {agent.agent_id}", flush=True)
        print(f"Question: {BRIGHT_WHITE}{question}{RESET}", flush=True)
        print("\n" + "=" * 60, flush=True)

        messages = [{"role": "user", "content": question}]
        response_content = ""

        async for chunk in agent.chat(messages):
            if chunk.type == "content" and chunk.content:
                response_content += chunk.content
                print(chunk.content, end="", flush=True)
            elif chunk.type == "builtin_tool_results":
                # Skip builtin_tool_results to avoid duplication with real-time streaming
                continue
            elif chunk.type == "error":
                print(f"\nāŒ Error: {chunk.error}", flush=True)
                return ""

        print("\n" + "=" * 60, flush=True)
        return response_content

    else:
        # Multi-agent mode
        orchestrator = Orchestrator(agents=agents)
        # Create a fresh UI instance for each question to ensure clean state
        ui = CoordinationUI(
            display_type=ui_config.get("display_type", "rich_terminal"),
            logging_enabled=ui_config.get("logging_enabled", True),
        )

        print(f"\nšŸ¤– {BRIGHT_CYAN}Multi-Agent Mode{RESET}", flush=True)
        print(f"Agents: {', '.join(agents.keys())}", flush=True)
        print(f"Question: {BRIGHT_WHITE}{question}{RESET}", flush=True)
        print("\n" + "=" * 60, flush=True)

        final_response = await ui.coordinate(orchestrator, question)
        return final_response

def print_help_messages():
    print(
        "\nšŸ’¬ Type your questions below. Use slash commands or press Ctrl+C to stop.",
        flush=True,
    )
    print("šŸ’” Commands: /quit, /exit, /reset, /help", flush=True)
    print("=" * 60, flush=True)

async def run_interactive_mode(
    agents: Dict[str, SingleAgent], ui_config: Dict[str, Any]
):
    """Run MassGen in interactive mode with conversation history."""
    print(f"\n{BRIGHT_CYAN}šŸ¤– MassGen Interactive Mode{RESET}", flush=True)
    print("=" * 60, flush=True)

    # Display configuration
    print(f"šŸ“‹ {BRIGHT_YELLOW}Configuration:{RESET}", flush=True)
    print(f"   Agents: {len(agents)}", flush=True)
    for agent_id, agent in agents.items():
        backend_name = agent.backend.__class__.__name__.replace("Backend", "")
        print(f"   • {agent_id}: {backend_name}", flush=True)

    use_orchestrator_for_single = ui_config.get(
        "use_orchestrator_for_single_agent", True
    )
    if len(agents) == 1:
        mode = (
            "Single Agent (Orchestrator)"
            if use_orchestrator_for_single
            else "Single Agent (Direct)"
        )
    else:
        mode = "Multi-Agent Coordination"
    print(f"   Mode: {mode}", flush=True)
    print(f"   UI: {ui_config.get('display_type', 'rich_terminal')}", flush=True)

    print_help_messages()

    # Maintain conversation history
    conversation_history = []

    try:
        while True:
            try:
                question = input(f"\n{BRIGHT_BLUE}šŸ‘¤ User:{RESET} ").strip()

                # Handle slash commands
                if question.startswith("/"):
                    command = question.lower()

                    if command in ["/quit", "/exit", "/q"]:
                        print("šŸ‘‹ Goodbye!", flush=True)
                        break
                    elif command in ["/reset", "/clear"]:
                        conversation_history = []
                        # Reset all agents
                        for agent in agents.values():
                            agent.reset()
                        print(
                            f"{BRIGHT_YELLOW}šŸ”„ Conversation history cleared!{RESET}",
                            flush=True,
                        )
                        continue
                    elif command in ["/help", "/h"]:
                        print(
                            f"\n{BRIGHT_CYAN}šŸ“š Available Commands:{RESET}", flush=True
                        )
                        print("   /quit, /exit, /q  - Exit the program", flush=True)
                        print(
                            "   /reset, /clear    - Clear conversation history",
                            flush=True,
                        )
                        print(
                            "   /help, /h         - Show this help message",
                            flush=True,
                        )
                        print(
                            "   /status           - Show current status", flush=True
                        )
                        continue
                    elif command == "/status":
                        print(f"\n{BRIGHT_CYAN}šŸ“Š Current Status:{RESET}", flush=True)
                        print(
                            f"   Agents: {len(agents)} ({', '.join(agents.keys())})",
                            flush=True,
                        )
                        use_orch_single = ui_config.get(
                            "use_orchestrator_for_single_agent", True
                        )
                        if len(agents) == 1:
                            mode_display = (
                                "Single Agent (Orchestrator)"
                                if use_orch_single
                                else "Single Agent (Direct)"
                            )
                        else:
                            mode_display = "Multi-Agent"
                        print(f"   Mode: {mode_display}", flush=True)
                        print(
                            f"   History: {len(conversation_history)//2} exchanges",
                            flush=True,
                        )
                        continue
                    else:
                        print(f"ā“ Unknown command: {command}", flush=True)
                        print("šŸ’” Type /help for available commands", flush=True)
                        continue

                # Handle legacy plain text commands for backwards compatibility
                if question.lower() in ["quit", "exit", "q"]:
                    print("šŸ‘‹ Goodbye!")
                    break

                if question.lower() in ["reset", "clear"]:
                    conversation_history = []
                    for agent in agents.values():
                        agent.reset()
                    print(f"{BRIGHT_YELLOW}šŸ”„ Conversation history cleared!{RESET}")
                    continue

                if not question:
                    print(
                        "Please enter a question or type /help for commands.",
                        flush=True,
                    )
                    continue

                print(f"\nšŸ”„ {BRIGHT_YELLOW}Processing...{RESET}", flush=True)

                response = await run_question_with_history(
                    question, agents, ui_config, conversation_history
                )

                if response:
                    # Add to conversation history
                    conversation_history.append({"role": "user", "content": question})
                    conversation_history.append(
                        {"role": "assistant", "content": response}
                    )
                    print(f"\n{BRIGHT_GREEN}āœ… Complete!{RESET}", flush=True)
                    print(
                        f"{BRIGHT_CYAN}šŸ’­ History: {len(conversation_history)//2} exchanges{RESET}",
                        flush=True,
                    )
                    print_help_messages()

                else:
                    print(f"\n{BRIGHT_RED}āŒ No response generated{RESET}", flush=True)

            except KeyboardInterrupt:
                print("\nšŸ‘‹ Goodbye!")
                break
            except Exception as e:
                print(f"āŒ Error: {e}", flush=True)
                print("Please try again or type /quit to exit.", flush=True)

    except KeyboardInterrupt:
        print("\nšŸ‘‹ Goodbye!")

async def main():
    """Main CLI entry point."""
    parser = argparse.ArgumentParser(
        description="MassGen - Multi-Agent Coordination CLI",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Use configuration file
  python -m massgen.cli --config config.yaml "What is machine learning?"

  # Quick single agent setup
  python -m massgen.cli --backend openai --model gpt-4o-mini "Explain quantum computing"
  python -m massgen.cli --backend claude --model claude-sonnet-4-20250514 "Analyze this data"

  # Interactive mode
  python -m massgen.cli --config config.yaml

  # Create sample configurations
  python -m massgen.cli --create-samples

Environment Variables:
  OPENAI_API_KEY                    - Required for OpenAI backend
  XAI_API_KEY                       - Required for Grok backend
  ANTHROPIC_API_KEY                 - Required for Claude backend
  GOOGLE_API_KEY or GEMINI_API_KEY  - Required for Gemini backend
        """,
    )

    # Question (optional for interactive mode)
    parser.add_argument(
        "question",
        nargs="?",
        help="Question to ask (optional - if not provided, enters interactive mode)",
    )

    # Configuration options
    config_group = parser.add_mutually_exclusive_group()
    config_group.add_argument(
        "--config", type=str, help="Path to YAML/JSON configuration file"
    )
    config_group.add_argument(
        "--backend",
        type=str,
        choices=["openai", "grok", "claude", "gemini"],
        help="Backend type for quick setup",
    )

    # Quick setup options
    parser.add_argument(
        "--model",
        type=str,
        default="gpt-4o-mini",
        help="Model name for quick setup (default: gpt-4o-mini)",
    )
    parser.add_argument(
        "--system-message", type=str, help="System message for quick setup"
    )

    # UI options
    parser.add_argument(
        "--no-display", action="store_true", help="Disable visual coordination display"
    )
    parser.add_argument("--no-logs", action="store_true", help="Disable logging")

    args = parser.parse_args()

    # Validate arguments
    if not args.backend:
        if not args.model and not args.config:
            parser.error(
                "Without --backend, either --config or --model must be specified"
            )

    try:
        # Load or create configuration
        if args.config:
            config = load_config_file(args.config)
        else:
            model = args.model
            if args.backend:
                backend = args.backend
            else:
                backend = get_backend_type_from_model(model=model)
            if args.system_message:
                system_message = args.system_message
            else:
                system_message = None
            config = create_simple_config(
                backend_type=backend, model=model, system_message=system_message
            )

        # Apply command-line overrides
        ui_config = config.get("ui", {})
        if args.no_display:
            ui_config["display_type"] = "simple"
        if args.no_logs:
            ui_config["logging_enabled"] = False

        # Create agents
        agents = create_agents_from_config(config)

        if not agents:
            raise ConfigurationError("No agents configured")

        # Run mode based on whether a question was provided
        if args.question:
            response = await run_single_question(args.question, agents, ui_config)
            # if response:
            #     print(f"\n{BRIGHT_GREEN}Final Response:{RESET}", flush=True)
            #     print(f"{response}", flush=True)
        else:
            await run_interactive_mode(agents, ui_config)

    except ConfigurationError as e:
        print(f"āŒ Configuration error: {e}", flush=True)
        sys.exit(1)
    except KeyboardInterrupt:
        print("\nšŸ‘‹ Goodbye!", flush=True)
    except Exception as e:
        print(f"āŒ Error: {e}", flush=True)
        sys.exit(1)


if __name__ == "__main__":
    asyncio.run(main())
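
For reference, a minimal multi-agent YAML sketch of the shape create_agents_from_config expects; the agent IDs, models, and system messages are illustrative placeholders, not one of the packaged files under massgen/configs/.

agents:
  - id: "researcher"              # any unique ID; falls back to agent1, agent2, ...
    backend:
      type: "openai"              # may be omitted if the type can be inferred from the model name
      model: "gpt-4o-mini"
    system_message: "Focus on gathering facts."
  - id: "reviewer"
    backend:
      type: "claude"
      model: "claude-sonnet-4-20250514"
    system_message: "Critique and refine the other agent's draft."

ui:
  display_type: "rich_terminal"   # "simple" is what --no-display switches to
  logging_enabled: true

Run it with "python -m massgen.cli --config my_team.yaml <question>", where my_team.yaml is a hypothetical filename; API keys are read from the environment or from a local .env file via load_env_file() above.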