massgen 0.1.4__py3-none-any.whl → 0.1.6__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- massgen/__init__.py +1 -1
- massgen/backend/base_with_custom_tool_and_mcp.py +453 -23
- massgen/backend/capabilities.py +39 -0
- massgen/backend/chat_completions.py +111 -197
- massgen/backend/claude.py +210 -181
- massgen/backend/gemini.py +1015 -1559
- massgen/backend/grok.py +3 -2
- massgen/backend/response.py +160 -220
- massgen/chat_agent.py +340 -20
- massgen/cli.py +399 -25
- massgen/config_builder.py +20 -54
- massgen/config_validator.py +931 -0
- massgen/configs/README.md +95 -10
- massgen/configs/memory/gpt5mini_gemini_baseline_research_to_implementation.yaml +94 -0
- massgen/configs/memory/gpt5mini_gemini_context_window_management.yaml +187 -0
- massgen/configs/memory/gpt5mini_gemini_research_to_implementation.yaml +127 -0
- massgen/configs/memory/gpt5mini_high_reasoning_gemini.yaml +107 -0
- massgen/configs/memory/single_agent_compression_test.yaml +64 -0
- massgen/configs/tools/custom_tools/claude_code_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/claude_custom_tool_example_no_path.yaml +1 -1
- massgen/configs/tools/custom_tools/claude_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/computer_use_browser_example.yaml +1 -1
- massgen/configs/tools/custom_tools/computer_use_docker_example.yaml +1 -1
- massgen/configs/tools/custom_tools/gemini_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/gpt5_nano_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/gpt_oss_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/grok3_mini_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/interop/ag2_and_langgraph_lesson_planner.yaml +65 -0
- massgen/configs/tools/custom_tools/interop/ag2_and_openai_assistant_lesson_planner.yaml +65 -0
- massgen/configs/tools/custom_tools/interop/ag2_lesson_planner_example.yaml +48 -0
- massgen/configs/tools/custom_tools/interop/agentscope_lesson_planner_example.yaml +48 -0
- massgen/configs/tools/custom_tools/interop/langgraph_lesson_planner_example.yaml +49 -0
- massgen/configs/tools/custom_tools/interop/openai_assistant_lesson_planner_example.yaml +50 -0
- massgen/configs/tools/custom_tools/interop/smolagent_lesson_planner_example.yaml +49 -0
- massgen/configs/tools/custom_tools/qwen_api_custom_tool_with_mcp_example.yaml +1 -0
- massgen/configs/tools/custom_tools/two_models_with_tools_example.yaml +44 -0
- massgen/formatter/_gemini_formatter.py +61 -15
- massgen/memory/README.md +277 -0
- massgen/memory/__init__.py +26 -0
- massgen/memory/_base.py +193 -0
- massgen/memory/_compression.py +237 -0
- massgen/memory/_context_monitor.py +211 -0
- massgen/memory/_conversation.py +255 -0
- massgen/memory/_fact_extraction_prompts.py +333 -0
- massgen/memory/_mem0_adapters.py +257 -0
- massgen/memory/_persistent.py +687 -0
- massgen/memory/docker-compose.qdrant.yml +36 -0
- massgen/memory/docs/DESIGN.md +388 -0
- massgen/memory/docs/QUICKSTART.md +409 -0
- massgen/memory/docs/SUMMARY.md +319 -0
- massgen/memory/docs/agent_use_memory.md +408 -0
- massgen/memory/docs/orchestrator_use_memory.md +586 -0
- massgen/memory/examples.py +237 -0
- massgen/orchestrator.py +207 -7
- massgen/tests/memory/test_agent_compression.py +174 -0
- massgen/tests/memory/test_context_window_management.py +286 -0
- massgen/tests/memory/test_force_compression.py +154 -0
- massgen/tests/memory/test_simple_compression.py +147 -0
- massgen/tests/test_ag2_lesson_planner.py +223 -0
- massgen/tests/test_agent_memory.py +534 -0
- massgen/tests/test_config_validator.py +1156 -0
- massgen/tests/test_conversation_memory.py +382 -0
- massgen/tests/test_langgraph_lesson_planner.py +223 -0
- massgen/tests/test_orchestrator_memory.py +620 -0
- massgen/tests/test_persistent_memory.py +435 -0
- massgen/token_manager/token_manager.py +6 -0
- massgen/tool/__init__.py +2 -9
- massgen/tool/_decorators.py +52 -0
- massgen/tool/_extraframework_agents/ag2_lesson_planner_tool.py +251 -0
- massgen/tool/_extraframework_agents/agentscope_lesson_planner_tool.py +303 -0
- massgen/tool/_extraframework_agents/langgraph_lesson_planner_tool.py +275 -0
- massgen/tool/_extraframework_agents/openai_assistant_lesson_planner_tool.py +247 -0
- massgen/tool/_extraframework_agents/smolagent_lesson_planner_tool.py +180 -0
- massgen/tool/_manager.py +102 -16
- massgen/tool/_registered_tool.py +3 -0
- massgen/tool/_result.py +3 -0
- {massgen-0.1.4.dist-info → massgen-0.1.6.dist-info}/METADATA +138 -77
- {massgen-0.1.4.dist-info → massgen-0.1.6.dist-info}/RECORD +82 -37
- massgen/backend/gemini_mcp_manager.py +0 -545
- massgen/backend/gemini_trackers.py +0 -344
- {massgen-0.1.4.dist-info → massgen-0.1.6.dist-info}/WHEEL +0 -0
- {massgen-0.1.4.dist-info → massgen-0.1.6.dist-info}/entry_points.txt +0 -0
- {massgen-0.1.4.dist-info → massgen-0.1.6.dist-info}/licenses/LICENSE +0 -0
- {massgen-0.1.4.dist-info → massgen-0.1.6.dist-info}/top_level.txt +0 -0
massgen/cli.py  CHANGED

@@ -488,8 +488,18 @@ def create_backend(backend_type: str, **kwargs) -> Any:
     raise ConfigurationError(f"Unsupported backend type: {backend_type}")


-def create_agents_from_config(
-
+def create_agents_from_config(
+    config: Dict[str, Any],
+    orchestrator_config: Optional[Dict[str, Any]] = None,
+    config_path: Optional[str] = None,
+    memory_session_id: Optional[str] = None,
+) -> Dict[str, ConfigurableAgent]:
+    """Create agents from configuration.
+
+    Args:
+        memory_session_id: Optional session ID to use for memory isolation.
+            If provided, overrides session_name from YAML config.
+    """
     agents = {}

     agent_entries = [config["agent"]] if "agent" in config else config.get("agents", None)
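For orientation, the reworked signature above can be called as in the following sketch; the variable names and literal values here are illustrative, not taken from the package:

agents = create_agents_from_config(
    config,                                        # dict loaded from the YAML config file
    orchestrator_config=orchestrator_cfg,          # optional orchestrator section
    config_path="configs/my_config.yaml",          # illustrative path
    memory_session_id="session_20250101_120000",   # overrides session_name from YAML
)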
@@ -497,6 +507,43 @@ def create_agents_from_config(config: Dict[str, Any], orchestrator_config: Optio
     if not agent_entries:
         raise ConfigurationError("Configuration must contain either 'agent' or 'agents' section")

+    # Create shared Qdrant client for all agents (avoids concurrent access errors)
+    # ONE client can be used by multiple mem0 instances safely
+    shared_qdrant_client = None
+    global_memory_config = config.get("memory", {})
+    if global_memory_config.get("enabled", False) and global_memory_config.get("persistent_memory", {}).get("enabled", False):
+        try:
+            from qdrant_client import QdrantClient
+
+            pm_config = global_memory_config.get("persistent_memory", {})
+
+            # Support both server mode and file-based mode
+            qdrant_config = pm_config.get("qdrant", {})
+            mode = qdrant_config.get("mode", "local")  # "local" or "server"
+
+            if mode == "server":
+                # Server mode (RECOMMENDED for multi-agent)
+                host = qdrant_config.get("host", "localhost")
+                port = qdrant_config.get("port", 6333)
+                shared_qdrant_client = QdrantClient(host=host, port=port)
+                logger.info(f"🗄️ Shared Qdrant client created (server mode: {host}:{port})")
+            else:
+                # Local file-based mode (single agent only)
+                # WARNING: Does NOT support concurrent access by multiple agents
+                qdrant_path = pm_config.get("path", ".massgen/qdrant")
+                shared_qdrant_client = QdrantClient(path=qdrant_path)
+                logger.info(f"🗄️ Shared Qdrant client created (local mode: {qdrant_path})")
+                if len(agent_entries) > 1:
+                    logger.warning(
+                        "⚠️ Multi-agent setup detected with local Qdrant mode. "
+                        "This may cause concurrent access errors. "
+                        "Consider using server mode: set memory.persistent_memory.qdrant.mode='server'",
+                    )
+        except Exception as e:
+            logger.warning(f"⚠️ Failed to create shared Qdrant client: {e}")
+            logger.warning(" Persistent memory will be disabled for all agents")
+            logger.warning(" For multi-agent setup, start Qdrant server: docker-compose -f docker-compose.qdrant.yml up -d")
+
     for i, agent_data in enumerate(agent_entries, start=1):
         backend_config = agent_data.get("backend", {})

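Read back from the .get() chain above, a memory section that selects the shared server-mode client would deserialize to roughly the following dict. The key names come from the code; the host and port values are only the defaults it falls back to:

memory_section = {
    "enabled": True,
    "persistent_memory": {
        "enabled": True,
        "qdrant": {
            "mode": "server",   # "local" (default) or "server"
            "host": "localhost",
            "port": 6333,
        },
    },
}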
@@ -579,7 +626,201 @@ def create_agents_from_config(config: Dict[str, Any], orchestrator_config: Optio

         # Timeout configuration will be applied to orchestrator instead of individual agents

-        agent
+        # Merge global and per-agent memory configuration
+        global_memory_config = config.get("memory", {})
+        agent_memory_config = agent_data.get("memory", {})
+
+        # Deep merge: agent config overrides global config
+        def merge_configs(global_cfg, agent_cfg):
+            """Recursively merge agent config into global config."""
+            merged = global_cfg.copy()
+            for key, value in agent_cfg.items():
+                if isinstance(value, dict) and key in merged and isinstance(merged[key], dict):
+                    merged[key] = merge_configs(merged[key], value)
+                else:
+                    merged[key] = value
+            return merged
+
+        memory_config = merge_configs(global_memory_config, agent_memory_config)
+
+        # Create context monitor if memory config is enabled
+        context_monitor = None
+        if memory_config.get("enabled", False):
+            from .memory._context_monitor import ContextWindowMonitor
+
+            compression_config = memory_config.get("compression", {})
+            trigger_threshold = compression_config.get("trigger_threshold", 0.75)
+            target_ratio = compression_config.get("target_ratio", 0.40)
+
+            # Get model name from backend config
+            model_name = backend_config.get("model", "unknown")
+
+            # Normalize provider name for monitor
+            provider_map = {
+                "openai": "openai",
+                "anthropic": "anthropic",
+                "claude": "anthropic",
+                "google": "google",
+                "gemini": "google",
+            }
+            provider = provider_map.get(backend_type_lower, backend_type_lower)
+
+            context_monitor = ContextWindowMonitor(
+                model_name=model_name,
+                provider=provider,
+                trigger_threshold=trigger_threshold,
+                target_ratio=target_ratio,
+                enabled=True,
+            )
+            logger.info(
+                f"📊 Context monitor created for {agent_config.agent_id}: " f"{context_monitor.context_window:,} tokens, " f"trigger={trigger_threshold*100:.0f}%, target={target_ratio*100:.0f}%",
+            )
+
+        # Create per-agent memory objects if memory is enabled
+        conversation_memory = None
+        persistent_memory = None
+
+        if memory_config.get("enabled", False):
+            from .memory import ConversationMemory
+
+            # Create conversation memory for this agent
+            if memory_config.get("conversation_memory", {}).get("enabled", True):
+                conversation_memory = ConversationMemory()
+                logger.info(f"💾 Conversation memory created for {agent_config.agent_id}")
+
+            # Create persistent memory for this agent (if enabled)
+            if memory_config.get("persistent_memory", {}).get("enabled", False):
+                from .memory import PersistentMemory
+
+                pm_config = memory_config.get("persistent_memory", {})
+
+                # Get persistent memory configuration
+                agent_name = pm_config.get("agent_name", agent_config.agent_id)
+
+                # Use unified session: memory_session_id (from CLI) > YAML session_name > None
+                session_name = memory_session_id or pm_config.get("session_name")
+
+                on_disk = pm_config.get("on_disk", True)
+                qdrant_path = pm_config.get("path", ".massgen/qdrant")  # Project dir, not /tmp
+
+                try:
+                    # Configure LLM for memory operations (fact extraction)
+                    # RECOMMENDED: Use mem0's native LLMs (no adapter overhead, no async complexity)
+                    llm_cfg = pm_config.get("llm", {})
+
+                    if not llm_cfg:
+                        # Default: gpt-4.1-nano-2025-04-14 (mem0's default, fast and cheap for memory ops)
+                        llm_cfg = {
+                            "provider": "openai",
+                            "model": "gpt-4.1-nano-2025-04-14",
+                        }
+
+                    # Add API key if not specified
+                    if "api_key" not in llm_cfg:
+                        llm_provider = llm_cfg.get("provider", "openai")
+                        if llm_provider == "openai":
+                            llm_cfg["api_key"] = os.getenv("OPENAI_API_KEY")
+                        elif llm_provider == "anthropic":
+                            llm_cfg["api_key"] = os.getenv("ANTHROPIC_API_KEY")
+                        elif llm_provider == "groq":
+                            llm_cfg["api_key"] = os.getenv("GROQ_API_KEY")
+                        # Add more providers as needed
+
+                    # Configure embedding for persistent memory
+                    # RECOMMENDED: Use mem0's native embedders (no adapter overhead)
+                    embedding_cfg = pm_config.get("embedding", {})
+
+                    if not embedding_cfg:
+                        # Default: OpenAI text-embedding-3-small
+                        embedding_cfg = {
+                            "provider": "openai",
+                            "model": "text-embedding-3-small",
+                        }
+
+                    # Add API key if not specified
+                    if "api_key" not in embedding_cfg:
+                        emb_provider = embedding_cfg.get("provider", "openai")
+                        if emb_provider == "openai":
+                            api_key = os.getenv("OPENAI_API_KEY")
+                            if not api_key:
+                                logger.warning("⚠️ OPENAI_API_KEY not found in environment - embedding will fail!")
+                            else:
+                                logger.debug(f"✅ Using OPENAI_API_KEY from environment (key starts with: {api_key[:7]}...)")
+                            embedding_cfg["api_key"] = api_key
+                        elif emb_provider == "together":
+                            embedding_cfg["api_key"] = os.getenv("TOGETHER_API_KEY")
+                        elif emb_provider == "azure_openai":
+                            embedding_cfg["api_key"] = os.getenv("AZURE_OPENAI_API_KEY")
+                        # Add more providers as needed
+
+                    # Use shared Qdrant client if available
+                    if shared_qdrant_client:
+                        persistent_memory = PersistentMemory(
+                            agent_name=agent_name,
+                            session_name=session_name,
+                            llm_config=llm_cfg,  # Use native mem0 LLM
+                            embedding_config=embedding_cfg,  # Use native mem0 embedder
+                            qdrant_client=shared_qdrant_client,  # Share ONE client from server
+                            on_disk=on_disk,
+                        )
+                        logger.info(
+                            f"💾 Persistent memory created for {agent_config.agent_id} "
+                            f"(agent_name={agent_name}, session={session_name or 'cross-session'}, "
+                            f"llm={llm_cfg.get('provider')}/{llm_cfg.get('model')}, "
+                            f"embedder={embedding_cfg.get('provider')}/{embedding_cfg.get('model')}, shared_qdrant=True)",
+                        )
+                    else:
+                        # Fallback: create individual vector store (for backward compatibility)
+                        # WARNING: File-based Qdrant doesn't support concurrent access
+                        from mem0.vector_stores.configs import VectorStoreConfig
+
+                        vector_store_config = VectorStoreConfig(
+                            config={
+                                "on_disk": on_disk,
+                                "path": qdrant_path,
+                            },
+                        )
+
+                        persistent_memory = PersistentMemory(
+                            agent_name=agent_name,
+                            session_name=session_name,
+                            llm_config=llm_cfg,  # Use native mem0 LLM
+                            embedding_config=embedding_cfg,  # Use native mem0 embedder
+                            vector_store_config=vector_store_config,
+                            on_disk=on_disk,
+                        )
+                        logger.info(
+                            f"💾 Persistent memory created for {agent_config.agent_id} "
+                            f"(agent_name={agent_name}, session={session_name or 'cross-session'}, "
+                            f"llm={llm_cfg.get('provider')}/{llm_cfg.get('model')}, "
+                            f"embedder={embedding_cfg.get('provider')}/{embedding_cfg.get('model')}, path={qdrant_path})",
+                        )
+                except Exception as e:
+                    logger.warning(
+                        f"⚠️ Failed to create persistent memory for {agent_config.agent_id}: {e}",
+                    )
+                    persistent_memory = None
+
+        # Create agent
+        agent = ConfigurableAgent(
+            config=agent_config,
+            backend=backend,
+            conversation_memory=conversation_memory,
+            persistent_memory=persistent_memory,
+            context_monitor=context_monitor,
+        )
+
+        # Configure retrieval settings from YAML (if memory is enabled)
+        if memory_config.get("enabled", False):
+            retrieval_config = memory_config.get("retrieval", {})
+            agent._retrieval_limit = retrieval_config.get("limit", 5)
+            agent._retrieval_exclude_recent = retrieval_config.get("exclude_recent", True)
+
+            if retrieval_config:  # Only log if custom config provided
+                logger.info(
+                    f"🔧 Retrieval configured for {agent_config.agent_id}: " f"limit={agent._retrieval_limit}, exclude_recent={agent._retrieval_exclude_recent}",
+                )
+
         agents[agent.config.agent_id] = agent

     return agents
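A quick illustration of the merge_configs helper above: per-agent memory settings are deep-merged over the global section, so an agent can override a single nested key without restating the rest (values here are made up):

global_cfg = {"enabled": True, "compression": {"trigger_threshold": 0.75, "target_ratio": 0.40}}
agent_cfg = {"compression": {"trigger_threshold": 0.60}}
# merge_configs(global_cfg, agent_cfg) ==
# {"enabled": True, "compression": {"trigger_threshold": 0.60, "target_ratio": 0.40}}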
@@ -696,21 +937,25 @@ def relocate_filesystem_paths(config: Dict[str, Any]) -> None:
             backend_config["cwd"] = str(massgen_dir / "workspaces" / user_cwd)


-def load_previous_turns(session_info: Dict[str, Any], session_storage: str) -> List[Dict[str, Any]]:
+def load_previous_turns(session_info: Dict[str, Any], session_storage: str) -> tuple[List[Dict[str, Any]], List[Dict[str, Any]]]:
     """
-    Load previous turns from session storage.
+    Load previous turns and winning agents history from session storage.

     Returns:
-
+        tuple: (previous_turns, winning_agents_history)
+            - previous_turns: List of previous turn metadata dicts
+            - winning_agents_history: List of winning agents for memory sharing
+              Format: [{"agent_id": "agent_b", "turn": 1}, ...]
     """
     session_id = session_info.get("session_id")
     if not session_id:
-        return []
+        return [], []

     session_dir = Path(session_storage) / session_id
     if not session_dir.exists():
-        return []
+        return [], []

+    # Load previous turns
     previous_turns = []
     turn_num = 1

@@ -735,7 +980,17 @@ def load_previous_turns(session_info: Dict[str, Any], session_storage: str) -> List[Dict[str, Any]]:

         turn_num += 1

-
+    # Load winning agents history for memory sharing across turns
+    winning_agents_history = []
+    winning_agents_file = session_dir / "winning_agents_history.json"
+    if winning_agents_file.exists():
+        try:
+            winning_agents_history = json.loads(winning_agents_file.read_text(encoding="utf-8"))
+            logger.info(f"📚 Loaded {len(winning_agents_history)} winning agent(s) from session storage: {winning_agents_history}")
+        except Exception as e:
+            logger.warning(f"⚠️ Failed to load winning agents history: {e}")
+
+    return previous_turns, winning_agents_history


 async def handle_session_persistence(
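Per the docstring above, winning_agents_history.json holds a flat list of winner records; a session with two completed turns might contain (illustrative values):

winning_agents_history = [
    {"agent_id": "agent_b", "turn": 1},
    {"agent_id": "agent_a", "turn": 2},
]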
@@ -795,6 +1050,16 @@ async def handle_session_persistence(
     metadata_file = turn_dir / "metadata.json"
     metadata_file.write_text(json.dumps(metadata, indent=2), encoding="utf-8")

+    # Save winning agents history for memory sharing across turns
+    # This allows the orchestrator to restore winner tracking when recreated
+    if final_result.get("winning_agents_history"):
+        winning_agents_file = session_dir / "winning_agents_history.json"
+        winning_agents_file.write_text(
+            json.dumps(final_result["winning_agents_history"], indent=2),
+            encoding="utf-8",
+        )
+        logger.info(f"📚 Saved {len(final_result['winning_agents_history'])} winning agent(s) to session storage")
+
     # Create/update session summary for easy viewing
     session_summary_file = session_dir / "SESSION_SUMMARY.txt"
     summary_lines = []
@@ -896,8 +1161,8 @@ async def run_question_with_history(
         max_orchestration_restarts=coord_cfg.get("max_orchestration_restarts", 0),
     )

-    # Load previous turns from session storage for multi-turn conversations
-    previous_turns = load_previous_turns(session_info, session_storage)
+    # Load previous turns and winning agents history from session storage for multi-turn conversations
+    previous_turns, winning_agents_history = load_previous_turns(session_info, session_storage)

    orchestrator = Orchestrator(
        agents=agents,
@@ -905,6 +1170,7 @@
        snapshot_storage=snapshot_storage,
        agent_temporary_workspace=agent_temporary_workspace,
        previous_turns=previous_turns,
+        winning_agents_history=winning_agents_history,  # Restore for memory sharing
     )
     # Create a fresh UI instance for each question to ensure clean state
     ui = CoordinationUI(
@@ -1883,6 +2149,7 @@
     original_config: Dict[str, Any] = None,
     orchestrator_cfg: Dict[str, Any] = None,
     config_path: Optional[str] = None,
+    memory_session_id: Optional[str] = None,
     **kwargs,
 ):
     """Run MassGen in interactive mode with conversation history."""
@@ -1971,8 +2238,13 @@
     if original_config and orchestrator_cfg:
         config_modified = prompt_for_context_paths(original_config, orchestrator_cfg)
         if config_modified:
-            # Recreate agents with updated context paths
-            agents = create_agents_from_config(
+            # Recreate agents with updated context paths (use same session)
+            agents = create_agents_from_config(
+                original_config,
+                orchestrator_cfg,
+                config_path=config_path,
+                memory_session_id=memory_session_id,
+            )
             print(f" {BRIGHT_GREEN}✓ Agents reloaded with updated context paths{RESET}", flush=True)
             print()

@@ -1982,7 +2254,8 @@
     conversation_history = []

     # Session management for multi-turn filesystem support
-
+    # Use memory_session_id (unified with memory system) if provided, otherwise create later
+    session_id = memory_session_id
     current_turn = 0
     session_storage = kwargs.get("orchestrator", {}).get("session_storage", "sessions")

@@ -2029,8 +2302,13 @@
                 new_turn_config = {"path": str(latest_turn_workspace.resolve()), "permission": "read"}
                 backend_config["context_paths"] = existing_context_paths + [new_turn_config]

-            # Recreate agents from modified config
-            agents = create_agents_from_config(
+            # Recreate agents from modified config (use same session)
+            agents = create_agents_from_config(
+                modified_config,
+                orchestrator_cfg,
+                config_path=config_path,
+                memory_session_id=session_id,
+            )
             logger.info(f"[CLI] Successfully recreated {len(agents)} agents with turn {current_turn} path as read-only context")

         question = input(f"\n{BRIGHT_BLUE}👤 User:{RESET} ").strip()
@@ -2234,6 +2512,27 @@ async def main(args):
         if args.debug:
             logger.debug(f"Resolved config path: {resolved_path}")
             logger.debug(f"Config content: {json.dumps(config, indent=2)}")
+
+        # Automatic config validation (unless --skip-validation flag is set)
+        if not args.skip_validation:
+            from .config_validator import ConfigValidator
+
+            validator = ConfigValidator()
+            validation_result = validator.validate_config(config)
+
+            # Show errors if any
+            if validation_result.has_errors():
+                print(validation_result.format_errors(), file=sys.stderr)
+                print(f"\n{BRIGHT_RED}❌ Config validation failed. Fix errors above or use --skip-validation to bypass.{RESET}\n")
+                sys.exit(1)
+
+            # Show warnings (non-blocking unless --strict-validation)
+            if validation_result.has_warnings():
+                print(validation_result.format_warnings())
+                if args.strict_validation:
+                    print(f"\n{BRIGHT_RED}❌ Config validation failed in strict mode (warnings treated as errors).{RESET}\n")
+                    sys.exit(1)
+                print()  # Extra newline for readability
     else:
         model = args.model
         if args.backend:
@@ -2322,7 +2621,28 @@
            ' agent_temporary_workspace: "your_temp_dir" # Directory for temporary agent workspaces',
        )

-
+    # Create unified session ID for memory system (before creating agents)
+    # This ensures memory is isolated per session and unifies orchestrator + memory sessions
+    memory_session_id = None
+    if args.question:
+        # Single question mode: Create temp session per run
+        from datetime import datetime
+
+        memory_session_id = f"temp_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
+        logger.info(f"📝 Created temp session for single-question mode: {memory_session_id}")
+    else:
+        # Interactive mode: Create session now (will be reused by orchestrator)
+        from datetime import datetime
+
+        memory_session_id = f"session_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
+        logger.info(f"📝 Created session for interactive mode: {memory_session_id}")
+
+    agents = create_agents_from_config(
+        config,
+        orchestrator_cfg,
+        config_path=str(resolved_path) if resolved_path else None,
+        memory_session_id=memory_session_id,
+    )

     if not agents:
         raise ConfigurationError("No agents configured")
@@ -2358,9 +2678,17 @@
             # print(f"\n{BRIGHT_GREEN}Final Response:{RESET}", flush=True)
             # print(f"{response}", flush=True)
         else:
-            # Pass the config path to interactive mode
+            # Pass the config path and session_id to interactive mode
             config_file_path = str(resolved_path) if args.config and resolved_path else None
-            await run_interactive_mode(
+            await run_interactive_mode(
+                agents,
+                ui_config,
+                original_config=config,
+                orchestrator_cfg=orchestrator_cfg,
+                config_path=config_file_path,
+                memory_session_id=memory_session_id,
+                **kwargs,
+            )
     finally:
         # Cleanup all agents' filesystem managers (including Docker containers)
         for agent_id, agent in agents.items():
@@ -2518,6 +2846,33 @@ Environment Variables:
        action="store_true",
        help="Include example configurations in schema display",
    )
+    parser.add_argument(
+        "--validate",
+        type=str,
+        metavar="CONFIG_FILE",
+        help="Validate a configuration file without running it",
+    )
+    parser.add_argument(
+        "--strict",
+        action="store_true",
+        help="Treat warnings as errors during validation (use with --validate)",
+    )
+    parser.add_argument(
+        "--json",
+        dest="json_output",
+        action="store_true",
+        help="Output validation results in JSON format (use with --validate)",
+    )
+    parser.add_argument(
+        "--skip-validation",
+        action="store_true",
+        help="Skip automatic config validation when loading config files",
+    )
+    parser.add_argument(
+        "--strict-validation",
+        action="store_true",
+        help="Treat config warnings as errors and abort execution",
+    )

     # Timeout options
     timeout_group = parser.add_argument_group("timeout settings", "Override timeout settings from config")
@@ -2529,14 +2884,26 @@ Environment Variables:

     args = parser.parse_args()

-    #
-
+    # Handle special commands first (before logging setup to avoid creating log dirs)
+    if args.validate:
+        from .config_validator import ConfigValidator

-
-
-
+        validator = ConfigValidator()
+        result = validator.validate_config_file(args.validate)
+
+        # Output results
+        if args.json_output:
+            # JSON output for machine parsing
+            print(json.dumps(result.to_dict(), indent=2))
+        else:
+            # Human-readable output
+            print(result.format_all())
+
+        # Exit with appropriate code
+        if not result.is_valid() or (args.strict and result.has_warnings()):
+            sys.exit(1)
+        sys.exit(0)

-    # Handle special commands first
     if args.list_examples:
         show_available_examples()
         return
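The --validate branch above can also be exercised programmatically; this minimal sketch uses only the ConfigValidator methods visible in this diff (validate_config_file, format_all, is_valid) and an illustrative file name:

from massgen.config_validator import ConfigValidator

result = ConfigValidator().validate_config_file("configs/my_config.yaml")
print(result.format_all())   # human-readable report
if not result.is_valid():    # mirror the CLI's exit behaviour
    raise SystemExit(1)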
@@ -2551,6 +2918,13 @@ Environment Variables:
         show_schema(backend=args.schema_backend, show_examples=args.with_examples)
         return

+    # Setup logging for all other commands (actual execution, setup, init, etc.)
+    setup_logging(debug=args.debug)
+
+    if args.debug:
+        logger.info("Debug mode enabled")
+        logger.debug(f"Command line arguments: {vars(args)}")
+
     # Launch interactive API key setup if requested
     if args.setup:
         builder = ConfigBuilder()