massgen 0.0.3-py3-none-any.whl → 0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of massgen might be problematic.
- massgen/__init__.py +142 -8
- massgen/adapters/__init__.py +29 -0
- massgen/adapters/ag2_adapter.py +483 -0
- massgen/adapters/base.py +183 -0
- massgen/adapters/tests/__init__.py +0 -0
- massgen/adapters/tests/test_ag2_adapter.py +439 -0
- massgen/adapters/tests/test_agent_adapter.py +128 -0
- massgen/adapters/utils/__init__.py +2 -0
- massgen/adapters/utils/ag2_utils.py +236 -0
- massgen/adapters/utils/tests/__init__.py +0 -0
- massgen/adapters/utils/tests/test_ag2_utils.py +138 -0
- massgen/agent_config.py +329 -55
- massgen/api_params_handler/__init__.py +10 -0
- massgen/api_params_handler/_api_params_handler_base.py +99 -0
- massgen/api_params_handler/_chat_completions_api_params_handler.py +176 -0
- massgen/api_params_handler/_claude_api_params_handler.py +113 -0
- massgen/api_params_handler/_response_api_params_handler.py +130 -0
- massgen/backend/__init__.py +39 -4
- massgen/backend/azure_openai.py +385 -0
- massgen/backend/base.py +341 -69
- massgen/backend/base_with_mcp.py +1102 -0
- massgen/backend/capabilities.py +386 -0
- massgen/backend/chat_completions.py +577 -130
- massgen/backend/claude.py +1033 -537
- massgen/backend/claude_code.py +1203 -0
- massgen/backend/cli_base.py +209 -0
- massgen/backend/docs/BACKEND_ARCHITECTURE.md +126 -0
- massgen/backend/{CLAUDE_API_RESEARCH.md → docs/CLAUDE_API_RESEARCH.md} +18 -18
- massgen/backend/{GEMINI_API_DOCUMENTATION.md → docs/GEMINI_API_DOCUMENTATION.md} +9 -9
- massgen/backend/docs/Gemini MCP Integration Analysis.md +1050 -0
- massgen/backend/docs/MCP_IMPLEMENTATION_CLAUDE_BACKEND.md +177 -0
- massgen/backend/docs/MCP_INTEGRATION_RESPONSE_BACKEND.md +352 -0
- massgen/backend/docs/OPENAI_GPT5_MODELS.md +211 -0
- massgen/backend/{OPENAI_RESPONSES_API_FORMAT.md → docs/OPENAI_RESPONSE_API_TOOL_CALLS.md} +3 -3
- massgen/backend/docs/OPENAI_response_streaming.md +20654 -0
- massgen/backend/docs/inference_backend.md +257 -0
- massgen/backend/docs/permissions_and_context_files.md +1085 -0
- massgen/backend/external.py +126 -0
- massgen/backend/gemini.py +1850 -241
- massgen/backend/grok.py +40 -156
- massgen/backend/inference.py +156 -0
- massgen/backend/lmstudio.py +171 -0
- massgen/backend/response.py +1095 -322
- massgen/chat_agent.py +131 -113
- massgen/cli.py +1560 -275
- massgen/config_builder.py +2396 -0
- massgen/configs/BACKEND_CONFIGURATION.md +458 -0
- massgen/configs/README.md +559 -216
- massgen/configs/ag2/ag2_case_study.yaml +27 -0
- massgen/configs/ag2/ag2_coder.yaml +34 -0
- massgen/configs/ag2/ag2_coder_case_study.yaml +36 -0
- massgen/configs/ag2/ag2_gemini.yaml +27 -0
- massgen/configs/ag2/ag2_groupchat.yaml +108 -0
- massgen/configs/ag2/ag2_groupchat_gpt.yaml +118 -0
- massgen/configs/ag2/ag2_single_agent.yaml +21 -0
- massgen/configs/basic/multi/fast_timeout_example.yaml +37 -0
- massgen/configs/basic/multi/gemini_4o_claude.yaml +31 -0
- massgen/configs/basic/multi/gemini_gpt5nano_claude.yaml +36 -0
- massgen/configs/{gemini_4o_claude.yaml → basic/multi/geminicode_4o_claude.yaml} +3 -3
- massgen/configs/basic/multi/geminicode_gpt5nano_claude.yaml +36 -0
- massgen/configs/basic/multi/glm_gemini_claude.yaml +25 -0
- massgen/configs/basic/multi/gpt4o_audio_generation.yaml +30 -0
- massgen/configs/basic/multi/gpt4o_image_generation.yaml +31 -0
- massgen/configs/basic/multi/gpt5nano_glm_qwen.yaml +26 -0
- massgen/configs/basic/multi/gpt5nano_image_understanding.yaml +26 -0
- massgen/configs/{three_agents_default.yaml → basic/multi/three_agents_default.yaml} +8 -4
- massgen/configs/basic/multi/three_agents_opensource.yaml +27 -0
- massgen/configs/basic/multi/three_agents_vllm.yaml +20 -0
- massgen/configs/basic/multi/two_agents_gemini.yaml +19 -0
- massgen/configs/{two_agents.yaml → basic/multi/two_agents_gpt5.yaml} +14 -6
- massgen/configs/basic/multi/two_agents_opensource_lmstudio.yaml +31 -0
- massgen/configs/basic/multi/two_qwen_vllm_sglang.yaml +28 -0
- massgen/configs/{single_agent.yaml → basic/single/single_agent.yaml} +1 -1
- massgen/configs/{single_flash2.5.yaml → basic/single/single_flash2.5.yaml} +1 -2
- massgen/configs/basic/single/single_gemini2.5pro.yaml +16 -0
- massgen/configs/basic/single/single_gpt4o_audio_generation.yaml +22 -0
- massgen/configs/basic/single/single_gpt4o_image_generation.yaml +22 -0
- massgen/configs/basic/single/single_gpt4o_video_generation.yaml +24 -0
- massgen/configs/basic/single/single_gpt5nano.yaml +20 -0
- massgen/configs/basic/single/single_gpt5nano_file_search.yaml +18 -0
- massgen/configs/basic/single/single_gpt5nano_image_understanding.yaml +17 -0
- massgen/configs/basic/single/single_gptoss120b.yaml +15 -0
- massgen/configs/basic/single/single_openrouter_audio_understanding.yaml +15 -0
- massgen/configs/basic/single/single_qwen_video_understanding.yaml +15 -0
- massgen/configs/debug/code_execution/command_filtering_blacklist.yaml +29 -0
- massgen/configs/debug/code_execution/command_filtering_whitelist.yaml +28 -0
- massgen/configs/debug/code_execution/docker_verification.yaml +29 -0
- massgen/configs/debug/skip_coordination_test.yaml +27 -0
- massgen/configs/debug/test_sdk_migration.yaml +17 -0
- massgen/configs/docs/DISCORD_MCP_SETUP.md +208 -0
- massgen/configs/docs/TWITTER_MCP_ENESCINAR_SETUP.md +82 -0
- massgen/configs/providers/azure/azure_openai_multi.yaml +21 -0
- massgen/configs/providers/azure/azure_openai_single.yaml +19 -0
- massgen/configs/providers/claude/claude.yaml +14 -0
- massgen/configs/providers/gemini/gemini_gpt5nano.yaml +28 -0
- massgen/configs/providers/local/lmstudio.yaml +11 -0
- massgen/configs/providers/openai/gpt5.yaml +46 -0
- massgen/configs/providers/openai/gpt5_nano.yaml +46 -0
- massgen/configs/providers/others/grok_single_agent.yaml +19 -0
- massgen/configs/providers/others/zai_coding_team.yaml +108 -0
- massgen/configs/providers/others/zai_glm45.yaml +12 -0
- massgen/configs/{creative_team.yaml → teams/creative/creative_team.yaml} +16 -6
- massgen/configs/{travel_planning.yaml → teams/creative/travel_planning.yaml} +16 -6
- massgen/configs/{news_analysis.yaml → teams/research/news_analysis.yaml} +16 -6
- massgen/configs/{research_team.yaml → teams/research/research_team.yaml} +15 -7
- massgen/configs/{technical_analysis.yaml → teams/research/technical_analysis.yaml} +16 -6
- massgen/configs/tools/code-execution/basic_command_execution.yaml +25 -0
- massgen/configs/tools/code-execution/code_execution_use_case_simple.yaml +41 -0
- massgen/configs/tools/code-execution/docker_claude_code.yaml +32 -0
- massgen/configs/tools/code-execution/docker_multi_agent.yaml +32 -0
- massgen/configs/tools/code-execution/docker_simple.yaml +29 -0
- massgen/configs/tools/code-execution/docker_with_resource_limits.yaml +32 -0
- massgen/configs/tools/code-execution/multi_agent_playwright_automation.yaml +57 -0
- massgen/configs/tools/filesystem/cc_gpt5_gemini_filesystem.yaml +34 -0
- massgen/configs/tools/filesystem/claude_code_context_sharing.yaml +68 -0
- massgen/configs/tools/filesystem/claude_code_flash2.5.yaml +43 -0
- massgen/configs/tools/filesystem/claude_code_flash2.5_gptoss.yaml +49 -0
- massgen/configs/tools/filesystem/claude_code_gpt5nano.yaml +31 -0
- massgen/configs/tools/filesystem/claude_code_single.yaml +40 -0
- massgen/configs/tools/filesystem/fs_permissions_test.yaml +87 -0
- massgen/configs/tools/filesystem/gemini_gemini_workspace_cleanup.yaml +54 -0
- massgen/configs/tools/filesystem/gemini_gpt5_filesystem_casestudy.yaml +30 -0
- massgen/configs/tools/filesystem/gemini_gpt5nano_file_context_path.yaml +43 -0
- massgen/configs/tools/filesystem/gemini_gpt5nano_protected_paths.yaml +45 -0
- massgen/configs/tools/filesystem/gpt5mini_cc_fs_context_path.yaml +31 -0
- massgen/configs/tools/filesystem/grok4_gpt5_gemini_filesystem.yaml +32 -0
- massgen/configs/tools/filesystem/multiturn/grok4_gpt5_claude_code_filesystem_multiturn.yaml +58 -0
- massgen/configs/tools/filesystem/multiturn/grok4_gpt5_gemini_filesystem_multiturn.yaml +58 -0
- massgen/configs/tools/filesystem/multiturn/two_claude_code_filesystem_multiturn.yaml +47 -0
- massgen/configs/tools/filesystem/multiturn/two_gemini_flash_filesystem_multiturn.yaml +48 -0
- massgen/configs/tools/mcp/claude_code_discord_mcp_example.yaml +27 -0
- massgen/configs/tools/mcp/claude_code_simple_mcp.yaml +35 -0
- massgen/configs/tools/mcp/claude_code_twitter_mcp_example.yaml +32 -0
- massgen/configs/tools/mcp/claude_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/claude_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/five_agents_travel_mcp_test.yaml +157 -0
- massgen/configs/tools/mcp/five_agents_weather_mcp_test.yaml +103 -0
- massgen/configs/tools/mcp/gemini_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test.yaml +23 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test_sharing.yaml +23 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test_single_agent.yaml +17 -0
- massgen/configs/tools/mcp/gemini_mcp_filesystem_test_with_claude_code.yaml +24 -0
- massgen/configs/tools/mcp/gemini_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/gemini_notion_mcp.yaml +52 -0
- massgen/configs/tools/mcp/gpt5_nano_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/gpt5_nano_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/gpt5mini_claude_code_discord_mcp_example.yaml +38 -0
- massgen/configs/tools/mcp/gpt_oss_mcp_example.yaml +25 -0
- massgen/configs/tools/mcp/gpt_oss_mcp_test.yaml +28 -0
- massgen/configs/tools/mcp/grok3_mini_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/grok3_mini_mcp_test.yaml +27 -0
- massgen/configs/tools/mcp/multimcp_gemini.yaml +111 -0
- massgen/configs/tools/mcp/qwen_api_mcp_example.yaml +25 -0
- massgen/configs/tools/mcp/qwen_api_mcp_test.yaml +28 -0
- massgen/configs/tools/mcp/qwen_local_mcp_example.yaml +24 -0
- massgen/configs/tools/mcp/qwen_local_mcp_test.yaml +27 -0
- massgen/configs/tools/planning/five_agents_discord_mcp_planning_mode.yaml +140 -0
- massgen/configs/tools/planning/five_agents_filesystem_mcp_planning_mode.yaml +151 -0
- massgen/configs/tools/planning/five_agents_notion_mcp_planning_mode.yaml +151 -0
- massgen/configs/tools/planning/five_agents_twitter_mcp_planning_mode.yaml +155 -0
- massgen/configs/tools/planning/gpt5_mini_case_study_mcp_planning_mode.yaml +73 -0
- massgen/configs/tools/web-search/claude_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/gemini_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/gpt5_mini_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/gpt_oss_streamable_http_test.yaml +44 -0
- massgen/configs/tools/web-search/grok3_mini_streamable_http_test.yaml +43 -0
- massgen/configs/tools/web-search/qwen_api_streamable_http_test.yaml +44 -0
- massgen/configs/tools/web-search/qwen_local_streamable_http_test.yaml +43 -0
- massgen/coordination_tracker.py +708 -0
- massgen/docker/README.md +462 -0
- massgen/filesystem_manager/__init__.py +21 -0
- massgen/filesystem_manager/_base.py +9 -0
- massgen/filesystem_manager/_code_execution_server.py +545 -0
- massgen/filesystem_manager/_docker_manager.py +477 -0
- massgen/filesystem_manager/_file_operation_tracker.py +248 -0
- massgen/filesystem_manager/_filesystem_manager.py +813 -0
- massgen/filesystem_manager/_path_permission_manager.py +1261 -0
- massgen/filesystem_manager/_workspace_tools_server.py +1815 -0
- massgen/formatter/__init__.py +10 -0
- massgen/formatter/_chat_completions_formatter.py +284 -0
- massgen/formatter/_claude_formatter.py +235 -0
- massgen/formatter/_formatter_base.py +156 -0
- massgen/formatter/_response_formatter.py +263 -0
- massgen/frontend/__init__.py +1 -2
- massgen/frontend/coordination_ui.py +471 -286
- massgen/frontend/displays/base_display.py +56 -11
- massgen/frontend/displays/create_coordination_table.py +1956 -0
- massgen/frontend/displays/rich_terminal_display.py +1259 -619
- massgen/frontend/displays/simple_display.py +9 -4
- massgen/frontend/displays/terminal_display.py +27 -68
- massgen/logger_config.py +681 -0
- massgen/mcp_tools/README.md +232 -0
- massgen/mcp_tools/__init__.py +105 -0
- massgen/mcp_tools/backend_utils.py +1035 -0
- massgen/mcp_tools/circuit_breaker.py +195 -0
- massgen/mcp_tools/client.py +894 -0
- massgen/mcp_tools/config_validator.py +138 -0
- massgen/mcp_tools/docs/circuit_breaker.md +646 -0
- massgen/mcp_tools/docs/client.md +950 -0
- massgen/mcp_tools/docs/config_validator.md +478 -0
- massgen/mcp_tools/docs/exceptions.md +1165 -0
- massgen/mcp_tools/docs/security.md +854 -0
- massgen/mcp_tools/exceptions.py +338 -0
- massgen/mcp_tools/hooks.py +212 -0
- massgen/mcp_tools/security.py +780 -0
- massgen/message_templates.py +342 -64
- massgen/orchestrator.py +1515 -241
- massgen/stream_chunk/__init__.py +35 -0
- massgen/stream_chunk/base.py +92 -0
- massgen/stream_chunk/multimodal.py +237 -0
- massgen/stream_chunk/text.py +162 -0
- massgen/tests/mcp_test_server.py +150 -0
- massgen/tests/multi_turn_conversation_design.md +0 -8
- massgen/tests/test_azure_openai_backend.py +156 -0
- massgen/tests/test_backend_capabilities.py +262 -0
- massgen/tests/test_backend_event_loop_all.py +179 -0
- massgen/tests/test_chat_completions_refactor.py +142 -0
- massgen/tests/test_claude_backend.py +15 -28
- massgen/tests/test_claude_code.py +268 -0
- massgen/tests/test_claude_code_context_sharing.py +233 -0
- massgen/tests/test_claude_code_orchestrator.py +175 -0
- massgen/tests/test_cli_backends.py +180 -0
- massgen/tests/test_code_execution.py +679 -0
- massgen/tests/test_external_agent_backend.py +134 -0
- massgen/tests/test_final_presentation_fallback.py +237 -0
- massgen/tests/test_gemini_planning_mode.py +351 -0
- massgen/tests/test_grok_backend.py +7 -10
- massgen/tests/test_http_mcp_server.py +42 -0
- massgen/tests/test_integration_simple.py +198 -0
- massgen/tests/test_mcp_blocking.py +125 -0
- massgen/tests/test_message_context_building.py +29 -47
- massgen/tests/test_orchestrator_final_presentation.py +48 -0
- massgen/tests/test_path_permission_manager.py +2087 -0
- massgen/tests/test_rich_terminal_display.py +14 -13
- massgen/tests/test_timeout.py +133 -0
- massgen/tests/test_v3_3agents.py +11 -12
- massgen/tests/test_v3_simple.py +8 -13
- massgen/tests/test_v3_three_agents.py +11 -18
- massgen/tests/test_v3_two_agents.py +8 -13
- massgen/token_manager/__init__.py +7 -0
- massgen/token_manager/token_manager.py +400 -0
- massgen/utils.py +52 -16
- massgen/v1/agent.py +45 -91
- massgen/v1/agents.py +18 -53
- massgen/v1/backends/gemini.py +50 -153
- massgen/v1/backends/grok.py +21 -54
- massgen/v1/backends/oai.py +39 -111
- massgen/v1/cli.py +36 -93
- massgen/v1/config.py +8 -12
- massgen/v1/logging.py +43 -127
- massgen/v1/main.py +18 -32
- massgen/v1/orchestrator.py +68 -209
- massgen/v1/streaming_display.py +62 -163
- massgen/v1/tools.py +8 -12
- massgen/v1/types.py +9 -23
- massgen/v1/utils.py +5 -23
- massgen-0.1.0.dist-info/METADATA +1245 -0
- massgen-0.1.0.dist-info/RECORD +273 -0
- massgen-0.1.0.dist-info/entry_points.txt +2 -0
- massgen/frontend/logging/__init__.py +0 -9
- massgen/frontend/logging/realtime_logger.py +0 -197
- massgen-0.0.3.dist-info/METADATA +0 -568
- massgen-0.0.3.dist-info/RECORD +0 -76
- massgen-0.0.3.dist-info/entry_points.txt +0 -2
- /massgen/backend/{Function calling openai responses.md → docs/Function calling openai responses.md} +0 -0
- {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/WHEEL +0 -0
- {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/licenses/LICENSE +0 -0
- {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/top_level.txt +0 -0
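The YAML configs added in this release are driven through the MassGen CLI; the usage comments embedded in the config hunks below illustrate the two invocation styles (commands copied from those comments):

    # installed console script, using the @examples shorthand for bundled configs
    massgen --config @examples/basic/single/single_gpt5nano_image_understanding "Please summarize the content in this image."

    # module invocation from a source checkout via uv
    uv run python -m massgen.cli --config massgen/configs/basic/single/single_openrouter_audio_understanding.yaml "What is in this recording?"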
@@ -0,0 +1,26 @@
+# MassGen Multimodal GPT Configuration
+# massgen --config @examples/basic/multi/gpt5nano_image_understanding "Please summarize the content in this image."
+agents:
+  - id: "response_agent1"
+    backend:
+      type: "openai"
+      model: "gpt-5-nano"
+      upload_files:
+        - image_path: "massgen/configs/resources/v0.0.27-example/multimodality.jpg" #it will auto dected by backend
+      # enable_code_execution: true
+    system_message: "You are a helpful assistant"
+
+  - id: "response_agent2"
+    backend:
+      type: "openai"
+      model: "gpt-5-nano"
+      upload_files:
+        - image_path: "massgen/configs/resources/v0.0.27-example/multimodality.jpg" #it will auto dected by backend
+      # enable_code_execution: true
+    system_message: "You are a helpful assistant"
+
+
+# Display configuration
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
@@ -1,5 +1,5 @@
 # MassGen Three Agent Configuration
-# Gemini-2.5-flash, GPT-
+# Gemini-2.5-flash, GPT-5-nano, and Grok-3-mini with builtin tools enabled
 
 agents:
   - id: "gemini2.5flash"
@@ -10,10 +10,15 @@ agents:
       # enable_code_execution: true
       # system_message: "You are a helpful AI assistant with web search and code execution capabilities. For any question involving current events, recent information, or real-time data, ALWAYS use web search first."
 
-  - id: "
+  - id: "gpt5nano"
     backend:
       type: "openai"
-      model: "gpt-
+      model: "gpt-5-nano"
+      text:
+        verbosity: "medium"
+      reasoning:
+        effort: "medium"
+        summary: "auto"
       enable_web_search: true
       enable_code_interpreter: true
       # system_message: "You are a helpful AI assistant with web search and code execution capabilities. For any question involving current events, recent information, or real-time data, ALWAYS use web search first."
@@ -23,7 +28,6 @@ agents:
       type: "grok"
       model: "grok-3-mini"
       enable_web_search: true
-      return_citations: true
       # system_message: "You are a helpful AI assistant with web search capabilities. For any question involving current events, recent information, or real-time data, ALWAYS use web search first."
 
 ui:
@@ -0,0 +1,27 @@
+# MassGen Three Agent Configuration
+# 3 GPT-OSS-120B models
+
+agents:
+  - id: "gpt-oss-1" # Cerebras AI
+    backend:
+      type: "chatcompletion"
+      model: "gpt-oss-120b"
+      base_url: "https://api.cerebras.ai/v1"
+      # api_key: "cerebras_api_key"
+  - id: "qwen"
+    backend:
+      type: "chatcompletion"
+
+      model: "Qwen/Qwen3-4B-fast"
+      base_url: "https://api.studio.nebius.ai/v1"
+      # api_key: "Nebius_api_key"
+  - id: "gpt-oss-2"
+    backend:
+      type: "chatcompletion"
+      model: "accounts/fireworks/models/gpt-oss-20b"
+      base_url: "https://api.fireworks.ai/inference/v1"
+      # api_key: "Fireworks_api_key"
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
+
@@ -0,0 +1,20 @@
+# MassGen Three Agent Configuration with vllm and opensource models
+agents:
+  - id: "gpt-oss"
+    backend:
+      type: "chatcompletion"
+      model: "gpt-oss-120b"
+      base_url: "https://api.cerebras.ai/v1"
+  - id: "qwen"
+    backend:
+      type: "vllm"
+      model: "Qwen/Qwen3-4B"
+      base_url: "http://localhost:8000/v1" #Change this to your vLLM server
+  - id: "glm"
+    backend:
+      type: "chatcompletion"
+      model: "glm-4.5"
+      base_url: "https://api.z.ai/api/paas/v4"
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
@@ -0,0 +1,19 @@
+# cmd: massgen --config @examples/basic/multi/two_agents_gemini "Generate and refine a structured Todo list for learning about LLM multi-agent systems, complete with exciting objectives and fun activities. Each time you have a new version, create a new Notion page with a title and the current date and time (including hours, minutes, seconds, and milliseconds) to store the list. Then, verify that you can access the page and read back the content. Create this page as a subpage under an existing notion page called 'LLM Agent Research (x)', where x is either 1 or 2 depending on which you have access to."
+agents:
+  - id: "gemini-2.5-pro1"
+    backend:
+      type: "gemini"
+      model: "gemini-2.5-pro"
+      enable_web_search: true
+
+  - id: "gemini-2.5-pro2"
+    backend:
+      type: "gemini"
+      model: "gemini-2.5-pro"
+      enable_web_search: true
+
+
+# UI Configuration
+ui:
+  type: "rich_terminal"
+  logging_enabled: true
@@ -5,10 +5,14 @@ agents:
   - id: "primary_agent"
     backend:
       type: "openai"
-      model: "gpt-
-
-
+      model: "gpt-5"
+      text:
+        verbosity: "high"
+      reasoning:
+        effort: "high"
+        summary: "auto"
       enable_web_search: true
+      enable_code_interpreter: true
     system_message: |
       You are a knowledgeable primary agent who provides comprehensive,
       well-researched responses. Focus on:
@@ -20,10 +24,14 @@ agents:
   - id: "secondary_agent"
     backend:
       type: "openai"
-      model: "gpt-
-
-
+      model: "gpt-5-nano"
+      text:
+        verbosity: "medium"
+      reasoning:
+        effort: "medium"
+        summary: "auto"
       enable_web_search: true
+      enable_code_interpreter: true
     system_message: |
       You are a secondary agent who reviews, refines, and enhances responses.
       Focus on:
@@ -0,0 +1,31 @@
+agents:
+
+  - id: "GPT-5"
+    backend:
+      type: "openai"
+      model: "gpt-5"
+      text:
+        verbosity: "high"
+      reasoning:
+        effort: "medium"
+        summary: "auto"
+      enable_web_search: true
+      enable_code_interpreter: true
+    system_message: |
+      You are a knowledgeable primary agent who provides comprehensive,
+      well-researched responses. Focus on:
+      - Thorough analysis and research
+      - Accurate and detailed information
+      - Clear reasoning and explanation
+      - Comprehensive coverage of the topic
+
+  - id: "Qwen3-4b"
+    backend:
+      type: "lmstudio"
+      model: "qwen/qwen3-4b-2507"
+      # api_key: "lm-studio"
+
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
+
@@ -0,0 +1,28 @@
+# MassGen Two Agent Configuration both calling the same model but different inference server for testing
+# In one terminal window, in an environment with vLLM installed, run:
+# python -m vllm.entrypoints.openai.api_server --model Qwen/Qwen3-4B --gpu-memory-utilization 0.8 --enable-auto-tool-choice --tool-call-parser hermes
+# In another terminal window, in an environment with SGLang installed, run:
+# python -m sglang.launch_server --model-path Qwen/Qwen3-4B --tool-call-parser qwen25
+# In another terminal window, run:
+# massgen --config @examples/basic/multi/two_qwen_vllm_sglang "what is machine learning?"
+agents:
+  - id: "qwen1"
+    backend:
+      type: "vllm"
+      model: "Qwen/Qwen3-4B"
+      base_url: "http://localhost:8000/v1"
+      chat_template_kwargs:
+        enable_thinking: True
+      top_k: 50
+  - id: "qwen2"
+    backend:
+      type: "sglang"
+      model: "Qwen/Qwen3-4B"
+      base_url: "http://localhost:30000/v1"
+      extra_body:
+        chat_template_kwargs:
+          enable_thinking: True
+
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
@@ -1,5 +1,5 @@
 # Example Gemini configuration for MassGen
-# Usage: python -m massgen.cli --config example_gemini_config.yaml "Your question here"
+# Usage: uv run python -m massgen.cli --config example_gemini_config.yaml "Your question here"
 
 # Single agent configuration
 agent:
@@ -0,0 +1,16 @@
+# Single Gemini 2.5 Pro configuration for MassGen
+# Usage: uv run python -m massgen.cli --config basic/single/single_gemini2.5pro.yaml "Your question here"
+
+# Single agent configuration
+agent:
+  id: "gemini2.5pro"
+  backend:
+    type: "gemini"
+    model: "gemini-2.5-pro"
+    enable_web_search: true
+  system_message: "You are a helpful assistant"
+
+# Display configuration
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
@@ -0,0 +1,22 @@
+# MassGen Configuration
+# Usage:
+# uv run python -m massgen.cli --config massgen/configs/basic/single/single_gpt4o_audio_generation.yaml "I want to you tell me a very short introduction about Sherlock Homes in one sentence, and I want you to use emotion voice to read it out loud."
+agents:
+  - id: "gpt4o_agent1"
+    backend:
+      type: "openai"
+      model: "gpt-4o"
+      text:
+        verbosity: "medium"
+      # upload_files:
+      # - image_path: "/Users/danruiqi/Desktop/Danrui/Research/MassGen/git19/MassGen/massgen/configs/resources/v0.0.27-example/multimodality.jpg" # Example image to be used by the agent
+      cwd: "workspace1" # Working directory for file operations
+      enable_audio_generation: true # Enable audio generation (currently not implemented)
+
+orchestrator:
+  snapshot_storage: "snapshots" # Directory to store workspace snapshots
+  agent_temporary_workspace: "temp_workspaces" # Directory for temporary agent workspaces
+
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
@@ -0,0 +1,22 @@
+# MassGen Configuration
+# Usage:
+# massgen --config @examples/basic/single/single_gpt4o_image_generation "Generate an image of gray tabby cat hugging an otter with an orange scarf."
+agents:
+  - id: "gpt4o_agent1"
+    backend:
+      type: "openai"
+      model: "gpt-4o"
+      text:
+        verbosity: "medium"
+      # upload_files:
+      # - image_path: "/Users/danruiqi/Desktop/Danrui/Research/MassGen/git19/MassGen/massgen/configs/resources/v0.0.27-example/multimodality.jpg" # Example image to be used by the agent
+      cwd: "workspace1" # Working directory for file operations
+      enable_image_generation: true # Enable image generation tools
+
+orchestrator:
+  snapshot_storage: "snapshots" # Directory to store workspace snapshots
+  agent_temporary_workspace: "temp_workspaces" # Directory for temporary agent workspaces
+
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
@@ -0,0 +1,24 @@
+# MassGen Configuration
+# Usage:
+# uv run python -m massgen.cli --config massgen/configs/basic/single/single_gpt4o_video_generation.yaml "Generate a 4 seconds video with neon-lit alley at night, light rain, slow push-in, cinematic."
+
+# uv run python -m massgen.cli --config massgen/configs/basic/single/single_gpt4o_video_generation.yaml "Generate a 4 seconds video for 'Cherry blossom petals falling in the spring breeze, sunlight filtering through the pink petals creating a soft halo, slow motion capture, aesthetically beautiful and romantic, depth of field effect.'"
+
+agents:
+  - id: "gpt4o_agent1"
+    backend:
+      type: "openai"
+      model: "gpt-4o"
+      text:
+        verbosity: "medium"
+      # upload_files:
+      # - image_path: "/Users/danruiqi/Desktop/Danrui/Research/MassGen/git19/MassGen/massgen/configs/resources/v0.0.27-example/multimodality.jpg" # Example image to be used by the agent
+      cwd: "workspace1" # Working directory for file operations
+
+orchestrator:
+  snapshot_storage: "snapshots" # Directory to store workspace snapshots
+  agent_temporary_workspace: "temp_workspaces" # Directory for temporary agent workspaces
+
+ui:
+  display_type: "simple"
+  logging_enabled: true
@@ -0,0 +1,20 @@
+# GPT-5-nano with Full Capabilities
+# Single agent with reasoning, web search, and code execution enabled
+
+agents:
+  - id: "gpt-5-nano"
+    backend:
+      type: "openai"
+      model: "gpt-5-nano"
+      text:
+        verbosity: "medium"
+      reasoning:
+        effort: "medium"
+        # summary: "auto"
+      enable_web_search: true
+      enable_code_interpreter: true
+    # system_message: "You are an advanced AI assistant with reasoning, web search, and code execution capabilities. When solving problems, think step by step using your reasoning abilities, use web search to get current information when needed, and execute code to perform calculations or data analysis. Provide clear, well-reasoned responses."
+
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
@@ -0,0 +1,18 @@
+# MassGen File Search Configuration
+# Demonstrates OpenAI File Search for document retrieval and Q&A
+# Usage:
+# massgen --config @examples/basic/single/single_gpt5nano_file_search "What is humanity's last exam score for OpenAI Deep Research? Also, provide details about the other models mentioned in the PDF?"
+
+agent:
+  id: "file_search_agent"
+  backend:
+    type: "openai"
+    model: "gpt-5-nano"
+    upload_files:
+      - file_path: "https://cdn.openai.com/API/docs/deep_research_blog.pdf"
+  system_message: |
+    You are a helpful research assistant
+
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
@@ -0,0 +1,17 @@
+# MassGen Multimodal GPT Configuration
+# massgen --config @examples/basic/single/single_gpt5nano_image_understanding "Please summarize the content in this image."
+agent:
+  id: "response_agent"
+  # system_message: "You are a helpful AI assistant powered by Google Gemini."
+  backend:
+    type: "openai"
+    model: "gpt-5-nano"
+    upload_files:
+      - image_path: "massgen/configs/resources/v0.0.27-example/multimodality.jpg" #it will auto dected by backend
+    # enable_code_execution: true
+  system_message: "You are a helpful assistant"
+
+# Display configuration
+ui:
+  display_type: "simple"
+  logging_enabled: true
@@ -0,0 +1,15 @@
+# MassGen Single Agent Configuration
+# GPT-OSS-120B model
+
+agents:
+  # Cerebras AI
+  - id: "gpt-oss-120b"
+    backend:
+      type: "chatcompletion"
+      model: "gpt-oss-120b"
+      base_url: "https://api.cerebras.ai/v1"
+      # api_key: "cerebras_api_key"
+
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
@@ -0,0 +1,15 @@
+# MassGen Multimodal Configuration
+# uv run python -m massgen.cli --config massgen/configs/basic/single/single_openrouter_audio_understanding.yaml "What is in this recording?"
+agent:
+  id: "audio_agent"
+  backend:
+    type: "chatcompletion"
+    base_url: "https://openrouter.ai/api/v1"
+    model: "google/gemini-2.5-flash"
+    upload_files:
+      - audio_path: "https://cdn.openai.com/API/docs/audio/alloy.wav"
+  system_message: "You are a helpful assistant"
+# Display configuration
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
@@ -0,0 +1,15 @@
+# MassGen Qwen Multimodal Configuration
+# uv run python -m massgen.cli --config massgen/configs/basic/single/single_qwen_video_understanding.yaml "What is in the video, summarise it ?"
+agent:
+  id: "qwen_agent"
+  backend:
+    type: "chatcompletion"
+    base_url: "https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
+    model: "qwen3-vl-30b-a3b-thinking"
+    upload_files:
+      - video_path: "path-here"
+  system_message: "You are a helpful assistant"
+# Display configuration
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
@@ -0,0 +1,29 @@
+# Command Execution with Additional Blacklist Filtering
+# Blocks commands beyond the default safety checks
+# Run with: uv run python -m massgen.cli --config massgen/configs/debug/code_execution/command_filtering_blacklist.yaml "Write and test a sorting algorithm in Python"
+# Expected behavior: Agent cannot run Python commands or install packages, so will fail to complete the task and end or timeout.
+# Note this is a test; we would never ask for the conflicting task in practice.
+
+agent:
+  id: "agent_a"
+  backend:
+    type: "gemini"
+    model: "gemini-2.5-pro"
+    cwd: "workspace"
+    enable_mcp_command_line: true
+    command_line_blocked_commands:
+      - "python.*" # Matches python with or without args
+      - "python3.*"
+      - "pytest.*"
+      - "pip.*"
+
+orchestrator:
+  snapshot_storage: "snapshots"
+  agent_temporary_workspace: "temp_workspaces"
+
+timeout_settings:
+  orchestrator_timeout_seconds: 120 # 120 seconds max coordination
+
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
@@ -0,0 +1,28 @@
+# Command Execution with Whitelist Filtering (Python/Testing Only)
+# Run with: uv run python -m massgen.cli --config massgen/configs/debug/code_execution/command_filtering_whitelist.yaml "Write and test a sorting algorithm in a non-Python language"
+# Expected behavior: Agent can only run Python commands and pytest for testing, so will fail to run non-Python code. It will thus fail or timeout without completing the task.
+# Note this is a test; we would never ask for the conflicting task in practice.
+
+agent:
+  id: "python_agent"
+  backend:
+    type: "openai"
+    model: "gpt-5-mini"
+    cwd: "workspace"
+    enable_mcp_command_line: true
+    command_line_allowed_commands:
+      - "python.*"
+      - "python3.*"
+      - "pytest.*"
+      - "pip.*"
+
+orchestrator:
+  snapshot_storage: "snapshots"
+  agent_temporary_workspace: "temp_workspaces"
+
+timeout_settings:
+  orchestrator_timeout_seconds: 120 # 120 seconds max coordination
+
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
@@ -0,0 +1,29 @@
+# Docker Isolation Verification Config
+# Run with: uv run python -m massgen.cli --config massgen/configs/debug/code_execution/docker_verification.yaml "Run these commands and save output to verification.txt: 1) hostname, 2) cat /etc/hostname, 3) whoami, 4) cat /proc/1/cgroup | head -5, 5) ls -la /. Then install numpy and pandas packages. Create a Python script that imports these packages and prints their versions and installation locations."
+
+# Expected behavior: Output proves execution is in Docker container (different hostname, container user, docker cgroups). Packages install in container, not on host.
+# The hostname should show a container ID (not your host), whoami should show 'massgen', and /proc/1/cgroup should show docker paths - this proves we're in a container!"
+# Verification: Check verification.txt - hostname should be a hex ID like 'a1b2c3d4e5f6', NOT your computer's hostname
+# Prerequisites: Docker installed and running, massgen/mcp-runtime:latest image built (run massgen/docker/build.sh)
+
+agent:
+  id: "agent_a"
+  backend:
+    type: "openai"
+    model: "gpt-5-mini"
+    cwd: "workspace1"
+
+    # Enable command execution with Docker isolation
+    enable_mcp_command_line: true
+    command_line_execution_mode: "docker"
+
+orchestrator:
+  snapshot_storage: "snapshots"
+  agent_temporary_workspace: "temp_workspaces"
+
+timeout_settings:
+  orchestrator_timeout_seconds: 300 # More time for package installation
+
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
@@ -0,0 +1,27 @@
+# DEBUG config for testing final presentation mode
+# This skips all coordination/voting rounds and goes straight to final presentation
+# Useful for debugging final presentation logic without running full coordination
+
+agents:
+  - id: "test_agent"
+    backend:
+      type: "gemini"
+      model: "gemini-2.5-flash"
+      cwd: "workspace1"
+
+orchestrator:
+  snapshot_storage: "snapshots"
+  agent_temporary_workspace: "temp_workspaces"
+
+  # Example: Add a context path with write access for testing file delivery in final presentation
+  context_paths:
+    - path: "massgen/configs/resources/v0.0.21-example"
+      permission: "write"
+
+  # DEBUG: Skip coordination rounds - go straight to final presentation
+  # When enabled, the first agent is selected as winner and immediately enters final presentation mode
+  skip_coordination_rounds: true
+
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
@@ -0,0 +1,17 @@
+# DEBUG: Test SDK Migration - Verify claude-agent-sdk works
+# Run: uv run python -m massgen.cli --config massgen/configs/debug/test_sdk_migration.yaml "List files in workspace"
+
+agents:
+  - id: "sdk_test"
+    backend:
+      type: "claude_code"
+      cwd: "workspace_sdk_test"
+      model: "claude-sonnet-4-20250514"
+
+orchestrator:
+  snapshot_storage: "snapshots"
+  agent_temporary_workspace: "temp_workspaces"
+
+ui:
+  type: "rich_terminal"
+  logging_enabled: true