massgen 0.0.3__py3-none-any.whl → 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (268)
  1. massgen/__init__.py +142 -8
  2. massgen/adapters/__init__.py +29 -0
  3. massgen/adapters/ag2_adapter.py +483 -0
  4. massgen/adapters/base.py +183 -0
  5. massgen/adapters/tests/__init__.py +0 -0
  6. massgen/adapters/tests/test_ag2_adapter.py +439 -0
  7. massgen/adapters/tests/test_agent_adapter.py +128 -0
  8. massgen/adapters/utils/__init__.py +2 -0
  9. massgen/adapters/utils/ag2_utils.py +236 -0
  10. massgen/adapters/utils/tests/__init__.py +0 -0
  11. massgen/adapters/utils/tests/test_ag2_utils.py +138 -0
  12. massgen/agent_config.py +329 -55
  13. massgen/api_params_handler/__init__.py +10 -0
  14. massgen/api_params_handler/_api_params_handler_base.py +99 -0
  15. massgen/api_params_handler/_chat_completions_api_params_handler.py +176 -0
  16. massgen/api_params_handler/_claude_api_params_handler.py +113 -0
  17. massgen/api_params_handler/_response_api_params_handler.py +130 -0
  18. massgen/backend/__init__.py +39 -4
  19. massgen/backend/azure_openai.py +385 -0
  20. massgen/backend/base.py +341 -69
  21. massgen/backend/base_with_mcp.py +1102 -0
  22. massgen/backend/capabilities.py +386 -0
  23. massgen/backend/chat_completions.py +577 -130
  24. massgen/backend/claude.py +1033 -537
  25. massgen/backend/claude_code.py +1203 -0
  26. massgen/backend/cli_base.py +209 -0
  27. massgen/backend/docs/BACKEND_ARCHITECTURE.md +126 -0
  28. massgen/backend/{CLAUDE_API_RESEARCH.md → docs/CLAUDE_API_RESEARCH.md} +18 -18
  29. massgen/backend/{GEMINI_API_DOCUMENTATION.md → docs/GEMINI_API_DOCUMENTATION.md} +9 -9
  30. massgen/backend/docs/Gemini MCP Integration Analysis.md +1050 -0
  31. massgen/backend/docs/MCP_IMPLEMENTATION_CLAUDE_BACKEND.md +177 -0
  32. massgen/backend/docs/MCP_INTEGRATION_RESPONSE_BACKEND.md +352 -0
  33. massgen/backend/docs/OPENAI_GPT5_MODELS.md +211 -0
  34. massgen/backend/{OPENAI_RESPONSES_API_FORMAT.md → docs/OPENAI_RESPONSE_API_TOOL_CALLS.md} +3 -3
  35. massgen/backend/docs/OPENAI_response_streaming.md +20654 -0
  36. massgen/backend/docs/inference_backend.md +257 -0
  37. massgen/backend/docs/permissions_and_context_files.md +1085 -0
  38. massgen/backend/external.py +126 -0
  39. massgen/backend/gemini.py +1850 -241
  40. massgen/backend/grok.py +40 -156
  41. massgen/backend/inference.py +156 -0
  42. massgen/backend/lmstudio.py +171 -0
  43. massgen/backend/response.py +1095 -322
  44. massgen/chat_agent.py +131 -113
  45. massgen/cli.py +1560 -275
  46. massgen/config_builder.py +2396 -0
  47. massgen/configs/BACKEND_CONFIGURATION.md +458 -0
  48. massgen/configs/README.md +559 -216
  49. massgen/configs/ag2/ag2_case_study.yaml +27 -0
  50. massgen/configs/ag2/ag2_coder.yaml +34 -0
  51. massgen/configs/ag2/ag2_coder_case_study.yaml +36 -0
  52. massgen/configs/ag2/ag2_gemini.yaml +27 -0
  53. massgen/configs/ag2/ag2_groupchat.yaml +108 -0
  54. massgen/configs/ag2/ag2_groupchat_gpt.yaml +118 -0
  55. massgen/configs/ag2/ag2_single_agent.yaml +21 -0
  56. massgen/configs/basic/multi/fast_timeout_example.yaml +37 -0
  57. massgen/configs/basic/multi/gemini_4o_claude.yaml +31 -0
  58. massgen/configs/basic/multi/gemini_gpt5nano_claude.yaml +36 -0
  59. massgen/configs/{gemini_4o_claude.yaml → basic/multi/geminicode_4o_claude.yaml} +3 -3
  60. massgen/configs/basic/multi/geminicode_gpt5nano_claude.yaml +36 -0
  61. massgen/configs/basic/multi/glm_gemini_claude.yaml +25 -0
  62. massgen/configs/basic/multi/gpt4o_audio_generation.yaml +30 -0
  63. massgen/configs/basic/multi/gpt4o_image_generation.yaml +31 -0
  64. massgen/configs/basic/multi/gpt5nano_glm_qwen.yaml +26 -0
  65. massgen/configs/basic/multi/gpt5nano_image_understanding.yaml +26 -0
  66. massgen/configs/{three_agents_default.yaml → basic/multi/three_agents_default.yaml} +8 -4
  67. massgen/configs/basic/multi/three_agents_opensource.yaml +27 -0
  68. massgen/configs/basic/multi/three_agents_vllm.yaml +20 -0
  69. massgen/configs/basic/multi/two_agents_gemini.yaml +19 -0
  70. massgen/configs/{two_agents.yaml → basic/multi/two_agents_gpt5.yaml} +14 -6
  71. massgen/configs/basic/multi/two_agents_opensource_lmstudio.yaml +31 -0
  72. massgen/configs/basic/multi/two_qwen_vllm_sglang.yaml +28 -0
  73. massgen/configs/{single_agent.yaml → basic/single/single_agent.yaml} +1 -1
  74. massgen/configs/{single_flash2.5.yaml → basic/single/single_flash2.5.yaml} +1 -2
  75. massgen/configs/basic/single/single_gemini2.5pro.yaml +16 -0
  76. massgen/configs/basic/single/single_gpt4o_audio_generation.yaml +22 -0
  77. massgen/configs/basic/single/single_gpt4o_image_generation.yaml +22 -0
  78. massgen/configs/basic/single/single_gpt4o_video_generation.yaml +24 -0
  79. massgen/configs/basic/single/single_gpt5nano.yaml +20 -0
  80. massgen/configs/basic/single/single_gpt5nano_file_search.yaml +18 -0
  81. massgen/configs/basic/single/single_gpt5nano_image_understanding.yaml +17 -0
  82. massgen/configs/basic/single/single_gptoss120b.yaml +15 -0
  83. massgen/configs/basic/single/single_openrouter_audio_understanding.yaml +15 -0
  84. massgen/configs/basic/single/single_qwen_video_understanding.yaml +15 -0
  85. massgen/configs/debug/code_execution/command_filtering_blacklist.yaml +29 -0
  86. massgen/configs/debug/code_execution/command_filtering_whitelist.yaml +28 -0
  87. massgen/configs/debug/code_execution/docker_verification.yaml +29 -0
  88. massgen/configs/debug/skip_coordination_test.yaml +27 -0
  89. massgen/configs/debug/test_sdk_migration.yaml +17 -0
  90. massgen/configs/docs/DISCORD_MCP_SETUP.md +208 -0
  91. massgen/configs/docs/TWITTER_MCP_ENESCINAR_SETUP.md +82 -0
  92. massgen/configs/providers/azure/azure_openai_multi.yaml +21 -0
  93. massgen/configs/providers/azure/azure_openai_single.yaml +19 -0
  94. massgen/configs/providers/claude/claude.yaml +14 -0
  95. massgen/configs/providers/gemini/gemini_gpt5nano.yaml +28 -0
  96. massgen/configs/providers/local/lmstudio.yaml +11 -0
  97. massgen/configs/providers/openai/gpt5.yaml +46 -0
  98. massgen/configs/providers/openai/gpt5_nano.yaml +46 -0
  99. massgen/configs/providers/others/grok_single_agent.yaml +19 -0
  100. massgen/configs/providers/others/zai_coding_team.yaml +108 -0
  101. massgen/configs/providers/others/zai_glm45.yaml +12 -0
  102. massgen/configs/{creative_team.yaml → teams/creative/creative_team.yaml} +16 -6
  103. massgen/configs/{travel_planning.yaml → teams/creative/travel_planning.yaml} +16 -6
  104. massgen/configs/{news_analysis.yaml → teams/research/news_analysis.yaml} +16 -6
  105. massgen/configs/{research_team.yaml → teams/research/research_team.yaml} +15 -7
  106. massgen/configs/{technical_analysis.yaml → teams/research/technical_analysis.yaml} +16 -6
  107. massgen/configs/tools/code-execution/basic_command_execution.yaml +25 -0
  108. massgen/configs/tools/code-execution/code_execution_use_case_simple.yaml +41 -0
  109. massgen/configs/tools/code-execution/docker_claude_code.yaml +32 -0
  110. massgen/configs/tools/code-execution/docker_multi_agent.yaml +32 -0
  111. massgen/configs/tools/code-execution/docker_simple.yaml +29 -0
  112. massgen/configs/tools/code-execution/docker_with_resource_limits.yaml +32 -0
  113. massgen/configs/tools/code-execution/multi_agent_playwright_automation.yaml +57 -0
  114. massgen/configs/tools/filesystem/cc_gpt5_gemini_filesystem.yaml +34 -0
  115. massgen/configs/tools/filesystem/claude_code_context_sharing.yaml +68 -0
  116. massgen/configs/tools/filesystem/claude_code_flash2.5.yaml +43 -0
  117. massgen/configs/tools/filesystem/claude_code_flash2.5_gptoss.yaml +49 -0
  118. massgen/configs/tools/filesystem/claude_code_gpt5nano.yaml +31 -0
  119. massgen/configs/tools/filesystem/claude_code_single.yaml +40 -0
  120. massgen/configs/tools/filesystem/fs_permissions_test.yaml +87 -0
  121. massgen/configs/tools/filesystem/gemini_gemini_workspace_cleanup.yaml +54 -0
  122. massgen/configs/tools/filesystem/gemini_gpt5_filesystem_casestudy.yaml +30 -0
  123. massgen/configs/tools/filesystem/gemini_gpt5nano_file_context_path.yaml +43 -0
  124. massgen/configs/tools/filesystem/gemini_gpt5nano_protected_paths.yaml +45 -0
  125. massgen/configs/tools/filesystem/gpt5mini_cc_fs_context_path.yaml +31 -0
  126. massgen/configs/tools/filesystem/grok4_gpt5_gemini_filesystem.yaml +32 -0
  127. massgen/configs/tools/filesystem/multiturn/grok4_gpt5_claude_code_filesystem_multiturn.yaml +58 -0
  128. massgen/configs/tools/filesystem/multiturn/grok4_gpt5_gemini_filesystem_multiturn.yaml +58 -0
  129. massgen/configs/tools/filesystem/multiturn/two_claude_code_filesystem_multiturn.yaml +47 -0
  130. massgen/configs/tools/filesystem/multiturn/two_gemini_flash_filesystem_multiturn.yaml +48 -0
  131. massgen/configs/tools/mcp/claude_code_discord_mcp_example.yaml +27 -0
  132. massgen/configs/tools/mcp/claude_code_simple_mcp.yaml +35 -0
  133. massgen/configs/tools/mcp/claude_code_twitter_mcp_example.yaml +32 -0
  134. massgen/configs/tools/mcp/claude_mcp_example.yaml +24 -0
  135. massgen/configs/tools/mcp/claude_mcp_test.yaml +27 -0
  136. massgen/configs/tools/mcp/five_agents_travel_mcp_test.yaml +157 -0
  137. massgen/configs/tools/mcp/five_agents_weather_mcp_test.yaml +103 -0
  138. massgen/configs/tools/mcp/gemini_mcp_example.yaml +24 -0
  139. massgen/configs/tools/mcp/gemini_mcp_filesystem_test.yaml +23 -0
  140. massgen/configs/tools/mcp/gemini_mcp_filesystem_test_sharing.yaml +23 -0
  141. massgen/configs/tools/mcp/gemini_mcp_filesystem_test_single_agent.yaml +17 -0
  142. massgen/configs/tools/mcp/gemini_mcp_filesystem_test_with_claude_code.yaml +24 -0
  143. massgen/configs/tools/mcp/gemini_mcp_test.yaml +27 -0
  144. massgen/configs/tools/mcp/gemini_notion_mcp.yaml +52 -0
  145. massgen/configs/tools/mcp/gpt5_nano_mcp_example.yaml +24 -0
  146. massgen/configs/tools/mcp/gpt5_nano_mcp_test.yaml +27 -0
  147. massgen/configs/tools/mcp/gpt5mini_claude_code_discord_mcp_example.yaml +38 -0
  148. massgen/configs/tools/mcp/gpt_oss_mcp_example.yaml +25 -0
  149. massgen/configs/tools/mcp/gpt_oss_mcp_test.yaml +28 -0
  150. massgen/configs/tools/mcp/grok3_mini_mcp_example.yaml +24 -0
  151. massgen/configs/tools/mcp/grok3_mini_mcp_test.yaml +27 -0
  152. massgen/configs/tools/mcp/multimcp_gemini.yaml +111 -0
  153. massgen/configs/tools/mcp/qwen_api_mcp_example.yaml +25 -0
  154. massgen/configs/tools/mcp/qwen_api_mcp_test.yaml +28 -0
  155. massgen/configs/tools/mcp/qwen_local_mcp_example.yaml +24 -0
  156. massgen/configs/tools/mcp/qwen_local_mcp_test.yaml +27 -0
  157. massgen/configs/tools/planning/five_agents_discord_mcp_planning_mode.yaml +140 -0
  158. massgen/configs/tools/planning/five_agents_filesystem_mcp_planning_mode.yaml +151 -0
  159. massgen/configs/tools/planning/five_agents_notion_mcp_planning_mode.yaml +151 -0
  160. massgen/configs/tools/planning/five_agents_twitter_mcp_planning_mode.yaml +155 -0
  161. massgen/configs/tools/planning/gpt5_mini_case_study_mcp_planning_mode.yaml +73 -0
  162. massgen/configs/tools/web-search/claude_streamable_http_test.yaml +43 -0
  163. massgen/configs/tools/web-search/gemini_streamable_http_test.yaml +43 -0
  164. massgen/configs/tools/web-search/gpt5_mini_streamable_http_test.yaml +43 -0
  165. massgen/configs/tools/web-search/gpt_oss_streamable_http_test.yaml +44 -0
  166. massgen/configs/tools/web-search/grok3_mini_streamable_http_test.yaml +43 -0
  167. massgen/configs/tools/web-search/qwen_api_streamable_http_test.yaml +44 -0
  168. massgen/configs/tools/web-search/qwen_local_streamable_http_test.yaml +43 -0
  169. massgen/coordination_tracker.py +708 -0
  170. massgen/docker/README.md +462 -0
  171. massgen/filesystem_manager/__init__.py +21 -0
  172. massgen/filesystem_manager/_base.py +9 -0
  173. massgen/filesystem_manager/_code_execution_server.py +545 -0
  174. massgen/filesystem_manager/_docker_manager.py +477 -0
  175. massgen/filesystem_manager/_file_operation_tracker.py +248 -0
  176. massgen/filesystem_manager/_filesystem_manager.py +813 -0
  177. massgen/filesystem_manager/_path_permission_manager.py +1261 -0
  178. massgen/filesystem_manager/_workspace_tools_server.py +1815 -0
  179. massgen/formatter/__init__.py +10 -0
  180. massgen/formatter/_chat_completions_formatter.py +284 -0
  181. massgen/formatter/_claude_formatter.py +235 -0
  182. massgen/formatter/_formatter_base.py +156 -0
  183. massgen/formatter/_response_formatter.py +263 -0
  184. massgen/frontend/__init__.py +1 -2
  185. massgen/frontend/coordination_ui.py +471 -286
  186. massgen/frontend/displays/base_display.py +56 -11
  187. massgen/frontend/displays/create_coordination_table.py +1956 -0
  188. massgen/frontend/displays/rich_terminal_display.py +1259 -619
  189. massgen/frontend/displays/simple_display.py +9 -4
  190. massgen/frontend/displays/terminal_display.py +27 -68
  191. massgen/logger_config.py +681 -0
  192. massgen/mcp_tools/README.md +232 -0
  193. massgen/mcp_tools/__init__.py +105 -0
  194. massgen/mcp_tools/backend_utils.py +1035 -0
  195. massgen/mcp_tools/circuit_breaker.py +195 -0
  196. massgen/mcp_tools/client.py +894 -0
  197. massgen/mcp_tools/config_validator.py +138 -0
  198. massgen/mcp_tools/docs/circuit_breaker.md +646 -0
  199. massgen/mcp_tools/docs/client.md +950 -0
  200. massgen/mcp_tools/docs/config_validator.md +478 -0
  201. massgen/mcp_tools/docs/exceptions.md +1165 -0
  202. massgen/mcp_tools/docs/security.md +854 -0
  203. massgen/mcp_tools/exceptions.py +338 -0
  204. massgen/mcp_tools/hooks.py +212 -0
  205. massgen/mcp_tools/security.py +780 -0
  206. massgen/message_templates.py +342 -64
  207. massgen/orchestrator.py +1515 -241
  208. massgen/stream_chunk/__init__.py +35 -0
  209. massgen/stream_chunk/base.py +92 -0
  210. massgen/stream_chunk/multimodal.py +237 -0
  211. massgen/stream_chunk/text.py +162 -0
  212. massgen/tests/mcp_test_server.py +150 -0
  213. massgen/tests/multi_turn_conversation_design.md +0 -8
  214. massgen/tests/test_azure_openai_backend.py +156 -0
  215. massgen/tests/test_backend_capabilities.py +262 -0
  216. massgen/tests/test_backend_event_loop_all.py +179 -0
  217. massgen/tests/test_chat_completions_refactor.py +142 -0
  218. massgen/tests/test_claude_backend.py +15 -28
  219. massgen/tests/test_claude_code.py +268 -0
  220. massgen/tests/test_claude_code_context_sharing.py +233 -0
  221. massgen/tests/test_claude_code_orchestrator.py +175 -0
  222. massgen/tests/test_cli_backends.py +180 -0
  223. massgen/tests/test_code_execution.py +679 -0
  224. massgen/tests/test_external_agent_backend.py +134 -0
  225. massgen/tests/test_final_presentation_fallback.py +237 -0
  226. massgen/tests/test_gemini_planning_mode.py +351 -0
  227. massgen/tests/test_grok_backend.py +7 -10
  228. massgen/tests/test_http_mcp_server.py +42 -0
  229. massgen/tests/test_integration_simple.py +198 -0
  230. massgen/tests/test_mcp_blocking.py +125 -0
  231. massgen/tests/test_message_context_building.py +29 -47
  232. massgen/tests/test_orchestrator_final_presentation.py +48 -0
  233. massgen/tests/test_path_permission_manager.py +2087 -0
  234. massgen/tests/test_rich_terminal_display.py +14 -13
  235. massgen/tests/test_timeout.py +133 -0
  236. massgen/tests/test_v3_3agents.py +11 -12
  237. massgen/tests/test_v3_simple.py +8 -13
  238. massgen/tests/test_v3_three_agents.py +11 -18
  239. massgen/tests/test_v3_two_agents.py +8 -13
  240. massgen/token_manager/__init__.py +7 -0
  241. massgen/token_manager/token_manager.py +400 -0
  242. massgen/utils.py +52 -16
  243. massgen/v1/agent.py +45 -91
  244. massgen/v1/agents.py +18 -53
  245. massgen/v1/backends/gemini.py +50 -153
  246. massgen/v1/backends/grok.py +21 -54
  247. massgen/v1/backends/oai.py +39 -111
  248. massgen/v1/cli.py +36 -93
  249. massgen/v1/config.py +8 -12
  250. massgen/v1/logging.py +43 -127
  251. massgen/v1/main.py +18 -32
  252. massgen/v1/orchestrator.py +68 -209
  253. massgen/v1/streaming_display.py +62 -163
  254. massgen/v1/tools.py +8 -12
  255. massgen/v1/types.py +9 -23
  256. massgen/v1/utils.py +5 -23
  257. massgen-0.1.0.dist-info/METADATA +1245 -0
  258. massgen-0.1.0.dist-info/RECORD +273 -0
  259. massgen-0.1.0.dist-info/entry_points.txt +2 -0
  260. massgen/frontend/logging/__init__.py +0 -9
  261. massgen/frontend/logging/realtime_logger.py +0 -197
  262. massgen-0.0.3.dist-info/METADATA +0 -568
  263. massgen-0.0.3.dist-info/RECORD +0 -76
  264. massgen-0.0.3.dist-info/entry_points.txt +0 -2
  265. /massgen/backend/{Function calling openai responses.md → docs/Function calling openai responses.md} +0 -0
  266. {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/WHEEL +0 -0
  267. {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/licenses/LICENSE +0 -0
  268. {massgen-0.0.3.dist-info → massgen-0.1.0.dist-info}/top_level.txt +0 -0
massgen/configs/ag2/ag2_case_study.yaml
@@ -0,0 +1,27 @@
+# Example configuration for using AG2 with MassGen
+# Single AG2 agent example
+# uv run python -m massgen.cli --config ag2/ag2_case_study.yaml "Compare the differences between AG2 and MassGen for LLM agents"
+agents:
+  - id: "ag2_assistant"
+    backend:
+      type: ag2
+      agent_config:
+        type: assistant
+        name: "AG2_Assistant"
+        system_message: "You are a helpful AI assistant powered by AG2."
+        llm_config:
+          - api_type: "anthropic"
+            model: "claude-sonnet-4-20250514"
+            temperature: 0.3
+            cache_seed: 42
+
+  - id: "gemini2.5pro"
+    backend:
+      type: "gemini"
+      model: "gemini-2.5-pro"
+      enable_web_search: true
+
+# UI Configuration
+ui:
+  type: "rich_terminal"
+  logging_enabled: true
massgen/configs/ag2/ag2_coder.yaml
@@ -0,0 +1,34 @@
+# Example configuration for using AG2 with MassGen
+# Single AG2 agent example
+# uv run python -m massgen.cli --config ag2/ag2_coder.yaml "Create a factorial function and calculate the factorial of 8. Show the result?"
+agents:
+  - id: "ag2_coder"
+    backend:
+      type: ag2
+      agent_config:
+        type: assistant
+        name: "AG2_coder"
+        system_message: |
+          You are a helpful coding assistant. When asked to create and run code:
+          1. Write Python code in a markdown code block (```python ... ```)
+          2. The code will be automatically executed
+          3. Always print the results so they are visible
+
+          For factorial calculations:
+          - Define the factorial function
+          - Call it with the provided number
+          - Print the result clearly
+        llm_config:
+          api_type: "openai"
+          model: "gpt-5"
+        code_execution_config:
+          executor:
+            type: "LocalCommandLineCodeExecutor"
+            timeout: 60
+            work_dir: "./code_execution_workspace"
+
+
+# UI Configuration
+ui:
+  type: "rich_terminal"
+  logging_enabled: true
massgen/configs/ag2/ag2_coder_case_study.yaml
@@ -0,0 +1,36 @@
+# Example configuration for using AG2 with MassGen
+# Single AG2 agent example
+# uv run python -m massgen.cli --config ag2/ag2_coder_case_study.yaml "Output a summary comparing the differences between AG2 (https://github.com/ag2ai/ag2) and MassGen (https://github.com/Leezekun/MassGen) for LLM agents."
+agents:
+  - id: "ag2_coder"
+    backend:
+      type: ag2
+      agent_config:
+        type: assistant
+        name: "AG2_coder"
+        system_message: |
+          You are a helpful coding assistant. When asked to create and run code:
+          1. Write Python code in a markdown code block (```python ... ```)
+          2. The code will be automatically executed
+          3. Always print the results so they are visible
+
+          If you need to access information from the web, create and run a web scraping script.
+        llm_config:
+          api_type: "openai"
+          model: "gpt-5"
+        code_execution_config:
+          executor:
+            type: "LocalCommandLineCodeExecutor"
+            timeout: 60
+            work_dir: "./code_execution_workspace"
+
+  - id: "gemini2.5pro"
+    backend:
+      type: "gemini"
+      model: "gemini-2.5-pro"
+      enable_web_search: true
+
+# UI Configuration
+ui:
+  type: "rich_terminal"
+  logging_enabled: true
massgen/configs/ag2/ag2_gemini.yaml
@@ -0,0 +1,27 @@
+# Example configuration for using AG2 with MassGen
+# Single AG2 agent example
+# uv run python -m massgen.cli --config ag2/ag2_gemini.yaml "what is quantum computing?"
+agents:
+  - id: "ag2_assistant"
+    backend:
+      type: ag2
+      agent_config:
+        type: assistant
+        name: "AG2_Assistant"
+        system_message: "You are a helpful AI assistant powered by AG2."
+        llm_config:
+          - api_type: "anthropic"
+            model: "claude-sonnet-4-20250514"
+            temperature: 0.7
+            cache_seed: 42
+
+  - id: "gemini2.5flash"
+    backend:
+      type: "gemini"
+      model: "gemini-2.5-flash"
+      enable_web_search: true
+
+# UI Configuration
+ui:
+  type: "rich_terminal"
+  logging_enabled: true
massgen/configs/ag2/ag2_groupchat.yaml
@@ -0,0 +1,108 @@
+# Example configuration for using AG2 GroupChat with MassGen
+# The entire GroupChat acts as a single agent in MassGen's orchestration
+# uv run python -m massgen.cli --config ag2/ag2_groupchat.yaml "Write a Python function to calculate factorial."
+agents:
+  - id: "ag2_team"
+    backend:
+      type: ag2
+      group_config:
+        # Default llm_config for all agents (REQUIRED)
+        # Individual agents can override this if needed
+        llm_config:
+          api_type: "openai"
+          model: "gpt-5"
+
+        agents:
+          - type: assistant
+            name: "Coder"
+            system_message: "You are an expert programmer who writes clean, efficient code."
+            # Uses default llm_config from group_config
+
+          - type: assistant
+            name: "Reviewer"
+            description: "Code reviewer who provides constructive feedback. Should be selected after Coder"
+            system_message: |
+              You are a code reviewer who provides constructive feedback. You role is to:
+              1. Review the code for correctness, efficiency, and style
+              2. Suggest improvements and optimizations
+              3. DO NOT write complete to address original request
+              4. DO NOT write test code
+              5. When you think the code is good enough to submit, say "LGTM" (Looks Good To Me)
+            # Override with different model
+            llm_config:
+              api_type: "google"
+              model: "gemini-2.5-flash"
+
+          - type: assistant
+            name: "Tester"
+            description: "QA engineer who writes and runs tests. Should be selected if new code has not been validated."
+            system_message: |
+              You are a QA engineer. Your role is to:
+              1. Write test code in Python that validates the provided function
+              2. Test edge cases (empty input, negative numbers, large values, etc.)
+              3. Execute the tests using code blocks
+              4. Report results and any failures
+
+              IMPORTANT: Always write executable Python code in markdown code blocks like:
+              ```python
+              # your test code here
+              ```
+
+              Do NOT just repeat the function code. Write NEW test code that calls the function.
+            llm_config:
+              api_type: "google"
+              model: "gemini-2.5-flash"
+            code_execution_config:
+              executor:
+                type: "LocalCommandLineCodeExecutor"
+                timeout: 60
+                work_dir: "./code_execution_workspace"
+
+        # Pattern configuration (REQUIRED)
+        # Determines how agents are selected to speak in the group chat
+        pattern:
+          type: "auto"  # Currently supported: "auto" only
+          # Required: Name of the agent that starts the conversation
+          initial_agent: "Coder"
+
+          # Optional: Additional pattern-specific arguments
+          # For AutoPattern, supported arguments include:
+          # - exclude_transit_message (bool): Hide transit messages
+          # - summary_method (str): How to summarize conversation
+          # Any additional arguments are passed directly to the pattern constructor
+
+        # Group manager configuration
+        group_manager_args:
+          # Optional: override llm_config for the group manager
+          # If not provided, uses default llm_config from group_config
+          llm_config:
+            api_type: "google"
+            model: "gemini-2.5-flash"
+
+        # Optional: Override the default user agent behavior. Not recommended to change.
+        # By default, a minimal user_agent is created automatically
+        # You can provide custom configuration here.
+        # However, it plays an important role in terminating the chat, so it shouldn't be changed ideally.
+        # user_agent:
+        #   name: "User"  # Must be "User" for termination to work
+        #   system_message: "MUST say 'TERMINATE' when the original request is well answered. Do NOT do anything else."
+        #   description: "MUST ONLY be selected when the original request is well answered and the conversation should terminate."
+        #   llm_config:  # Optional: override llm_config
+        #     api_type: "openai"
+        #     model: "gpt-4o"
+
+        # Maximum rounds of conversation before termination
+        max_rounds: 10
+
+  # # Optional: Mix with native MassGen agents
+  # - id: "claude_architect"
+  #   backend:
+  #     type: claude
+  #     model: claude-3-opus-20240229
+  #     temperature: 0.7
+  #   system_message: "You are a software architect who designs scalable systems."
+
+# UI Configuration
+ui:
+  type: "rich_terminal"
+  logging_enabled: true
massgen/configs/ag2/ag2_groupchat_gpt.yaml
@@ -0,0 +1,118 @@
+# Example configuration for using AG2 GroupChat with MassGen
+# The entire GroupChat acts as a single agent in MassGen's orchestration
+# uv run python -m massgen.cli --config ag2/ag2_groupchat_gpt.yaml "Write a Python function to calculate factorial."
+agents:
+  - id: "gpt-5-nano"
+    system_message: "You are an expert programmer who writes clean, efficient code."
+    backend:
+      type: "openai"
+      model: "gpt-5-nano"
+      text:
+        verbosity: "medium"
+      reasoning:
+        effort: "medium"
+
+  - id: "ag2_team"
+    backend:
+      type: ag2
+      group_config:
+        # Default llm_config for all agents (REQUIRED)
+        # Individual agents can override this if needed
+        llm_config:
+          api_type: "openai"
+          model: "gpt-5"
+
+        agents:
+          - type: assistant
+            name: "Coder"
+            system_message: "You are an expert programmer who writes clean, efficient code."
+            # Uses default llm_config from group_config
+
+          - type: assistant
+            name: "Reviewer"
+            description: "Code reviewer who provides constructive feedback. Should be selected after Coder"
+            system_message: |
+              You are a code reviewer who provides constructive feedback. You role is to:
+              1. Review the code for correctness, efficiency, and style
+              2. Suggest improvements and optimizations
+              3. DO NOT write complete to address original request
+              4. DO NOT write test code
+              5. When you think the code is good enough to submit, say "LGTM" (Looks Good To Me)
+            # Override with different model
+            llm_config:
+              api_type: "google"
+              model: "gemini-2.5-flash"
+
+          - type: assistant
+            name: "Tester"
+            description: "QA engineer who writes and runs tests. Should be selected if new code has not been validated."
+            system_message: |
+              You are a QA engineer. Your role is to:
+              1. Write test code in Python that validates the provided function
+              2. Test edge cases (empty input, negative numbers, large values, etc.)
+              3. Execute the tests using code blocks
+              4. Report results and any failures
+
+              IMPORTANT: Always write executable Python code in markdown code blocks like:
+              ```python
+              # your test code here
+              ```
+
+              Do NOT just repeat the function code. Write NEW test code that calls the function.
+            llm_config:
+              api_type: "google"
+              model: "gemini-2.5-flash"
+            code_execution_config:
+              executor:
+                type: "LocalCommandLineCodeExecutor"
+                timeout: 60
+                work_dir: "./code_execution_workspace"
+
+        # Pattern configuration (REQUIRED)
+        # Determines how agents are selected to speak in the group chat
+        pattern:
+          type: "auto"  # Currently supported: "auto" only
+          # Required: Name of the agent that starts the conversation
+          initial_agent: "Coder"
+
+          # Optional: Additional pattern-specific arguments
+          # For AutoPattern, supported arguments include:
+          # - exclude_transit_message (bool): Hide transit messages
+          # - summary_method (str): How to summarize conversation
+          # Any additional arguments are passed directly to the pattern constructor
+
+        # Group manager configuration
+        group_manager_args:
+          # Optional: override llm_config for the group manager
+          # If not provided, uses default llm_config from group_config
+          llm_config:
+            api_type: "google"
+            model: "gemini-2.5-flash"
+
+        # Optional: Override the default user agent behavior
+        # By default, a minimal user_agent is created automatically
+        # You can provide custom configuration here.- agents:
+        # However, it plays an important role in terminating the chat, so it shouldn't be changed ideally1
+        # user_agent:
+        #   name: "User"  # Must be "User" for termination to work
+        #   system_message: "MUST say 'TERMINATE' when the original request is well answered. Do NOT do anything else."
+        #   description: "MUST ONLY be selected when the original request is well answered and the conversation should terminate."
+        #   llm_config:  # Optional: override llm_config
+        #     api_type: "openai"
+        #     model: "gpt-4o"
+
+        # Maximum rounds of conversation before termination
+        max_rounds: 10
+
+  # # Optional: Mix with native MassGen agents
+  # - id: "claude_architect"
+  #   backend:
+  #     type: claude
+  #     model: claude-3-opus-20240229
+  #     temperature: 0.7
+  #   system_message: "You are a software architect who designs scalable systems."
+
+# UI Configuration
+ui:
+  type: "rich_terminal"
+  logging_enabled: true
massgen/configs/ag2/ag2_single_agent.yaml
@@ -0,0 +1,21 @@
+# Example configuration for using AG2 with MassGen
+# Single AG2 agent example
+# uv run python -m massgen.cli --config ag2/ag2_single_agent.yaml "what is quantum computing?"
+agents:
+  - id: "ag2_assistant"
+    backend:
+      type: ag2
+      agent_config:
+        type: assistant
+        name: "AG2_Assistant"
+        system_message: "You are a helpful AI assistant powered by AG2."
+        llm_config:
+          api_type: "openai"
+          model: "gpt-4o"
+          temperature: 0.7
+          cache_seed: 42
+
+# UI Configuration
+ui:
+  type: "rich_terminal"
+  logging_enabled: true
massgen/configs/basic/multi/fast_timeout_example.yaml
@@ -0,0 +1,37 @@
+# MassGen Fast Configuration with Conservative Timeouts
+# For cost-conscious users who want to prevent runaway usage
+
+# Conservative timeout settings to prevent excessive token usage
+timeout_settings:
+  orchestrator_timeout_seconds: 30  # 30 seconds max coordination
+
+agents:
+  - id: "fast-gpt-5-1"
+    backend:
+      type: "openai"
+      model: "gpt-5"
+      text:
+        verbosity: "medium"
+      reasoning:
+        effort: "high"
+        summary: "auto"
+      enable_web_search: true
+      enable_code_interpreter: true
+      # system_message: "You are a helpful AI assistant with web search and code execution capabilities. For any question involving current events, recent information, or real-time data, ALWAYS use web search first."
+  - id: "fast-gpt-5-2"
+    backend:
+      type: "openai"
+      model: "gpt-5"
+      text:
+        verbosity: "medium"
+      reasoning:
+        effort: "high"
+        summary: "auto"
+      enable_web_search: true
+      enable_code_interpreter: true
+      # system_message: "You are a helpful AI assistant with web search and code execution capabilities. For any question involving current events, recent information, or real-time data, ALWAYS use web search first."
+
+
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
massgen/configs/basic/multi/gemini_4o_claude.yaml
@@ -0,0 +1,31 @@
+# MassGen Three Agent Configuration
+# Gemini-2.5-flash, GPT-4o, and Claude-3.5-Haiku with builtin tools enabled
+
+agents:
+  - id: "gemini2.5flash"
+    backend:
+      type: "gemini"
+      model: "gemini-2.5-flash"
+      enable_web_search: true
+      # enable_code_execution: true  # Disabled by default - can preempt web search, resulting in weaker search capability
+      # system_message: "You are a helpful AI assistant with web search and code execution capabilities. For any question involving current events, recent information, or real-time data, ALWAYS use web search first."
+
+  - id: "gpt-4o"
+    backend:
+      type: "openai"
+      model: "gpt-4o"
+      enable_web_search: true
+      enable_code_interpreter: true
+      # system_message: "You are a helpful AI assistant with web search and code execution capabilities. For any question involving current events, recent information, or real-time data, ALWAYS use web search first."
+
+  - id: "claude-3-5-haiku"
+    backend:
+      type: "claude"
+      model: "claude-3-5-haiku-20241022"
+      enable_web_search: true
+      enable_code_execution: true
+      # system_message: "You are a helpful AI assistant with web search capabilities. For any question involving current events, recent information, or real-time data, ALWAYS use web search first."
+
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
massgen/configs/basic/multi/gemini_gpt5nano_claude.yaml
@@ -0,0 +1,36 @@
+# MassGen Three Agent Configuration
+# Gemini-2.5-flash, GPT-5-nano, and Claude-3.5-Haiku with builtin tools enabled
+
+agents:
+  - id: "gemini2.5flash"
+    backend:
+      type: "gemini"
+      model: "gemini-2.5-flash"
+      enable_web_search: true
+      # enable_code_execution: true  # Disabled by default - can preempt web search, resulting in weaker search capability
+      # system_message: "You are a helpful AI assistant with web search and code execution capabilities. For any question involving current events, recent information, or real-time data, ALWAYS use web search first."
+
+  - id: "gpt-5-nano"
+    backend:
+      type: "openai"
+      model: "gpt-5-nano"
+      text:
+        verbosity: "medium"
+      reasoning:
+        effort: "medium"
+        summary: "auto"
+      enable_web_search: true
+      enable_code_interpreter: true
+      # system_message: "You are a helpful AI assistant with web search and code execution capabilities. For any question involving current events, recent information, or real-time data, ALWAYS use web search first."
+
+  - id: "claude-3-5-haiku"
+    backend:
+      type: "claude"
+      model: "claude-3-5-haiku-20241022"
+      enable_web_search: true
+      enable_code_execution: true
+      # system_message: "You are a helpful AI assistant with web search capabilities. For any question involving current events, recent information, or real-time data, ALWAYS use web search first."
+
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
massgen/configs/{gemini_4o_claude.yaml → basic/multi/geminicode_4o_claude.yaml}
@@ -1,5 +1,5 @@
 # MassGen Three Agent Configuration
-# Gemini-2.5-flash, GPT-4o-mini, and Grok-3-mini with builtin tools enabled
+# Gemini-2.5-flash, GPT-4o, and claude-3-5-haiku-20241022 with builtin tools enabled
 
 agents:
   - id: "gemini2.5flash"
@@ -7,7 +7,7 @@ agents:
       type: "gemini"
       model: "gemini-2.5-flash"
      enable_web_search: true
-      # enable_code_execution: true
+      enable_code_execution: true
      # system_message: "You are a helpful AI assistant with web search and code execution capabilities. For any question involving current events, recent information, or real-time data, ALWAYS use web search first."
 
   - id: "gpt-4o"
@@ -23,7 +23,7 @@ agents:
      type: "claude"
      model: "claude-3-5-haiku-20241022"
      enable_web_search: true
-      return_citations: true
+      enable_code_execution: true
      # system_message: "You are a helpful AI assistant with web search capabilities. For any question involving current events, recent information, or real-time data, ALWAYS use web search first."
 
 ui:
massgen/configs/basic/multi/geminicode_gpt5nano_claude.yaml
@@ -0,0 +1,36 @@
+# MassGen Three Agent Configuration
+# Gemini-2.5-flash, GPT-5-nano, and Claude-3.5-Haiku with builtin tools enabled
+
+agents:
+  - id: "gemini2.5flash"
+    backend:
+      type: "gemini"
+      model: "gemini-2.5-flash"
+      enable_web_search: true
+      enable_code_execution: true
+      # system_message: "You are a helpful AI assistant with web search and code execution capabilities. For any question involving current events, recent information, or real-time data, ALWAYS use web search first."
+
+  - id: "gpt-5-nano"
+    backend:
+      type: "openai"
+      model: "gpt-5-nano"
+      text:
+        verbosity: "medium"
+      reasoning:
+        effort: "medium"
+        summary: "auto"
+      enable_web_search: true
+      enable_code_interpreter: true
+      # system_message: "You are a helpful AI assistant with web search and code execution capabilities. For any question involving current events, recent information, or real-time data, ALWAYS use web search first."
+
+  - id: "claude-3-5-haiku"
+    backend:
+      type: "claude"
+      model: "claude-3-5-haiku-20241022"
+      enable_web_search: true
+      enable_code_execution: true
+      # system_message: "You are a helpful AI assistant with web search capabilities. For any question involving current events, recent information, or real-time data, ALWAYS use web search first."
+
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
massgen/configs/basic/multi/glm_gemini_claude.yaml
@@ -0,0 +1,25 @@
+agents:
+  - id: "glm-4.5"
+    backend:
+      type: "zai"
+      model: "glm-4.5"
+      base_url: "https://api.z.ai/api/paas/v4/"
+
+  - id: "gemini-2.5-flash"
+    backend:
+      type: "gemini"
+      model: "gemini-2.5-flash"
+      enable_web_search: true
+
+  - id: "claude-3.5-haiku"
+    backend:
+      type: "claude"
+      model: "claude-3-5-haiku-20241022"
+      enable_web_search: true
+      enable_code_execution: true
+
+
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
+
massgen/configs/basic/multi/gpt4o_audio_generation.yaml
@@ -0,0 +1,30 @@
+# Example configuration demonstrating context_paths filesystem access control
+# This config shows how agents can access user-specified files with permission control
+# uv run python -m massgen.cli --config massgen/configs/basic/multi/gpt4o_audio_generation.yaml "I want to you tell me a very short introduction about Sherlock Homes in one sentence, and I want you to use emotion voice to read it out loud."
+agents:
+  - id: "gpt4o_1"
+    backend:
+      type: "openai"
+      model: "gpt-4o"
+      text:
+        verbosity: "medium"
+      cwd: "workspace1"
+      enable_audio_generation: true  # Enable audio generation (currently not implemented)
+
+  - id: "gpt4o_2"
+    backend:
+      type: "openai"
+      model: "gpt-4o"
+      text:
+        verbosity: "medium"
+      cwd: "workspace2"
+      enable_audio_generation: true  # Enable audio generation (currently not implemented)
+
+orchestrator:
+  snapshot_storage: "snapshots"
+  agent_temporary_workspace: "temp_workspaces"
+
+# UI Configuration
+ui:
+  type: "rich_terminal"
+  logging_enabled: true
massgen/configs/basic/multi/gpt4o_image_generation.yaml
@@ -0,0 +1,31 @@
+# Example configuration demonstrating context_paths filesystem access control
+# This config shows how agents can access user-specified files with permission control
+# massgen --config @examples/basic/multi/gpt4o_image_generation "Generate an image of gray tabby cat hugging an otter with an orange scarf."
+
+agents:
+  - id: "gpt4o_1"
+    backend:
+      type: "openai"
+      model: "gpt-4o"
+      text:
+        verbosity: "medium"
+      cwd: "workspace1"
+      enable_image_generation: true  # Enable image generation tools
+
+  - id: "gpt4o_2"
+    backend:
+      type: "openai"
+      model: "gpt-4o"
+      text:
+        verbosity: "medium"
+      cwd: "workspace2"
+      enable_image_generation: true  # Enable image generation tools
+
+orchestrator:
+  snapshot_storage: "snapshots"
+  agent_temporary_workspace: "temp_workspaces"
+
+# UI Configuration
+ui:
+  type: "rich_terminal"
+  logging_enabled: true
massgen/configs/basic/multi/gpt5nano_glm_qwen.yaml
@@ -0,0 +1,26 @@
+agents:
+  - id: "gpt-oss-1"  # Cerebras AI
+    backend:
+      type: "chatcompletion"
+      model: "gpt-oss-120b"
+      base_url: "https://api.cerebras.ai/v1"
+      # api_key: "cerebras_api_key"
+
+  - id: "zai_glm45_agent"
+    backend:
+      type: "zai"
+      model: "glm-4.5-air"
+      base_url: "https://api.z.ai/api/paas/v4/"
+      temperature: 0.7
+      top_p: 0.7
+
+  - id: "Qwen3-4b"
+    backend:
+      type: "lmstudio"
+      model: "qwen/qwen3-4b-2507"
+      # api_key: "lm-studio"
+
+ui:
+  display_type: "rich_terminal"
+  logging_enabled: true
+